Fixup checkstyle (31/111431/2, master)
author    Robert Varga <robert.varga@pantheon.tech>
          Tue, 16 Apr 2024 12:23:15 +0000 (14:23 +0200)
committer Robert Varga <robert.varga@pantheon.tech>
          Thu, 18 Apr 2024 18:37:30 +0000 (20:37 +0200)
The updated checkstyle is picking up a few new warnings. Fix them up.

Change-Id: Ib5fa92b84c7e4570098a1bd1163f0947a86ed7a9
Signed-off-by: Robert Varga <robert.varga@pantheon.tech>
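
The change itself does not describe the individual warnings, but as a rough illustration, the sketch below shows the kinds of small cleanups a checkstyle upgrade typically surfaces (unused imports, missing braces, non-final locals and parameters). The class, package, and method names are invented for this example and do not correspond to any file touched by this change; the referenced check names (UnusedImports, NeedBraces, FinalParameters, FinalLocalVariable) are standard Checkstyle checks, but whether they are the ones flagged here is an assumption.

    // Hypothetical example only -- not taken from this change.
    package org.example.checkstyle;

    import java.util.List;
    // import java.util.Map;    // UnusedImports: drop imports that are never referenced

    final class WarningFixups {
        private WarningFixups() {
            // utility class, not meant to be instantiated
        }

        static int sum(final List<Integer> values) {     // FinalParameters: mark parameters final
            int total = 0;
            for (final Integer value : values) {          // FinalLocalVariable: never reassigned
                if (value != null) {                      // NeedBraces: braces even for one statement
                    total += value;
                }
            }
            return total;
        }
    }
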
1974 files changed:
.readthedocs.yml [new file with mode: 0644]
INFO.yaml [new file with mode: 0644]
akka/pom.xml [moved from features/config-netty/pom.xml with 56% similarity]
akka/repackaged-akka-jar/pom.xml [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/LICENSE [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/actor_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/actor_typed_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/cluster_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/cluster_tools_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/cluster_typed_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/distributed_data_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/persistence_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/remote_reference.conf [new file with mode: 0644]
akka/repackaged-akka-jar/src/main/resources/stream_reference.conf [new file with mode: 0644]
akka/repackaged-akka/pom.xml [new file with mode: 0644]
artifacts/pom.xml [moved from opendaylight/md-sal/mdsal-artifacts/pom.xml with 81% similarity]
atomix-storage/LICENSE [new file with mode: 0644]
atomix-storage/pom.xml [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/BufferCleaner.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/Cleaner.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/DiskJournalSegmentWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerializer.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/MappedJournalSegmentWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java [new file with mode: 0644]
atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java [new file with mode: 0644]
atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java [new file with mode: 0644]
atomix-storage/src/test/resources/logback.xml [new file with mode: 0644]
benchmark/api/pom.xml
benchmark/api/src/main/yang/dsbenchmark.yang
benchmark/api/src/main/yang/ntfbench-payload.yang
benchmark/api/src/main/yang/ntfbenchmark.yang
benchmark/api/src/main/yang/rpcbench-payload.yang
benchmark/api/src/main/yang/rpcbenchmark.yang
benchmark/artifacts/pom.xml [deleted file]
benchmark/dsbenchmark/pom.xml
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/BaListBuilder.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DatastoreAbstractWriter.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DomListBuilder.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/DsbenchmarkProvider.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListener.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/listener/DsbenchmarkListenerProvider.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaDelete.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxBaWrite.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomDelete.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/simpletx/SimpletxDomWrite.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaDelete.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainBaWrite.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomDelete.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomRead.java
benchmark/dsbenchmark/src/main/java/org/opendaylight/dsbenchmark/txchain/TxchainDomWrite.java
benchmark/dsbenchmark/src/main/resources/OSGI-INF/blueprint/dsbenchmark.xml [deleted file]
benchmark/ntfbenchmark/pom.xml
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/AbstractNtfbenchProducer.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchBlockingProducer.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchNonblockingProducer.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchTestListener.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchWTCListener.java
benchmark/ntfbenchmark/src/main/java/ntfbenchmark/impl/NtfbenchmarkProvider.java
benchmark/ntfbenchmark/src/main/resources/OSGI-INF/blueprint/ntfbenchmark.xml [deleted file]
benchmark/pom.xml
benchmark/rpcbenchmark/pom.xml
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java [new file with mode: 0644]
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCClient.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/GlobalBindingRTCServer.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RTCClient.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java [new file with mode: 0644]
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTClient.java
benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RpcbenchmarkProvider.java
benchmark/rpcbenchmark/src/main/resources/OSGI-INF/blueprint/rpcbenchmark.xml [deleted file]
bundle-parent/pom.xml [new file with mode: 0644]
docs/conf.py
docs/conf.yaml
docs/dev-guide.rst
docs/images/Get.png [new file with mode: 0644]
docs/images/Put.png [new file with mode: 0644]
docs/pom.xml
features/benchmark/features-mdsal-benchmark/pom.xml [deleted file]
features/benchmark/pom.xml [deleted file]
features/config-netty/features-config-netty/pom.xml [deleted file]
features/config-netty/odl-config-netty/pom.xml [deleted file]
features/config-netty/odl-config-netty/src/main/feature/feature.xml [deleted file]
features/extras/odl-extras-all/pom.xml [deleted file]
features/extras/odl-jolokia/src/main/feature/feature.xml [deleted file]
features/extras/pom.xml [deleted file]
features/features-controller-experimental/pom.xml [moved from features/extras/features-extras/pom.xml with 55% similarity]
features/features-controller-testing/pom.xml [moved from features/mdsal/odl-mdsal-model-inventory/pom.xml with 61% similarity]
features/features-controller/pom.xml [moved from features/mdsal/odl-message-bus-collector/pom.xml with 53% similarity]
features/mdsal/features-mdsal/pom.xml [deleted file]
features/mdsal/odl-controller-mdsal-common/pom.xml [deleted file]
features/mdsal/odl-controller-model-topology/pom.xml [deleted file]
features/mdsal/odl-mdsal-broker-local/pom.xml [deleted file]
features/mdsal/odl-mdsal-broker/pom.xml [deleted file]
features/mdsal/odl-mdsal-distributed-datastore/pom.xml [deleted file]
features/mdsal/odl-mdsal-distributed-datastore/src/main/feature/feature.xml [deleted file]
features/mdsal/odl-mdsal-remoterpc-connector/src/main/feature/feature.xml [deleted file]
features/mdsal/pom.xml [deleted file]
features/odl-clustering-test-app/pom.xml [moved from features/mdsal/odl-clustering-test-app/pom.xml with 66% similarity]
features/odl-clustering-test-app/src/main/feature/feature.xml [new file with mode: 0644]
features/odl-controller-akka/pom.xml [new file with mode: 0644]
features/odl-controller-akka/src/main/history/dependencies.xml [new file with mode: 0644]
features/odl-controller-blueprint/pom.xml [moved from features/benchmark/odl-mdsal-benchmark/pom.xml with 56% similarity]
features/odl-controller-blueprint/src/main/feature/feature.xml [new file with mode: 0644]
features/odl-controller-broker-local/pom.xml [moved from features/mdsal/odl-mdsal-all/pom.xml with 50% similarity]
features/odl-controller-broker-local/src/main/feature/feature.xml [moved from features/mdsal/odl-mdsal-broker-local/src/main/feature/feature.xml with 56% similarity]
features/odl-controller-mdsal-common/pom.xml [moved from features/benchmark/odl-dsbenchmark/pom.xml with 57% similarity]
features/odl-controller-mdsal-common/src/main/feature/feature.xml [new file with mode: 0644]
features/odl-controller-scala/pom.xml [new file with mode: 0644]
features/odl-controller-scala/src/main/history/dependencies.xml [new file with mode: 0644]
features/odl-jolokia/pom.xml [moved from features/extras/odl-jolokia/pom.xml with 62% similarity]
features/odl-jolokia/src/main/feature/feature.xml [new file with mode: 0644]
features/odl-mdsal-benchmark/pom.xml [moved from features/benchmark/odl-ntfbenchmark/pom.xml with 54% similarity]
features/odl-mdsal-broker/pom.xml [moved from features/mdsal/odl-mdsal-remoterpc-connector/pom.xml with 55% similarity]
features/odl-mdsal-broker/src/main/feature/feature.xml [new file with mode: 0644]
features/odl-mdsal-clustering-commons/pom.xml [moved from features/mdsal/odl-mdsal-clustering-commons/pom.xml with 59% similarity]
features/odl-mdsal-clustering-commons/src/main/feature/feature.xml [moved from features/mdsal/odl-mdsal-clustering-commons/src/main/feature/feature.xml with 58% similarity]
features/odl-mdsal-distributed-datastore/pom.xml [new file with mode: 0644]
features/odl-mdsal-distributed-datastore/src/main/feature/feature.xml [moved from features/mdsal/odl-mdsal-broker/src/main/feature/feature.xml with 56% similarity]
features/odl-mdsal-remoterpc-connector/pom.xml [moved from features/benchmark/odl-benchmark-api/pom.xml with 59% similarity]
features/odl-toaster/pom.xml [moved from features/mdsal/odl-toaster/pom.xml with 77% similarity]
features/odl-toaster/src/main/feature/feature.xml [new file with mode: 0644]
features/pom.xml
features/single-feature-parent/pom.xml [moved from features/mdsal/odl-mdsal-clustering/pom.xml with 55% similarity]
jolokia/pom.xml [moved from opendaylight/commons/jolokia/pom.xml with 81% similarity]
jolokia/src/main/resources/org.jolokia.osgi.cfg [moved from opendaylight/commons/jolokia/src/main/resources/org.jolokia.osgi.cfg with 100% similarity]
karaf/pom.xml
opendaylight/archetypes/opendaylight-startup/pom.xml [deleted file]
opendaylight/archetypes/opendaylight-startup/src/main/resources/META-INF/maven/archetype-metadata.xml [deleted file]
opendaylight/archetypes/opendaylight-startup/src/main/resources/archetype-resources/README.txt [deleted file]
opendaylight/archetypes/opendaylight-startup/src/main/resources/archetype-resources/pom.xml [deleted file]
opendaylight/archetypes/pom.xml [deleted file]
opendaylight/blueprint/pom.xml
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/BlueprintBundleTracker.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/BlueprintContainerRestartServiceImpl.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractDependentComponentFactoryMetadata.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/BindingContext.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ComponentProcessor.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigDefaultXMLReader.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/DataStoreAppConfigMetadata.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/MandatoryServiceReferenceMetadata.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/OpendaylightNamespaceHandler.java
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RoutedRpcMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RoutedRpcRegistrationConverter.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java [deleted file]
opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticServiceReferenceRecipe.java
opendaylight/blueprint/src/main/resources/opendaylight-blueprint-ext-1.0.0.xsd
opendaylight/blueprint/src/test/java/org/opendaylight/controller/blueprint/tests/DataStoreAppConfigDefaultXMLReaderTest.java
opendaylight/blueprint/src/test/resources/opendaylight-sal-test-store-config.xml
opendaylight/config/config-artifacts/pom.xml [deleted file]
opendaylight/config/netty-config-api/pom.xml [deleted file]
opendaylight/config/netty-event-executor-config/pom.xml [deleted file]
opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java [deleted file]
opendaylight/config/netty-event-executor-config/src/main/resources/OSGI-INF/blueprint/netty-event-executor.xml [deleted file]
opendaylight/config/netty-threadgroup-config/pom.xml [deleted file]
opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/NioEventLoopGroupCloseable.java [deleted file]
opendaylight/config/netty-threadgroup-config/src/main/resources/OSGI-INF/blueprint/netty-threadgroup.xml [deleted file]
opendaylight/config/netty-timer-config/pom.xml [deleted file]
opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java [deleted file]
opendaylight/config/netty-timer-config/src/main/resources/OSGI-INF/blueprint/netty-timer.xml [deleted file]
opendaylight/config/pom.xml [deleted file]
opendaylight/config/threadpool-config-api/pom.xml [deleted file]
opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java [deleted file]
opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java [deleted file]
opendaylight/config/threadpool-config-impl/pom.xml [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java [deleted file]
opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java [deleted file]
opendaylight/md-sal/benchmark-data-store/pom.xml [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryBrokerWriteTransactionBenchmark.java [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryWriteTransactionBenchmark.java [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryBrokerWriteTransactionBenchmark.java [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java [deleted file]
opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang [deleted file]
opendaylight/md-sal/cds-access-api/pom.xml
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/ABIVersion.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/AbstractVersionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/FutureVersionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/PastVersionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ClosedTransactionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadHistoryException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DeadTransactionException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilder.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/NotLeaderException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfOrderRequestException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/OutOfSequenceEnvelopeException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PersistenceProtocol.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDataModification.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDelete.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionMerge.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionModification.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponse.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionRequest.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionWrite.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/UnknownHistoryException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestSuccess.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Response.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ResponseEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RetiredGenerationException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RuntimeRequestException.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SliceableMessage.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelope.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java [deleted file]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifier.java
opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/UnsupportedRequestException.java
opendaylight/md-sal/cds-access-api/src/main/yang/odl-controller-cds-types.yang [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/ABIVersionTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbortLocalTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractRequestSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/CommitLocalTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestBuilderTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestEmptyTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessNoDataTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java [new file with mode: 0644]
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/FrontendTypeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifierTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/MemberNameTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeTest.java
opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/concepts/TransactionIdentifierTest.java
opendaylight/md-sal/cds-access-client/pom.xml
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActor.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/AveragingProgressTracker.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/BackendInfo.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ClientActorContext.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ConnectionEntry.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InitialClientActorContext.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLock.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/InversibleLockException.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ProgressTracker.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ReconnectForwarder.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/ReconnectingClientConnection.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/RecoveringClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/SavingClientActorBehavior.java
opendaylight/md-sal/cds-access-client/src/main/java/org/opendaylight/controller/cluster/access/client/TransmitQueue.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/AbstractClientActorTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/AbstractClientConnectionTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/AbstractTransmitQueueTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/AccessClientUtil.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ActorBehaviorTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ClientActorContextTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectedClientConnectionTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectingClientConnectionTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ConnectionEntryTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/MockedSnapshotStore.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/ReconnectingClientConnectionTest.java
opendaylight/md-sal/cds-access-client/src/test/java/org/opendaylight/controller/cluster/access/client/TransmittingTransmitQueueTest.java
opendaylight/md-sal/cds-dom-api/pom.xml
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/CDSDataTreeProducer.java [deleted file]
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/CDSShardAccess.java [deleted file]
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocation.java
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListener.java
opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java [deleted file]
opendaylight/md-sal/cds-mgmt-api/pom.xml [new file with mode: 0644]
opendaylight/md-sal/cds-mgmt-api/src/main/java/module-info.java [new file with mode: 0644]
opendaylight/md-sal/cds-mgmt-api/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/CommitStatsMXBean.java [moved from opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBean.java with 93% similarity]
opendaylight/md-sal/cds-mgmt-api/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreConfigurationMXBean.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreConfigurationMXBean.java with 87% similarity]
opendaylight/md-sal/cds-mgmt-api/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBean.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBean.java with 59% similarity]
opendaylight/md-sal/cds-mgmt-api/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardDataTreeListenerInfoMXBean.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardDataTreeListenerInfoMXBean.java with 77% similarity]
opendaylight/md-sal/cds-mgmt-api/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMXBean.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMXBean.java with 93% similarity]
opendaylight/md-sal/cds-mgmt-api/src/main/java/org/opendaylight/controller/cluster/mgmt/api/DataTreeListenerInfo.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeListenerInfo.java with 69% similarity]
opendaylight/md-sal/cds-mgmt-api/src/main/java/org/opendaylight/controller/cluster/mgmt/api/FollowerInfo.java [moved from opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/FollowerInfo.java with 71% similarity]
opendaylight/md-sal/eos-dom-akka/pom.xml [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/EOSMain.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/BootstrapCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/GetRunningContext.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/RunningContext.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/Terminate.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/EntityRpcHandler.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/OwnerStateChecker.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/AbstractEntityRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetCandidates.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetCandidatesForEntity.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntitiesReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntitiesRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityOwnerReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityOwnerRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnerForEntity.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnershipState.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnershipStateReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/InternalGetReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/OwnerDataResponse.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/SingleEntityOwnerDataResponse.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/AbstractEntityRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ActivateDataCenter.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/CandidatesChanged.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidates.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DataCenterActivated.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DataCenterDeactivated.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DeactivateDataCenter.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntitiesBackendReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntitiesBackendRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityBackendReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityBackendRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityOwnerBackendReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityOwnerBackendRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InitialCandidateSync.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InitialOwnerSync.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InternalClusterEvent.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberDownEvent.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberReachableEvent.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberUnreachableEvent.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberUpEvent.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerChanged.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorReply.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorRequest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/AbstractCandidateCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRegistryCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFinished.java [moved from opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/BindingTestUtilities.java with 51% similarity]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/InternalUpdateResponse.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RegisterCandidate.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/UnregisterCandidate.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/InitialOwnerSync.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/ListenerCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/OwnerChanged.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerRegistry.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/CandidatesChanged.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/RegisterListener.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TerminateListener.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TypeListenerCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TypeListenerRegistryCommand.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/UnregisterListener.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/yang/gen/v1/urn/opendaylight/params/xml/ns/yang/controller/entity/owners/norev/EntityNameBuilder.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/main/yang/odl-akka-eos.yang [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/SingleNodeTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeBaseTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisorTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf [new file with mode: 0644]
opendaylight/md-sal/eos-dom-akka/src/test/resources/simplelogger.properties [new file with mode: 0644]
opendaylight/md-sal/mdsal-it-base/pom.xml
opendaylight/md-sal/mdsal-it-base/src/main/java/org/opendaylight/controller/mdsal/it/base/AbstractMdsalTestBase.java
opendaylight/md-sal/mdsal-it-parent/pom.xml
opendaylight/md-sal/mdsal-trace/api/pom.xml [deleted file]
opendaylight/md-sal/mdsal-trace/api/src/main/java/org/opendaylight/controller/md/sal/trace/api/TracingDOMDataBroker.java [deleted file]
opendaylight/md-sal/mdsal-trace/api/src/main/resources/initial/mdsaltrace_config.xml [deleted file]
opendaylight/md-sal/mdsal-trace/api/src/main/yang/mdsaltrace.yang [deleted file]
opendaylight/md-sal/mdsal-trace/binding-impl/pom.xml [deleted file]
opendaylight/md-sal/mdsal-trace/binding-impl/src/main/java/org/opendaylight/controller/md/sal/trace/binding/impl/TracingBindingBrokerWiring.java [deleted file]
opendaylight/md-sal/mdsal-trace/binding-impl/src/main/resources/OSGI-INF/blueprint/impl-blueprint.xml [deleted file]
opendaylight/md-sal/mdsal-trace/cli/pom.xml [deleted file]
opendaylight/md-sal/mdsal-trace/cli/src/main/java/org/opendaylight/controller/md/sal/trace/cli/PrintOpenTransactionsCommand.java [deleted file]
opendaylight/md-sal/mdsal-trace/deploy-site.xml [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/pom.xml [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/AbstractCloseTracked.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTracked.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedRegistry.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedRegistryReportEntry.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedTrait.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/package-info.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/AbstractTracingWriteTransaction.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingBroker.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingReadWriteTransaction.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingTransactionChain.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingWriteTransaction.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/main/resources/OSGI-INF/blueprint/impl-blueprint.xml [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/test/java/org/opendaylight/controller/md/sal/trace/tests/CloseTrackedRegistryTest.java [deleted file]
opendaylight/md-sal/mdsal-trace/dom-impl/src/test/java/org/opendaylight/controller/md/sal/trace/tests/TracingBrokerTest.java [deleted file]
opendaylight/md-sal/mdsal-trace/features/features-mdsal-trace/pom.xml [deleted file]
opendaylight/md-sal/mdsal-trace/features/odl-mdsal-trace/pom.xml [deleted file]
opendaylight/md-sal/mdsal-trace/features/odl-mdsal-trace/src/main/feature/feature.xml [deleted file]
opendaylight/md-sal/mdsal-trace/features/pom.xml [deleted file]
opendaylight/md-sal/mdsal-trace/pom.xml [deleted file]
opendaylight/md-sal/messagebus-api/pom.xml [deleted file]
opendaylight/md-sal/messagebus-api/src/main/yang/event-aggregator.yang [deleted file]
opendaylight/md-sal/messagebus-api/src/main/yang/event-source.yang [deleted file]
opendaylight/md-sal/messagebus-impl/pom.xml [deleted file]
opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceRegistrationImpl.java [deleted file]
opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopic.java [deleted file]
opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopology.java [deleted file]
opendaylight/md-sal/messagebus-impl/src/main/resources/OSGI-INF/blueprint/messagebus.xml [deleted file]
opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceRegistrationImplTest.java [deleted file]
opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopicTest.java [deleted file]
opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopologyTest.java [deleted file]
opendaylight/md-sal/messagebus-spi/pom.xml [deleted file]
opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSource.java [deleted file]
opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSourceRegistration.java [deleted file]
opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSourceRegistry.java [deleted file]
opendaylight/md-sal/messagebus-util/pom.xml [deleted file]
opendaylight/md-sal/messagebus-util/src/main/java/org/opendaylight/controller/messagebus/app/util/TopicDOMNotification.java [deleted file]
opendaylight/md-sal/messagebus-util/src/main/java/org/opendaylight/controller/messagebus/app/util/Util.java [deleted file]
opendaylight/md-sal/messagebus-util/src/test/java/org/opendaylight/controller/messagebus/app/util/TopicDOMNotificationTest.java [deleted file]
opendaylight/md-sal/messagebus-util/src/test/java/org/opendaylight/controller/messagebus/app/util/UtilTest.java [deleted file]
opendaylight/md-sal/parent/pom.xml
opendaylight/md-sal/pom.xml
opendaylight/md-sal/sal-akka-raft-example/pom.xml
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleConfigParamsImpl.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/ExampleRoleChangeListener.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/LogGenerator.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/Main.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/TestDriver.java
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KeyValue.java
opendaylight/md-sal/sal-akka-raft/pom.xml
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTracker.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java [deleted file]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/FollowerLogInformation.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/GetSnapshotReplyActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java [changed mode: 0755->0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProvider.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohort.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoveryCohort.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorSnapshotMessageSupport.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftVersions.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLog.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogEntry.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotState.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/TimedRunnable.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/CaptureSnapshot.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java [deleted file]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/Replicate.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNow.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractLeader.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifier.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotState.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTracker.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/SyncStatusTracker.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/GetSnapshot.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/OnDemandRaftState.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/client/messages/Shutdown.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AbstractRaftRPC.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReply.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IdentifiablePayload.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReply.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/PersistentPayload.java [moved from opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/PersistentPayload.java with 88% similarity]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RemoveServer.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestLeadership.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVote.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReply.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/ServerRemoved.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntries.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntries.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/EmptyState.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayload.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayload.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/ServerInfo.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntry.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntrySerializer.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/Snapshot.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTerm.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractRaftActorIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ElectionTermImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/FollowerLogInformationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/LeadershipTransferIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MigratedMessagesTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActor.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/NonVotingFollowerIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorContextImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorDelegatingPersistentDataProviderTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorLeadershipTransferCohortTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorRecoverySupportTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorServerConfigurationSupportTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorSnapshotMessageSupportTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTestKit.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationSingleNodeTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RecoveryIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/ReplicationWithSlicedPayloadIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/SnapshotManagerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/TestActorFactory.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/base/messages/TimeoutNowTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehaviorTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/CandidateTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/DelayedMessagesElectionScenarioTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerIdentifierTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/SnapshotTrackerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/client/messages/ShutdownTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesReplyTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/AppendEntriesTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotReplyTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshotTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteReplyTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/messages/RequestVoteTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ApplyJournalEntriesTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/DeleteEntriesTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/EmptyStateTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/ServerConfigurationPayloadTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SimpleReplicatedLogEntryTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/SnapshotTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/UpdateElectionTermTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/InMemoryJournal.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/InMemorySnapshotStore.java
opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf
opendaylight/md-sal/sal-akka-segmented-journal/pom.xml
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntry.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java [deleted file]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournal.java
opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/SegmentedJournalActor.java
opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/PerformanceTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalSpecTest.java
opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/SegmentedFileJournalTest.java
opendaylight/md-sal/sal-binding-api/.gitignore [deleted file]
opendaylight/md-sal/sal-binding-api/pom.xml [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ActionProviderService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ActionService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/BindingService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/BindingTransactionChain.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ClusteredDataTreeChangeListener.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataBroker.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataObjectModification.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeListener.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeIdentifier.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeModification.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingDataBroker.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPoint.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPointService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationPublishService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationRejectedException.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/TransactionFactory.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/WriteTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBindingAwareConsumer.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBindingAwareProvider.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBrokerAwareActivator.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareBroker.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareConsumer.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareProvider.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationListener.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationProviderService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationService.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcAvailabilityListener.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcConsumerRegistry.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcProviderRegistry.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/package-info.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/rpc/RpcContextIdentifier.java [deleted file]
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/rpc/RpcRoutingContext.java [deleted file]
opendaylight/md-sal/sal-binding-broker/pom.xml [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/.gitignore [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/AbstractNotificationListenerRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/AggregatedNotificationListenerRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/CompositeRoutedRpcRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/DelegatedRootRpcRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/FunctionalNotificationListenerAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceWithInterestListeners.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumRpcProviderRegistry.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/ListenerMapGeneration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/NotificationInvoker.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/NotificationListenerRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractForwardedDataBroker.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractForwardedTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingBrokerWiring.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingClusteredDOMDataTreeChangeListenerAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMAdapterBuilder.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMAdapterLoader.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataBrokerAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeListenerAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointListenerAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationListenerAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationPublishServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMReadTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMReadWriteTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcAdapterRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcImplementationAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcProviderServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMTransactionChainAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMWriteTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDataTreeChangeListenerRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingMountPointAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingRpcAdapterRegistration.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToDOMAdapterFactory.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToNormalizedNodeCodec.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ContextReferenceExtractor.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ControllerActionProviderServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ControllerActionServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/DirectGetterRouteContextExtractor.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/GetValueRouteContextExtractor.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDOMRpcResultFuture.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataObjectModification.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataTreeModification.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazySerializedContainerNode.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazySerializedDOMNotification.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/RpcServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterBuilder.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterFactory.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterLoader.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/impl/RootBindingAwareBroker.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/main/resources/OSGI-INF/blueprint/binding-broker.xml [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapterTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/BindingNormalizedCodecTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/ContextExtractorTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/BackwardsCompatibleNotificationBrokerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/BindingDOMDataBrokerAdapterTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1125RegressionTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1333DataChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1418AugmentationTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug2562DeserializedUnkeyedListTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug3090MultiKeyList.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug4513Test.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/DataTreeChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ForwardedNotificationAdapterTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ListInsertionDataChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/WriteTransactionTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractBaseDataBrokerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractConcurrentDataBrokerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataBrokerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataBrokerTestCustomizer.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataTreeChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractNotificationBrokerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractSchemaAwareTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AssertCollections.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/ConcurrentDataBrokerTestCustomizer.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/ConstantSchemaAbstractDataBrokerTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/DataBrokerTestCustomizer.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/DataBrokerTestModule.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/SchemaContextSingleton.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/tests/AbstractDataBrokerTestTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/tests/DataBrokerTestModuleTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/AbstractDataServiceTest.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/AugmentationVerifier.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/BarListener.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/BarUpdate.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/CompositeListener.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FlowDelete.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooListener.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooService.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooUpdate.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/Grouping.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/InheritedContextInput.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/ReferencableObject.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/ReferencableObjectKey.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/SimpleInput.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/BindingBrokerTestFactory.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/BindingTestContext.java [deleted file]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/MockSchemaService.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/pom.xml [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/md/sal/binding/data/ConcurrentImplicitCreateTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/md/sal/binding/data/WildcardedDataChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/DeleteNestedAugmentationListenParentTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/WriteParentListenAugmentTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/WriteParentReadChildTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/package-info.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/BrokerIntegrationTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/CrossBrokerMountPointTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/CrossBrokerRpcTest.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/DOMRpcServiceTestBugfix560.java [deleted file]
opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/MessageCapturingFlowService.java [deleted file]
opendaylight/md-sal/sal-binding-it/pom.xml
opendaylight/md-sal/sal-binding-it/src/main/java/org/opendaylight/controller/test/sal/binding/it/TestHelper.java
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractIT.java
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractTestProvider.java [deleted file]
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/DataServiceIT.java
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/NotificationIT.java
opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/RoutedServiceIT.java
opendaylight/md-sal/sal-binding-util/pom.xml [deleted file]
opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/AbstractBindingSalConsumerInstance.java [deleted file]
opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/AbstractBindingSalProviderInstance.java [deleted file]
opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/BindingContextUtils.java [deleted file]
opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/TypeSafeDataReader.java [deleted file]
opendaylight/md-sal/sal-cluster-admin-api/pom.xml
opendaylight/md-sal/sal-cluster-admin-api/src/main/yang/cluster-admin.yang
opendaylight/md-sal/sal-cluster-admin-impl/pom.xml
opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcService.java
opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ShardIdentifier.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-impl/src/main/resources/OSGI-INF/blueprint/cluster-admin.xml [deleted file]
opendaylight/md-sal/sal-cluster-admin-impl/src/test/java/org/opendaylight/controller/cluster/datastore/admin/ClusterAdminRpcServiceTest.java
opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AbstractRpcAction.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/pom.xml
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DataPersistenceProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActorWithMetering.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/Dispatchers.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/FileAkkaConfigurationReader.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MessageTracker.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeNavigator.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/NormalizedNodeVisitor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactory.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataOutput.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/InvalidNormalizedNodeStreamException.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NodeTypes.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataInput.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataOutput.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputOutput.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputStreamReader.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeOutputStreamWriter.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/PathArgumentTypes.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/TokenTypes.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ValueTypes.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodeBuilderWrapper.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePruner.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/ReusableNormalizedNodePruner.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/util/AbstractDataTreeModificationCursor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedByteArray.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedInputStream.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/FileBackedOutputStream.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/InputOutputStreamFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/LZ4InputOutputStreamSupport.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/PlainInputOutputStreamSupport.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/AbortSlicing.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/AssembledMessageState.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageAssembler.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSlice.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifier.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSliceReply.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/MessageSlicer.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/SliceOptions.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/messaging/SlicedMessageState.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/LeaderStateChanged.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/notifications/RoleChangeNotifier.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStore.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/RemoteYangTextSourceProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProvider.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImpl.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSchemaSourceSerializationProxy.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/MessageTrackerTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/QuarantinedMonitorActorTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactoryTest.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeStreamReaderWriterTest.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SampleNormalizedNodeSerializable.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ValueTypesTest.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/util/TestModel.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/FileBackedOutputStreamTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/SharedFileBackedOutputStreamTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/AbortSlicingTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageAssemblerTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceIdentifierTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceReplyTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSliceTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/messaging/MessageSlicingIntegrationTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStoreSpecTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/persistence/LocalSnapshotStoreTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteSchemaProviderTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/RemoteYangTextSourceProviderImplTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/schema/provider/impl/YangTextSourceSerializationProxyTest.java
opendaylight/md-sal/sal-clustering-commons/src/test/resources/LocalSnapshotStoreTest.conf
opendaylight/md-sal/sal-clustering-commons/src/test/resources/odl-ctlr1923.yang [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-config/pom.xml
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/datastore.cfg
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/factory-akka.conf
opendaylight/md-sal/sal-common-api/pom.xml [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/MappingCheckedFuture.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/RegistrationListener.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitCohort.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitCoordinator.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitHandler.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncDataBroker.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncDataTransactionFactory.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadTransaction.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncTransaction.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataReader.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataStoreUnavailableException.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataValidationFailedException.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/LogicalDatastoreType.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/OptimisticLockFailedException.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/ReadFailedException.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChain.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainClosedException.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainFactory.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainListener.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitFailedException.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/notify/NotificationPublishService.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/notify/NotificationSubscriptionService.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChange.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChangeListener.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChangePublisher.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RoutedRegistration.java [deleted file]
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RoutingTable.java [deleted file]
opendaylight/md-sal/sal-common-api/src/test/java/org/opendaylight/controller/md/sal/common/api/MappingCheckedFutureTest.java [deleted file]
opendaylight/md-sal/sal-common-impl/pom.xml [deleted file]
opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizationException.java [deleted file]
opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizationOperation.java [deleted file]
opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizer.java [deleted file]
opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataSchemaContainerProxy.java [deleted file]
opendaylight/md-sal/sal-common-impl/src/test/resources/normalization-test.yang [deleted file]
opendaylight/md-sal/sal-common-util/pom.xml
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerMXBean.java [deleted file]
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerMXBeanImpl.java [deleted file]
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerStats.java [deleted file]
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStats.java
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/sal/common/util/Arguments.java [deleted file]
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/sal/common/util/NoopAutoCloseable.java [deleted file]
opendaylight/md-sal/sal-connector-api/pom.xml [deleted file]
opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/BindingAwareRpcRouter.java [deleted file]
opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/BindingAwareZeroMqRpcRouter.java [deleted file]
opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/ConnectorListener.java [deleted file]
opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/RpcRouter.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/pom.xml
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/impl/ActorSystemProviderImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/QuarantinedMonitorActorPropsFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractShardedTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ClientBackedReadTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ClientBackedReadWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ClientBackedTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ClientBackedTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ClientBackedWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/CommitStatsMXBeanImpl.java [moved from opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBeanImpl.java with 81% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBroker.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMStoreThreePhaseCommitCohortAdaptor.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandle.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/BouncingReconnectForwarder.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCursor.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DataStoreClient.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/GetClientRequest.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ProxyHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ShardBackendInfo.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleDataStoreClientBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/SimpleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/VotingFuture.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractFrontendHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeNotificationPublisherActorProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractShardDataTreeTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ClusterWrapperImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CohortEntry.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DOMDataTreeCandidateTO.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataStoreVersions.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorRegistry.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortRegistrationProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreConfigurationMXBeanImpl.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreConfigurationMXBeanImpl.java with 76% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospector.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospectorFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreInfoMXBeanImpl.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBeanImpl.java with 59% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreSnapshotRestore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultDatastoreContextIntrospectorFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultDatastoreSnapshotRestore.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultShardDataTreeChangeListenerPublisher.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeChangeListenerRegistration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreInterface.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendClientMetadataBuilder.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendHistoryMetadataBuilder.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadOnlyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/FrontendTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderFrontendState.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderLocalDelegateFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalFrontendHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDatastoreContextIntrospectorFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OnDemandShardStateCache.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/OnDemandShardStateCache.java with 90% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadOnlyShardDataTreeTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ReadWriteShardDataTreeTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisher.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangeListenerPublisherActorProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeChangePublisherActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeListenerInfoMXBeanImpl.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardDataTreeListenerInfoMXBeanImpl.java with 55% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisher.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeNotificationPublisherActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeTransactionParent.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardSnapshotCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardStats.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java with 87% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionActorFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionMessageRetrySupport.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapper.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionType.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/DataTreeNotificationListenerRegistrationActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/LegacyDOMStoreAdapter.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/AbstractModuleShardConfigProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/Configuration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/FileModuleShardConfigProvider.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/HybridModuleShardConfigProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleConfig.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleShardConfigProvider.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleShardConfiguration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/PrefixShardConfiguration.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ShardConfig.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractEntityOwnerChangeListener.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/CandidateListChangeListener.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipCandidateRegistration.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipListenerRegistration.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipService.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnerChangeListener.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnersModel.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipChangePublisher.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerActor.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerSupport.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShard.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShardCommitCoordinator.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipStatistics.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/CandidateAdded.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/CandidateRemoved.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RegisterCandidateLocal.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RegisterListenerLocal.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RemoveAllCandidates.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/SelectOwner.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/UnregisterCandidateLocal.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/UnregisterListenerLocal.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/AbstractEntityOwnerSelectionStrategy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfig.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfigReader.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/FirstCandidateSelectionStrategy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LeastLoadedCandidateSelectionStrategy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/exceptions/NoShardLeaderException.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardMBeanFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractRead.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractThreePhaseCommitMessage.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ActorInitialized.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AddPrefixShardReplica.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AddShardReplica.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModifications.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ChangeShardMembersVotingStatus.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeNotificationListenerRegistrationReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateShard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/FindPrimary.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/FlipShardMembersVotingStatus.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ForwardedReadyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetInfo.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClientsReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/LocalPrimaryShardFound.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PeerDown.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PeerUp.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PersistAbortTransactionPayload.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PrimaryShardInfo.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadData.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializer.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeNotificationListenerReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RemoteFindPrimary.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RemovePrefixShardReplica.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RemoveShardReplica.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ShardLeaderStateChanged.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/UpdateSchemaContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/VersionedExternalizableMessage.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/CompositeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MergeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/Modification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/modification/WriteModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractDataTreeCandidateNode.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractVersionException.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractVersionedShardDataTreeSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CreateLocalHistoryPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DH.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DSS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DataTreeCandidateInputOutput.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DatastoreSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DeletedDataTreeCandidateNode.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DisableTrackingPayload.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FM.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendClientMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendHistoryMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendShardDataTreeSnapshotMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MetadataShardDataTreeSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ModifiedDataTreeCandidateNode.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PH.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PT.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PayloadVersion.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeLocalHistoryPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeTransactionPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SM.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SS.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ST.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ShardDataTreeSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ShardDataTreeSnapshotMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ShardManagerSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ShardSnapshotState.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayload.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/AbstractShardManagerCreator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/AtomicShardContextProvider.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardInformation.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerInfo.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerSnapshot.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardPeerAddressResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/SwitchShardBehavior.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardstrategy/DefaultShardStrategy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardstrategy/ModuleShardStrategy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardstrategy/PrefixShardStrategy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardstrategy/ShardStrategy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardstrategy/ShardStrategyFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/AbstractBatchedModificationsCursor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorUtils.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ClusterUtils.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/DataTreeModificationOutput.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ImmutableUnsignedLongSet.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/MutableUnsignedLongSet.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/NormalizedNodeAggregator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/NormalizedNodeXMLOutput.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/PrimaryShardInfoFutureCache.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/PruningDataTreeModification.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/RootScatterGather.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongBitmap.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongRangeSet.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongSet.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImpl.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DOMDataTreeShardCreationFailedException.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardChangePublisher.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardFrontend.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModification.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationContext.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationCursor.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationFactoryBuilder.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/LookupTask.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/PrefixedShardConfigUpdateHandler.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/PrefixedShardConfigWriter.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/RoleChangeListenerActor.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardProxyProducer.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardProxyTransaction.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardedDataTreeActor.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardingServiceAddressResolver.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/InitConfigListener.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/LookupPrefixShard.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/NotifyProducerCreated.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/NotifyProducerRemoved.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardCreated.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardRemovalLookup.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardRemoved.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/ProducerCreated.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/ProducerRemoved.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/StartConfigShardLookup.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/resources/OSGI-INF/blueprint/clustered-datastore.xml [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/yang/distributed-datastore-provider.yang
opendaylight/md-sal/sal-distributed-datastore/src/main/yang/entity-owners.yang [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/yang/prefix-shard-configuration.yang [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransactionTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedDataStoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedReadTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedReadWriteTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedTransactionChainTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ClientBackedWriteTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/CommitStatsMXBeanImplTest.java [moved from opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBeanImplTest.java with 85% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBrokerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/TestClientBackedDataStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHandleTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehaviorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientLocalHistoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientSnapshotTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCommitCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCursorTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/DirectTransactionCommitCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/DistributedDataStoreClientBehaviorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/EmptyTransactionCommitCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadOnlyProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/LocalReadWriteProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolverTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/RemoteProxyTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/SingleClientHistoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/TestUtils.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/TransactionTester.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractDistributedDataStoreIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractShardManagerTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionProxyTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupportTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataTreeCohortIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreContextContextPropertiesUpdaterTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospectorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreContextTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DatastoreSnapshotRestoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohortTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreRemotingIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreWithSegmentedJournalIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListenerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/FrontendReadWriteTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/IntegrationTestKit.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/JsonExportTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalShardStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContextTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/MemberNode.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/OperationCallback.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationCallback.java with 62% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxyTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeMocking.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardDataTreeTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinatorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardStatsTest.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsTest.java with 70% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTestKit.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionFailureTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohortTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TestShard.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java with 82% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxyTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapperTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallback.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallback.java with 90% similarity]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallbackTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/actors/DataTreeNotificationListenerRegistrationActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/actors/ShardSnapshotActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplBaseTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplFileTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplHybridTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractClusterRefEntityOwnershipTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractEntityOwnershipTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/CandidateListChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipServiceTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnerChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerActorTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerSupportTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShardTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipStatisticsTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfigReaderTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LastCandidateSelectionStrategy.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LeastLoadedCandidateSelectionStrategyTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChainTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadDataTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadyTransactionReplyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/DeleteModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/MergeModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/MutableCompositeModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/WriteModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/AbortTransactionPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/AbstractIdentifiablePayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/CloseLocalHistoryPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/CommitTransactionPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/CreateLocalHistoryPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendShardDataTreeSnapshotMetadataTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeLocalHistoryPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/PurgeTransactionPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/ShardDataTreeSnapshotTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/ShardManagerSnapshotTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/ShardSnapshotStateTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayloadTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerGetSnapshotReplyActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardPeerAddressResolverTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/TestShardManager.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardstrategy/ModuleShardStrategyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardstrategy/ShardStrategyFactoryTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/ActorUtilsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockClusterWrapper.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockConfiguration.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockDataTreeChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/NormalizedNodeAggregatorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/PruningDataTreeModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/TransactionRateLimiterTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongBitmapTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongSetTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImplTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardFrontendTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/RoleChangeListenerActorTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/CarsModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/CompositeModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/PeopleModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/SchemaContextHelper.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/md/cluster/datastore/model/TestModel.java
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/expectedJournalExport.json [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/expectedSnapshotExport.json [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/module-shards-default-cars-member1-and-2.conf [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/module-shards-default-cars-member1.conf [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/segmented.conf
opendaylight/md-sal/sal-dom-api/.gitignore [deleted file]
opendaylight/md-sal/sal-dom-api/pom.xml [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/ClusteredDOMDataTreeChangeListener.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMActionProviderService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMActionService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataBroker.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataBrokerExtension.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeChangeListener.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeChangeService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeCommitCohortRegistry.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeIdentifier.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMEvent.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMExtensibleService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPoint.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPointService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotification.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationListener.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationPublishService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationRejectedException.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcAvailabilityListener.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcException.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcIdentifier.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementation.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementationNotAvailableException.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementationRegistration.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcProviderService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcResult.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMService.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMServiceExtension.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMTransactionChain.java [deleted file]
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DefaultDOMRpcException.java [deleted file]
opendaylight/md-sal/sal-dom-broker/pom.xml [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMDataBroker.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedTransactionFactory.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMRpcRoutingTableEntry.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/CommitCoordinationTask.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouter.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouterEvent.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRouter.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRoutingTable.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/GlobalDOMRpcRoutingTableEntry.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongDataBroker.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongTransactionChain.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/RoutedDOMRpcRoutingTableEntry.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/SerializedDOMDataBroker.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/TransactionCommitFailedExceptionMapper.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/UnknownDOMRpcRoutingTableEntry.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/LegacyShardedDOMDataBrokerAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingTransactionChain.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/mount/DOMMountPointServiceImpl.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/SchemaContextProviders.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/util/ProxySchemaContext.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/resources/OSGI-INF/blueprint/dom-broker.xml [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/BlockingTransactionChainListener.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerPerformanceTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataTreeListenerTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransactionTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouterTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRouterTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMTransactionChainTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/MountPointServiceTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadWriteTransactionTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingTransactionChainTest.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestModel.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/test/resources/odl-datastore-test.yang [deleted file]
opendaylight/md-sal/sal-dom-compat/pom.xml [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/AbstractDOMRpcResultFutureAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataBrokerAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeChangeServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeReadTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeReadWriteTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeWriteTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMMountPointAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMNotificationServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMRpcServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreReadTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreReadWriteTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreThreePhaseCommitCohortAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreTransactionChainAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreWriteTransactionAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMTransactionChainAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMActionServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMDataBrokerAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMNotificationServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMRpcResultFutureAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMRpcServiceAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/MdsalDOMRpcResultFutureAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/ReadFailedExceptionAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/RpcAvailabilityListenerAdapter.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/TransactionUtils.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/package-info.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/test/java/org/opendaylight/controller/sal/core/compat/DOMMountPointAdapterTest.java [deleted file]
opendaylight/md-sal/sal-dom-compat/src/test/java/org/opendaylight/controller/sal/core/compat/LegacyDOMDataBrokerAdapterTest.java [deleted file]
opendaylight/md-sal/sal-dom-spi/pom.xml [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/mount/SimpleDOMMountPoint.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/rpc/RpcRoutingStrategy.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMDataTreeChangeListenerRegistration.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMRpcImplementationRegistration.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMRpcProviderService.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DOMNotificationSubscriptionListener.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DOMNotificationSubscriptionListenerRegistry.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DefaultDOMRpcResult.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataBroker.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataReadOnlyTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMNotificationPublishService.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMNotificationService.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcImplementation.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcProviderService.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcResult.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcService.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMTransactionChain.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractDOMStoreTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractDOMStoreTreeChangePublisher.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractSnapshotBackedTransactionChain.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStore.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreReadTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransactionChain.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransactionFactory.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTreeChangePublisher.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/ForwardingDOMStoreThreePhaseCommitCohort.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedReadTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedReadWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedTransactions.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedWriteTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/package-info.java [deleted file]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/statistics/DOMStoreStatsTracker.java [deleted file]
opendaylight/md-sal/sal-dummy-distributed-datastore/pom.xml
opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/java/org/opendaylight/controller/dummy/datastore/DummyShardManager.java
opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-2.conf
opendaylight/md-sal/sal-dummy-distributed-datastore/src/main/resources/member-3.conf
opendaylight/md-sal/sal-inmemory-datastore/pom.xml [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreConfigProperties.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMStoreTreeChangePublisher.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/AbstractDataTreeChangeListenerTest.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DatastoreTestTask.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DefaultDataTreeChangeListenerTestSuite.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDataStoreTest.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/RootScopeSubtreeTest.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/SchemaUpdateForTransactionTest.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestDCLExecutorService.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestModel.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/WildcardedScopeBaseTest.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/test/resources/odl-datastore-test.yang [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/pom.xml
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractRemoteFuture.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractRemoteImplementation.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OSGiRemoteOpsProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsInvoker.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsListener.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcListener.java with 59% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsManager.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsRegistrar.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteActionImplementation.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMActionException.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMActionFuture.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMRpcFuture.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteOpsProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderConfig.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfig.java with 78% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcImplementation.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProvider.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcErrorsException.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcInvoker.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcManager.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcRegistrar.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/AbstractExecute.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/AbstractResponse.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ActionResponse.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ExecuteAction.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ExecuteRpc.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/RpcResponse.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/AbstractRoutingTable.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistry.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRoutingTable.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RoutingTable.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistry.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreAccess.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreActor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/GossipEnvelope.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/LocalBucket.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/AbstractRegistryMXBean.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBean.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteRpcRegistryMXBean.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteRpcRegistryMXBeanImpl.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/OSGI-INF/blueprint/remote-rpc.xml [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/AbstractOpsTest.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/AbstractRpcTest.java with 66% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsBrokerTest.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcBrokerTest.java with 78% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsListenerTest.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcListenerTest.java with 53% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsRegistrarTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsImplementationTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderConfigTest.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfigTest.java with 60% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderFactoryTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderTest.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderTest.java with 72% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcImplementationTest.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactoryTest.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcErrorsExceptionTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcRegistrarTest.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/ExecuteOpsTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/ExecuteRpcTest.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/OpsResponseTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/RpcResponseTest.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistryTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/GossiperTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImplTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteRpcRegistryMXBeanImplTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf
opendaylight/md-sal/sal-test-model/pom.xml
opendaylight/md-sal/sal-test-model/src/main/java/org/opendaylight/controller/md/sal/test/model/util/ListsBindingUtils.java
opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/pom.xml [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractDOMRpcAction.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractRpcAction.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/DefaultInstanceIdentifierSupport.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/InstanceIdentifierSupport.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterCommitCohortCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterLoggingDtclCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterOwnershipCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StopStressTestCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StressTestCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterCommitCohortCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterLoggingDtclsCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterOwnershipCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/purchase/BuyCarCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/AddShardReplicaCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/CheckPublishNotificationsCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/IsClientAbortedCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterBoundConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterDefaultConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterFlappingSingletonCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterSingletonConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RemoveShardReplicaCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/ShutdownShardReplicaCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/StartPublishNotificationsCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDdtlCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDtclCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeYnlCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterBoundConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterDefaultConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterFlappingSingletonCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterSingletonConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDdtlCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDtclCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeYnlCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/WriteTransactionsCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetContextedConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetSingletonConstantCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/people/AddPersonCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/rpc/test/BasicGlobalCommand.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/model/pom.xml
opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-people.yang
opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-purchase.yang
opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/odl-mdsal-lowlevel-control.yang
opendaylight/md-sal/samples/clustering-test-app/pom.xml
opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/CarBoughtListener.java [moved from opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java with 50% similarity]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/AddPersonImpl.java [moved from opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java with 51% similarity]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/BasicRpcTestProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/CarDataTreeChangeListener.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/CarEntryDataTreeCommitCohort.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/CarProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/MdsalLowLevelTestProvider.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java [deleted file]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/AbstractTransactionHandler.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/FinalizableScheduledExecutorService.java [deleted file]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/FlappingSingletonService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/GetConstantService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsDOMDataTreeLIstener.java [deleted file]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsListener.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PrefixLeaderHandler.java [deleted file]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PrefixShardHandler.java [deleted file]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/ProduceTransactionsHandler.java [deleted file]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PublishNotificationsTask.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/RoutedGetConstantService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/SingletonGetConstantService.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/WriteTransactionsHandler.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/YnlListener.java
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/resources/OSGI-INF/blueprint/cluster-test-app.xml [deleted file]
opendaylight/md-sal/samples/pom.xml
opendaylight/md-sal/samples/toaster-consumer/pom.xml
opendaylight/md-sal/samples/toaster-consumer/src/main/java/org/opendaylight/controller/sample/kitchen/api/KitchenService.java
opendaylight/md-sal/samples/toaster-consumer/src/main/java/org/opendaylight/controller/sample/kitchen/impl/KitchenServiceImpl.java
opendaylight/md-sal/samples/toaster-consumer/src/main/resources/OSGI-INF/blueprint/toaster-consumer.xml [deleted file]
opendaylight/md-sal/samples/toaster-it/pom.xml
opendaylight/md-sal/samples/toaster-it/src/test/java/org/opendaylight/controller/sample/toaster/it/ToasterTest.java
opendaylight/md-sal/samples/toaster-provider/pom.xml
opendaylight/md-sal/samples/toaster-provider/src/main/java/org/opendaylight/controller/sample/toaster/provider/OpendaylightToaster.java
opendaylight/md-sal/samples/toaster-provider/src/main/resources/OSGI-INF/blueprint/toaster-provider.xml [deleted file]
opendaylight/md-sal/samples/toaster-provider/src/main/yang/toaster-app-config.yang [deleted file]
opendaylight/md-sal/samples/toaster-provider/src/test/java/org/opendaylight/controller/sample/toaster/provider/OpenDaylightToasterTest.java
opendaylight/md-sal/samples/toaster/pom.xml
opendaylight/model/model-inventory/pom.xml [deleted file]
opendaylight/model/model-inventory/src/main/yang/opendaylight-inventory.yang [deleted file]
opendaylight/model/model-topology/pom.xml [deleted file]
opendaylight/model/model-topology/src/main/yang/opendaylight-topology-inventory.yang [deleted file]
opendaylight/model/model-topology/src/main/yang/opendaylight-topology-view.yang [deleted file]
opendaylight/model/model-topology/src/main/yang/opendaylight-topology.yang [deleted file]
opendaylight/model/pom.xml [deleted file]
pom.xml
tox.ini

diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 0000000..48b1206
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,21 @@
+# .readthedocs.yml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+  jobs:
+    post_checkout:
+      - git fetch --unshallow || true
+
+sphinx:
+  configuration: docs/conf.py
+
+python:
+  install:
+    - requirements: docs/requirements.txt
diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644
index 0000000..229dd9d
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,68 @@
+---
+project: 'controller'
+project_creation_date: '2015-01-08'
+project_category: ''
+lifecycle_state: 'Incubation'
+project_lead: &odl_controller_ptl
+    name: 'Robert Varga'
+    email: 'nite@hq.sk'
+    company: 'hq'
+    id: 'rovarga'
+    timezone: 'Unknown/Unknown'
+primary_contact: *odl_controller_ptl
+issue_tracking:
+    type: 'jira'
+    url: 'https://jira.opendaylight.org/projects/'
+    key: 'controller'
+mailing_list:
+    type: 'groups.io'
+    url: 'kernel-dev@lists.opendaylight.org'
+    tag: '[]'
+realtime_discussion:
+    type: 'irc'
+    server: 'freenode.net'
+    channel: '#opendaylight'
+meetings:
+    - type: 'gotomeeting+irc'
+      agenda: 'https://wiki.opendaylight.org'
+      url: ''
+      server: 'freenode.net'
+      channel: '#opendaylight'
+      repeats: ''
+      time: ''
+repositories:
+    - controller
+committers:
+    - <<: *odl_controller_ptl
+    - name: 'Stephen Kitt'
+      email: 'skitt@redhat.com'
+      company: 'Redhat'
+      id: 'skitt'
+      timezone: 'Unknown/Unknown'
+    - name: 'Tom Pantelis'
+      email: 'tompantelis@gmail.com'
+      company: ''
+      id: 'tpantelis'
+      timezone: 'Unknown/Unknown'
+    - name: 'Ed Warnicke'
+      email: 'hagbard@gmail.com'
+      company: ''
+      id: 'hagbard'
+      timezone: 'Unknown/Unknown'
+    - name: 'Michael Vorburger'
+      email: 'mike@vorburger.ch'
+      company: 'vorburger'
+      id: 'vorburger'
+      timezone: 'Unknown/Unknown'
+    - name: 'Anil Vishnoi'
+      email: 'avishnoi@redhat.com'
+      company: 'Redhat'
+      id: 'Avishnoi'
+      timezone: 'Unknown/Unknown'
+tsc:
+    # yamllint disable rule:line-length
+    approval: 'https://meetings.opendaylight.org/opendaylight-meeting/2015/tsc/opendaylight-meeting-tsc.2015-01-08-18.00.txt'
+    changes:
+        - type: ''
+          name: ''
+          link: ''
diff --git a/features/config-netty/pom.xml b/akka/pom.xml
similarity index 56%
rename from features/config-netty/pom.xml
rename to akka/pom.xml
index 5203030cf3739905c09a79fbe1b336447b70ca19..afd11d74106310b8f6d9a518096119dd45203779 100644
--- a/features/config-netty/pom.xml
+++ b/akka/pom.xml
@@ -1,34 +1,32 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
 
  This program and the accompanying materials are made available under the
  terms of the Eclipse Public License v1.0 which accompanies this distribution,
  and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
+-->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent-lite</artifactId>
-        <version>4.0.9</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-config-netty-aggregator</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
+    <artifactId>akka-aggregator</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
 
+    <properties>
+        <maven.deploy.skip>true</maven.deploy.skip>
+        <maven.install.skip>true</maven.install.skip>
+    </properties>
+
     <modules>
-        <module>features-config-netty</module>
-        <module>odl-config-netty</module>
+        <module>repackaged-akka-jar</module>
+        <module>repackaged-akka</module>
     </modules>
-
-    <scm>
-        <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-        <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-        <tag>HEAD</tag>
-        <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-    </scm>
 </project>
diff --git a/akka/repackaged-akka-jar/pom.xml b/akka/repackaged-akka-jar/pom.xml
new file mode 100644
index 0000000..6c62c5d
--- /dev/null
+++ b/akka/repackaged-akka-jar/pom.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.opendaylight.odlparent</groupId>
+        <artifactId>odlparent</artifactId>
+        <version>13.0.11</version>
+        <relativePath/>
+    </parent>
+
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>repackaged-akka-jar</artifactId>
+    <packaging>jar</packaging>
+    <version>9.0.3-SNAPSHOT</version>
+    <name>${project.artifactId}</name>
+
+    <properties>
+        <!-- We do not want to leak this artifact -->
+        <maven.deploy.skip>true</maven.deploy.skip>
+    </properties>
+
+    <dependencies>
+        <!-- Note: when bumping versions, make sure to update configurations in src/main/resources -->
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-actor_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-actor-typed_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-cluster_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-cluster-typed_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-osgi_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-persistence_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-protobuf_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-remote_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-slf4j_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-stream_2.13</artifactId>
+            <version>2.6.21</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>unpack-license</id>
+                        <configuration>
+                            <!-- Akka is Apache-2.0 licensed -->
+                            <skip>true</skip>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-shade-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>shade</goal>
+                        </goals>
+                        <configuration>
+                            <createDependencyReducedPom>false</createDependencyReducedPom>
+                            <shadeSourcesContent>true</shadeSourcesContent>
+                            <createSourcesJar>true</createSourcesJar>
+                            <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
+                            <artifactSet>
+                                <includes>
+                                    <include>com.typesafe.akka</include>
+                                </includes>
+                            </artifactSet>
+                            <filters>
+                                <filter>
+                                    <artifact>com.typesafe.akka:*</artifact>
+                                    <excludes>
+                                        <exclude>META-INF/MANIFEST.MF</exclude>
+                                        <exclude>reference.conf</exclude>
+                                    </excludes>
+                                </filter>
+                            </filters>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-source-plugin</artifactId>
+                <configuration>
+                    <!-- We handle this through shade plugin -->
+                    <skipSource>true</skipSource>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/akka/repackaged-akka-jar/src/main/resources/LICENSE b/akka/repackaged-akka-jar/src/main/resources/LICENSE
new file mode 100644
index 0000000..c7d5a56
--- /dev/null
+++ b/akka/repackaged-akka-jar/src/main/resources/LICENSE
@@ -0,0 +1,212 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+---------------
+
+Licenses for dependency projects can be found here:
+[http://akka.io/docs/akka/snapshot/project/licenses.html]
+
+---------------
+
+akka-protobuf contains the sources of Google protobuf 2.5.0 runtime support,
+moved into the source package `akka.protobuf` so as to avoid version conflicts.
+For license information see COPYING.protobuf
diff --git a/akka/repackaged-akka-jar/src/main/resources/actor_reference.conf b/akka/repackaged-akka-jar/src/main/resources/actor_reference.conf
new file mode 100644 (file)
index 0000000..d41cb39
--- /dev/null
@@ -0,0 +1,1351 @@
+####################################
+# Akka Actor Reference Config File #
+####################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+# Akka version, checked against the runtime version of Akka. Loaded from generated conf file.
+include "version"
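+
+# For example, a minimal application.conf might override a handful of these
+# defaults (illustrative values only):
+#
+# akka {
+#   loglevel = "DEBUG"
+#   log-dead-letters = off
+# }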
+
+akka {
+  # Home directory of Akka, modules in the deploy directory will be loaded
+  home = ""
+
+  # Loggers to register at boot time (akka.event.Logging$DefaultLogger logs
+  # to STDOUT)
+  loggers = ["akka.event.Logging$DefaultLogger"]
+
+  # Filter of log events that is used by the LoggingAdapter before
+  # publishing log events to the eventStream. It can perform
+  # fine grained filtering based on the log source. The default
+  # implementation filters on the `loglevel`.
+  # FQCN of the LoggingFilter. The Class of the FQCN must implement
+  # akka.event.LoggingFilter and have a public constructor with
+  # (akka.actor.ActorSystem.Settings, akka.event.EventStream) parameters.
+  logging-filter = "akka.event.DefaultLoggingFilter"
+
+  # Specifies the default loggers dispatcher
+  loggers-dispatcher = "akka.actor.default-dispatcher"
+
+  # Loggers are created and registered synchronously during ActorSystem
+  # start-up, and since they are actors, this timeout is used to bound the
+  # waiting time
+  logger-startup-timeout = 5s
+
+  # Log level used by the configured loggers (see "loggers") as soon
+  # as they have been started; before that, see "stdout-loglevel"
+  # Options: OFF, ERROR, WARNING, INFO, DEBUG
+  loglevel = "INFO"
+
+  # Log level for the very basic logger activated during ActorSystem startup.
+  # This logger prints the log messages to stdout (System.out).
+  # Options: OFF, ERROR, WARNING, INFO, DEBUG
+  stdout-loglevel = "WARNING"
+
+  # Log the complete configuration at INFO level when the actor system is started.
+  # This is useful when you are uncertain of what configuration is used.
+  log-config-on-start = off
+
+  # Log at info level when messages are sent to dead letters, or published to
+  # eventStream as `DeadLetter`, `Dropped` or `UnhandledMessage`.
+  # Possible values:
+  # on: all dead letters are logged
+  # off: no logging of dead letters
+  # n: positive integer, number of dead letters that will be logged
+  log-dead-letters = 10
+
+  # Possibility to turn off logging of dead letters while the actor system
+  # is shutting down. Logging is only done when enabled by 'log-dead-letters'
+  # setting.
+  log-dead-letters-during-shutdown = off
+
+  # When log-dead-letters is enabled, this will re-enable the logging after the configured duration.
+  # infinite: suspend the logging forever;
+  # or a duration (e.g. 5 minutes), after which the logging will be re-enabled.
+  log-dead-letters-suspend-duration = 5 minutes
+
+  # List FQCN of extensions which shall be loaded at actor system startup.
+  # Library extensions are regular extensions that are loaded at startup and are
+  # available for third party library authors to enable auto-loading of extensions when
+  # present on the classpath. This is done by appending entries:
+  # 'library-extensions += "Extension"' in the library `reference.conf`.
+  #
+  # Should not be set by end user applications in 'application.conf', use the extensions property for that
+  #
+  library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension$"]
+
+  # List FQCN of extensions which shall be loaded at actor system startup.
+  # Should be in the format: 'extensions = ["foo", "bar"]' etc.
+  # See the Akka Documentation for more info about Extensions
+  extensions = []
+
+  # Toggles whether threads created by this ActorSystem should be daemons or not
+  daemonic = off
+
+  # JVM shutdown, System.exit(-1), in case of a fatal error,
+  # such as OutOfMemoryError
+  jvm-exit-on-fatal-error = on
+
+  # Akka installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will
+  # not disable user-provided hooks registered using `CoordinatedShutdown#addCancellableJvmShutdownHook`.
+  # This property is related to `akka.coordinated-shutdown.run-by-jvm-shutdown-hook` below.
+  # This property makes it possible to disable all such hooks if the application itself
+  # or a higher level framework such as Play prefers to install the JVM shutdown hook and
+  # terminate the ActorSystem itself, with or without using CoordinatedShutdown.
+  jvm-shutdown-hooks = on
+
+  # Version must be the same across all modules, and if they differ the startup
+  # will fail. It is possible, but not recommended, to disable this check and only
+  # log a warning by setting this property to `off`.
+  fail-mixed-versions = on
+
+  # Some modules (remoting only right now) can emit custom events to the Java Flight Recorder if running
+  # on JDK 11 or later. If you for some reason do not want that, it can be disabled and switched to no-ops
+  # with this toggle.
+  java-flight-recorder {
+    enabled = true
+  }
+
+  actor {
+
+    # Either one of "local", "remote" or "cluster" or the
+    # FQCN of the ActorRefProvider to be used; the below is the built-in default,
+    # note that "remote" and "cluster" requires the akka-remote and akka-cluster
+    # artifacts to be on the classpath.
+    provider = "local"
+
+    # The guardian "/user" will use this class to obtain its supervisorStrategy.
+    # It needs to be a subclass of akka.actor.SupervisorStrategyConfigurator.
+    # In addition to the default there is akka.actor.StoppingSupervisorStrategy.
+    guardian-supervisor-strategy = "akka.actor.DefaultSupervisorStrategy"
+
+    # Timeout for Extension creation and a few other potentially blocking
+    # initialization tasks.
+    creation-timeout = 20s
+
+    # Serializes and deserializes (non-primitive) messages to ensure immutability,
+    # this is only intended for testing.
+    serialize-messages = off
+
+    # Serializes and deserializes creators (in Props) to ensure that they can be
+    # sent over the network, this is only intended for testing. Purely local deployments
+    # as marked with deploy.scope == LocalScope are exempt from verification.
+    serialize-creators = off
+
+    # If serialize-messages or serialize-creators are enabled, classes that start with
+    # a prefix listed here are not verified.
+    no-serialization-verification-needed-class-prefix = ["akka."]
+
+    # Timeout for send operations to top-level actors which are in the process
+    # of being started. This is only relevant if using a bounded mailbox or the
+    # CallingThreadDispatcher for a top-level actor.
+    unstarted-push-timeout = 10s
+
+    # TypedActor deprecated since 2.6.0.
+    typed {
+      # Default timeout for the deprecated TypedActor (not the new actor APIs in 2.6)
+      # methods with non-void return type.
+      timeout = 5s
+    }
+
+    # Mapping from 'deployment.router' short names to fully qualified class names
+    router.type-mapping {
+      from-code = "akka.routing.NoRouter"
+      round-robin-pool = "akka.routing.RoundRobinPool"
+      round-robin-group = "akka.routing.RoundRobinGroup"
+      random-pool = "akka.routing.RandomPool"
+      random-group = "akka.routing.RandomGroup"
+      balancing-pool = "akka.routing.BalancingPool"
+      smallest-mailbox-pool = "akka.routing.SmallestMailboxPool"
+      broadcast-pool = "akka.routing.BroadcastPool"
+      broadcast-group = "akka.routing.BroadcastGroup"
+      scatter-gather-pool = "akka.routing.ScatterGatherFirstCompletedPool"
+      scatter-gather-group = "akka.routing.ScatterGatherFirstCompletedGroup"
+      tail-chopping-pool = "akka.routing.TailChoppingPool"
+      tail-chopping-group = "akka.routing.TailChoppingGroup"
+      consistent-hashing-pool = "akka.routing.ConsistentHashingPool"
+      consistent-hashing-group = "akka.routing.ConsistentHashingGroup"
+    }
+
+    deployment {
+
+      # deployment id pattern - in the format: /parent/child etc.
+      default {
+
+        # The id of the dispatcher to use for this actor.
+        # If undefined or empty the dispatcher specified in code
+        # (Props.withDispatcher) is used, or default-dispatcher if not
+        # specified at all.
+        dispatcher = ""
+
+        # The id of the mailbox to use for this actor.
+        # If undefined or empty the default mailbox of the configured dispatcher
+        # is used or if there is no mailbox configuration the mailbox specified
+        # in code (Props.withMailbox) is used.
+        # If there is a mailbox defined in the configured dispatcher then that
+        # overrides this setting.
+        mailbox = ""
+
+        # routing (load-balance) scheme to use
+        # - available: "from-code", "round-robin", "random", "smallest-mailbox",
+        #              "scatter-gather", "broadcast"
+        # - or:        Fully qualified class name of the router class.
+        #              The class must extend akka.routing.CustomRouterConfig and
+        #              have a public constructor with com.typesafe.config.Config
+        #              and optional akka.actor.DynamicAccess parameter.
+        # - default is "from-code";
+        # Whether or not an actor is transformed to a Router is decided in code
+        # only (Props.withRouter). The type of router can be overridden in the
+        # configuration; specifying "from-code" means that the values specified
+        # in the code shall be used.
+        # In case of routing, the actors to be routed to can be specified
+        # in several ways:
+        # - nr-of-instances: will create that many children
+        # - routees.paths: will route messages to these paths using ActorSelection,
+        #   i.e. will not create children
+        # - resizer: dynamically resizable number of routees as specified in
+        #   resizer below
+        router = "from-code"
+
+        # number of children to create in case of a router;
+        # this setting is ignored if routees.paths is given
+        nr-of-instances = 1
+
+        # within is the timeout used for routers containing future calls
+        within = 5 seconds
+
+        # number of virtual nodes per node for consistent-hashing router
+        virtual-nodes-factor = 10
+
+        tail-chopping-router {
+          # interval is duration between sending message to next routee
+          interval = 10 milliseconds
+        }
+
+        routees {
+          # Alternatively to giving nr-of-instances you can specify the full
+          # paths of those actors which should be routed to. This setting takes
+          # precedence over nr-of-instances
+          paths = []
+        }
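+
+        # As an illustration (hypothetical actor path, illustrative values), a pool
+        # router for a specific path could be configured in application.conf as:
+        #
+        # akka.actor.deployment {
+        #   "/parent/workers" {
+        #     router = round-robin-pool
+        #     nr-of-instances = 5
+        #   }
+        # }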
+
+        # To use a dedicated dispatcher for the routees of the pool you can
+        # define the dispatcher configuration inline with the property name
+        # 'pool-dispatcher' in the deployment section of the router.
+        # For example:
+        # pool-dispatcher {
+        #   fork-join-executor.parallelism-min = 5
+        #   fork-join-executor.parallelism-max = 5
+        # }
+
+        # Routers with dynamically resizable number of routees; this feature is
+        # enabled by including (parts of) this section in the deployment
+        resizer {
+
+          enabled = off
+
+          # The fewest number of routees the router should ever have.
+          lower-bound = 1
+
+          # The maximum number of routees the router should ever have.
+          # Must be greater than or equal to lower-bound.
+          upper-bound = 10
+
+          # Threshold used to evaluate if a routee is considered to be busy
+          # (under pressure). Implementation depends on this value (default is 1).
+          # 0:   number of routees currently processing a message.
+          # 1:   number of routees currently processing a message and with
+          #      some messages in mailbox.
+          # > 1: number of routees with at least the configured pressure-threshold
+          #      messages in their mailbox. Note that estimating mailbox size of
+          #      default UnboundedMailbox is O(N) operation.
+          pressure-threshold = 1
+
+          # Percentage to increase capacity whenever all routees are busy.
+          # For example, 0.2 would increase 20% (rounded up), i.e. if current
+          # capacity is 6 it will request an increase of 2 more routees.
+          rampup-rate = 0.2
+
+          # Minimum fraction of busy routees before backing off.
+          # For example, if this is 0.3, then we'll remove some routees only when
+          # less than 30% of routees are busy, i.e. if current capacity is 10 and
+          # 3 are busy then the capacity is unchanged, but if 2 or less are busy
+          # the capacity is decreased.
+          # Use 0.0 or negative to avoid removal of routees.
+          backoff-threshold = 0.3
+
+          # Fraction of routees to be removed when the resizer reaches the
+          # backoffThreshold.
+          # For example, 0.1 would decrease 10% (rounded up), i.e. if current
+          # capacity is 9 it will request a decrease of 1 routee.
+          backoff-rate = 0.1
+
+          # Number of messages between resize operation.
+          # Use 1 to resize before each message.
+          messages-per-resize = 10
+        }
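+
+        # For example, a deployment could enable the resizer with custom bounds
+        # (illustrative values):
+        #
+        # resizer {
+        #   enabled = on
+        #   lower-bound = 2
+        #   upper-bound = 15
+        # }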
+
+        # Routers with dynamically resizable number of routees based on
+        # performance metrics.
+        # This feature is enabled by including (parts of) this section in
+        # the deployment; it cannot be enabled together with the default resizer.
+        optimal-size-exploring-resizer {
+
+          enabled = off
+
+          # The fewest number of routees the router should ever have.
+          lower-bound = 1
+
+          # The maximum number of routees the router should ever have.
+          # Must be greater than or equal to lower-bound.
+          upper-bound = 10
+
+          # Probability of ramping down when all routees are busy
+          # during exploration.
+          chance-of-ramping-down-when-full = 0.2
+
+          # Interval between each resize attempt
+          action-interval = 5s
+
+          # If the routees have not been fully utilized (i.e. all routees busy)
+          # for such length, the resizer will downsize the pool.
+          downsize-after-underutilized-for = 72h
+
+          # Duration exploration, the ratio between the largest step size and
+          # current pool size. E.g. if the current pool size is 50, and the
+          # explore-step-size is 0.1, the maximum pool size change during
+          # exploration will be +- 5
+          explore-step-size = 0.1
+
+          # Probability of doing an exploration vs. an optimization.
+          chance-of-exploration = 0.4
+
+          # When downsizing after a long streak of underutilization, the resizer
+          # will downsize the pool to the highest utilization multiplied by
+          # a downsize ratio. This downsize ratio determines the new pool's size
+          # in comparison to the highest utilization.
+          # E.g. if the highest utilization is 10, and the down size ratio
+          # is 0.8, the pool will be downsized to 8
+          downsize-ratio = 0.8
+
+          # When optimizing, the resizer only considers the sizes adjacent to the
+          # current size. This number indicates how many adjacent sizes to consider.
+          optimization-range = 16
+
+          # The weight of the latest metric over old metrics when collecting
+          # performance metrics.
+          # E.g. if the last processing speed is 10 millis per message at pool
+          # size 5, and if the new processing speed collected is 6 millis per
+          # message at pool size 5. Given a weight of 0.3, the metrics
+          # representing pool size 5 will be 6 * 0.3 + 10 * 0.7, i.e. 8.8 millis
+          # Obviously, this number should be between 0 and 1.
+          weight-of-latest-metric = 0.5
+        }
+      }
+
+      "/IO-DNS/inet-address" {
+        mailbox = "unbounded"
+        router = "consistent-hashing-pool"
+        nr-of-instances = 4
+      }
+
+      "/IO-DNS/inet-address/*" {
+        dispatcher = "akka.actor.default-blocking-io-dispatcher"
+      }
+
+      "/IO-DNS/async-dns" {
+        mailbox = "unbounded"
+        router = "round-robin-pool"
+        nr-of-instances = 1
+      }
+    }
+
+    default-dispatcher {
+      # Must be one of the following
+      # Dispatcher, PinnedDispatcher, or a FQCN to a class inheriting
+      # MessageDispatcherConfigurator with a public constructor with
+      # both com.typesafe.config.Config parameter and
+      # akka.dispatch.DispatcherPrerequisites parameters.
+      # PinnedDispatcher must be used together with executor=thread-pool-executor.
+      type = "Dispatcher"
+
+      # Which kind of ExecutorService to use for this dispatcher
+      # Valid options:
+      #  - "default-executor" requires a "default-executor" section
+      #  - "fork-join-executor" requires a "fork-join-executor" section
+      #  - "thread-pool-executor" requires a "thread-pool-executor" section
+      #  - "affinity-pool-executor" requires an "affinity-pool-executor" section
+      #  - A FQCN of a class extending ExecutorServiceConfigurator
+      executor = "default-executor"
+
+      # This will be used if you have set "executor = "default-executor"".
+      # If an ActorSystem is created with a given ExecutionContext, this
+      # ExecutionContext will be used as the default executor for all
+      # dispatchers in the ActorSystem configured with
+      # executor = "default-executor". Note that "default-executor"
+      # is the default value for executor, and therefore used if not
+      # specified otherwise. If no ExecutionContext is given,
+      # the executor configured in "fallback" will be used.
+      default-executor {
+        fallback = "fork-join-executor"
+      }
+
+      # This will be used if you have set "executor = "affinity-pool-executor""
+      # Underlying thread pool implementation is akka.dispatch.affinity.AffinityPool.
+      # This executor is classified as "ApiMayChange".
+      affinity-pool-executor {
+        # Min number of threads to cap factor-based parallelism number to
+        parallelism-min = 4
+
+        # The parallelism factor is used to determine thread pool size using the
+        # following formula: ceil(available processors * factor). Resulting size
+        # is then bounded by the parallelism-min and parallelism-max values.
+        parallelism-factor = 0.8
+
+        # Max number of threads to cap factor-based parallelism number to.
+        parallelism-max = 64
+
+        # Each worker in the pool uses a separate bounded MPSC queue. This value
+        # indicates the upper bound of the queue. Whenever an attempt to enqueue
+        # a task is made and the queue does not have capacity to accommodate
+        # the task, the rejection handler created by the rejection handler specified
+        # in "rejection-handler" is invoked.
+        task-queue-size = 512
+
+        # FQCN of the Rejection handler used in the pool.
+        # Must have an empty public constructor and must
+        # implement akka.actor.affinity.RejectionHandlerFactory.
+        rejection-handler = "akka.dispatch.affinity.ThrowOnOverflowRejectionHandler"
+
+        # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
+        # The tradeoff is that to have low latency more CPU time must be used to be
+        # able to react quickly on incoming messages or send as fast as possible after
+        # backoff backpressure.
+        # Level 1 strongly prefers low CPU consumption over low latency.
+        # Level 10 strongly prefers low latency over low CPU consumption.
+        idle-cpu-level = 5
+
+        # FQCN of the akka.dispatch.affinity.QueueSelectorFactory.
+        # The Class of the FQCN must have a public constructor with a
+        # (com.typesafe.config.Config) parameter.
+        # A QueueSelectorFactory create instances of akka.dispatch.affinity.QueueSelector,
+        # that is responsible for determining which task queue a Runnable should be enqueued in.
+        queue-selector = "akka.dispatch.affinity.FairDistributionHashCache"
+
+        # When using the "akka.dispatch.affinity.FairDistributionHashCache" queue selector
+        # internally the AffinityPool uses two methods to determine which task
+        # queue to allocate a Runnable to:
+        # - map based - maintains a round robin counter and a map of Runnable
+        # hashcodes to queues that they have been associated with. This ensures
+        # maximum fairness in terms of work distribution, meaning that each worker
+        # will get approximately equal amount of mailboxes to execute. This is suitable
+        # in cases where we have a small number of actors that will be scheduled on
+        # the pool and we want to ensure the maximum possible utilization of the
+        # available threads.
+        # - hash based - the task queue in which the runnable should go is determined
+        # by using a uniformly distributed int-to-int hash function which uses the
+        # hash code of the Runnable as an input. This is preferred in situations where we
+        # have a large enough number of distinct actors to ensure statistically uniform
+        # distribution of work across threads or we are ready to sacrifice the
+        # former for the added benefit of avoiding map look-ups.
+        fair-work-distribution {
+          # The value serves as a threshold which determines the point at which the
+          # pool switches from the first to the second work distribution schemes.
+          # For example, if the value is set to 128, the pool can observe up to
+          # 128 unique actors and schedule their mailboxes using the map based
+          # approach. Once this number is reached the pool switches to hash based
+          # task distribution mode. If the value is set to 0, the map based
+          # work distribution approach is disabled and only the hash based is
+          # used irrespective of the number of unique actors. Valid range is
+          # 0 to 2048 (inclusive)
+          threshold = 128
+        }
+      }
+
+      # This will be used if you have set "executor = "fork-join-executor""
+      # Underlying thread pool implementation is java.util.concurrent.ForkJoinPool
+      fork-join-executor {
+        # Min number of threads to cap factor-based parallelism number to
+        parallelism-min = 8
+
+        # The parallelism factor is used to determine thread pool size using the
+        # following formula: ceil(available processors * factor). Resulting size
+        # is then bounded by the parallelism-min and parallelism-max values.
+        parallelism-factor = 1.0
+
+        # Max number of threads to cap factor-based parallelism number to
+        parallelism-max = 64
+
+        # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack
+        # like peeking mode which "pop".
+        task-peeking-mode = "FIFO"
+      }
+
+      # This will be used if you have set "executor = "thread-pool-executor""
+      # Underlying thread pool implementation is java.util.concurrent.ThreadPoolExecutor
+      thread-pool-executor {
+        # Keep alive time for threads
+        keep-alive-time = 60s
+
+        # Define a fixed thread pool size with this property. The corePoolSize
+        # and the maximumPoolSize of the ThreadPoolExecutor will be set to this
+        # value, if it is defined. Then the other pool-size properties will not
+        # be used.
+        #
+        # Valid values are: `off` or a positive integer.
+        fixed-pool-size = off
+
+        # Min number of threads to cap factor-based corePoolSize number to
+        core-pool-size-min = 8
+
+        # The core-pool-size-factor is used to determine corePoolSize of the
+        # ThreadPoolExecutor using the following formula:
+        # ceil(available processors * factor).
+        # Resulting size is then bounded by the core-pool-size-min and
+        # core-pool-size-max values.
+        core-pool-size-factor = 3.0
+
+        # Max number of threads to cap factor-based corePoolSize number to
+        core-pool-size-max = 64
+
+        # Minimum number of threads to cap factor-based maximumPoolSize number to
+        max-pool-size-min = 8
+
+        # The max-pool-size-factor is used to determine maximumPoolSize of the
+        # ThreadPoolExecutor using the following formula:
+        # ceil(available processors * factor)
+        # The maximumPoolSize will not be less than corePoolSize.
+        # It is only used if using a bounded task queue.
+        max-pool-size-factor  = 3.0
+
+        # Max number of threads to cap factor-based maximumPoolSize number to
+        max-pool-size-max = 64
+
+        # Specifies the bounded capacity of the task queue (< 1 == unbounded)
+        task-queue-size = -1
+
+        # Specifies which type of task queue will be used, can be "array" or
+        # "linked" (default)
+        task-queue-type = "linked"
+
+        # Allow core threads to time out
+        allow-core-timeout = on
+      }
+
+      # How long the dispatcher will wait for new actors before it shuts down
+      shutdown-timeout = 1s
+
+      # Throughput defines the number of messages that are processed in a batch
+      # before the thread is returned to the pool. Set to 1 for as fair as possible.
+      throughput = 5
+
+      # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
+      throughput-deadline-time = 0ms
+
+      # For BalancingDispatcher: If the balancing dispatcher should attempt to
+      # schedule idle actors using the same dispatcher when a message comes in,
+      # and the dispatcher's ExecutorService is not fully busy already.
+      attempt-teamwork = on
+
+      # If this dispatcher requires a specific type of mailbox, specify the
+      # fully-qualified class name here; the actually created mailbox will
+      # be a subtype of this type. The empty string signifies no requirement.
+      mailbox-requirement = ""
+    }
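+
+    # As a sketch, an application.conf could trade throughput for fairness on the
+    # default dispatcher (illustrative values):
+    #
+    # akka.actor.default-dispatcher {
+    #   executor = "thread-pool-executor"
+    #   thread-pool-executor.fixed-pool-size = 8
+    #   throughput = 1
+    # }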
+
+    # Default separate internal dispatcher to run Akka internal tasks and actors on
+    # protecting them against starvation because of accidental blocking in user actors (which run on the
+    # default dispatcher)
+    internal-dispatcher {
+      type = "Dispatcher"
+      executor = "fork-join-executor"
+      throughput = 5
+      fork-join-executor {
+        parallelism-min = 4
+        parallelism-factor = 1.0
+        parallelism-max = 64
+      }
+    }
+
+    default-blocking-io-dispatcher {
+      type = "Dispatcher"
+      executor = "thread-pool-executor"
+      throughput = 1
+
+      thread-pool-executor {
+        fixed-pool-size = 16
+      }
+    }
+
+    default-mailbox {
+      # FQCN of the MailboxType. The Class of the FQCN must have a public
+      # constructor with
+      # (akka.actor.ActorSystem.Settings, com.typesafe.config.Config) parameters.
+      mailbox-type = "akka.dispatch.UnboundedMailbox"
+
+      # If the mailbox is bounded then it uses this setting to determine its
+      # capacity. The provided value must be positive.
+      # NOTICE:
+      # Up to version 2.1 the mailbox type was determined based on this setting;
+      # this is no longer the case; the type must explicitly be a bounded mailbox.
+      mailbox-capacity = 1000
+
+      # If the mailbox is bounded then this is the timeout for enqueueing
+      # in case the mailbox is full. Negative values signify infinite
+      # timeout, which should be avoided as it bears the risk of dead-lock.
+      mailbox-push-timeout-time = 10s
+
+      # For Actor with Stash: The default capacity of the stash.
+      # If negative (or zero) then an unbounded stash is used (default)
+      # If positive then a bounded stash is used and the capacity is set using
+      # the property
+      stash-capacity = -1
+    }
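+
+    # A custom bounded mailbox can be declared the same way and then referenced
+    # from code (Props.withMailbox) or from the deployment section above
+    # (illustrative name and values):
+    #
+    # my-bounded-mailbox {
+    #   mailbox-type = "akka.dispatch.BoundedMailbox"
+    #   mailbox-capacity = 1000
+    #   mailbox-push-timeout-time = 10s
+    # }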
+
+    mailbox {
+      # Mapping between message queue semantics and mailbox configurations.
+      # Used by akka.dispatch.RequiresMessageQueue[T] to enforce different
+      # mailbox types on actors.
+      # If your Actor implements RequiresMessageQueue[T], then when you create
+      # an instance of that actor its mailbox type will be decided by looking
+      # up a mailbox configuration via T in this mapping
+      requirements {
+        "akka.dispatch.UnboundedMessageQueueSemantics" =
+          akka.actor.mailbox.unbounded-queue-based
+        "akka.dispatch.BoundedMessageQueueSemantics" =
+          akka.actor.mailbox.bounded-queue-based
+        "akka.dispatch.DequeBasedMessageQueueSemantics" =
+          akka.actor.mailbox.unbounded-deque-based
+        "akka.dispatch.UnboundedDequeBasedMessageQueueSemantics" =
+          akka.actor.mailbox.unbounded-deque-based
+        "akka.dispatch.BoundedDequeBasedMessageQueueSemantics" =
+          akka.actor.mailbox.bounded-deque-based
+        "akka.dispatch.MultipleConsumerSemantics" =
+          akka.actor.mailbox.unbounded-queue-based
+        "akka.dispatch.ControlAwareMessageQueueSemantics" =
+          akka.actor.mailbox.unbounded-control-aware-queue-based
+        "akka.dispatch.UnboundedControlAwareMessageQueueSemantics" =
+          akka.actor.mailbox.unbounded-control-aware-queue-based
+        "akka.dispatch.BoundedControlAwareMessageQueueSemantics" =
+          akka.actor.mailbox.bounded-control-aware-queue-based
+        "akka.event.LoggerMessageQueueSemantics" =
+          akka.actor.mailbox.logger-queue
+      }
+
+      unbounded-queue-based {
+        # FQCN of the MailboxType, The Class of the FQCN must have a public
+        # constructor with (akka.actor.ActorSystem.Settings,
+        # com.typesafe.config.Config) parameters.
+        mailbox-type = "akka.dispatch.UnboundedMailbox"
+      }
+
+      bounded-queue-based {
+        # FQCN of the MailboxType, The Class of the FQCN must have a public
+        # constructor with (akka.actor.ActorSystem.Settings,
+        # com.typesafe.config.Config) parameters.
+        mailbox-type = "akka.dispatch.BoundedMailbox"
+      }
+
+      unbounded-deque-based {
+        # FQCN of the MailboxType, The Class of the FQCN must have a public
+        # constructor with (akka.actor.ActorSystem.Settings,
+        # com.typesafe.config.Config) parameters.
+        mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox"
+      }
+
+      bounded-deque-based {
+        # FQCN of the MailboxType, The Class of the FQCN must have a public
+        # constructor with (akka.actor.ActorSystem.Settings,
+        # com.typesafe.config.Config) parameters.
+        mailbox-type = "akka.dispatch.BoundedDequeBasedMailbox"
+      }
+
+      unbounded-control-aware-queue-based {
+        # FQCN of the MailboxType, The Class of the FQCN must have a public
+        # constructor with (akka.actor.ActorSystem.Settings,
+        # com.typesafe.config.Config) parameters.
+        mailbox-type = "akka.dispatch.UnboundedControlAwareMailbox"
+      }
+
+      bounded-control-aware-queue-based {
+        # FQCN of the MailboxType, The Class of the FQCN must have a public
+        # constructor with (akka.actor.ActorSystem.Settings,
+        # com.typesafe.config.Config) parameters.
+        mailbox-type = "akka.dispatch.BoundedControlAwareMailbox"
+      }
+
+      # The LoggerMailbox will drain all messages in the mailbox
+      # when the system is shut down and deliver them to the StandardOutLogger.
+      # Do not change this unless you know what you are doing.
+      logger-queue {
+        mailbox-type = "akka.event.LoggerMailboxType"
+      }
+    }
+
+    debug {
+      # enable function of Actor.loggable(), which is to log any received message
+      # at DEBUG level, see the “Testing Actor Systems” section of the Akka
+      # Documentation at https://akka.io/docs
+      receive = off
+
+      # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.)
+      autoreceive = off
+
+      # enable DEBUG logging of actor lifecycle changes
+      lifecycle = off
+
+      # enable DEBUG logging of all LoggingFSMs for events, transitions and timers
+      fsm = off
+
+      # enable DEBUG logging of subscription changes on the eventStream
+      event-stream = off
+
+      # enable DEBUG logging of unhandled messages
+      unhandled = off
+
+      # enable WARN logging of misconfigured routers
+      router-misconfiguration = off
+    }
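+
+    # To actually see these events, the flags are combined with DEBUG logging
+    # (illustrative application.conf snippet; `receive` additionally relies on
+    # Actor.loggable() as noted above):
+    #
+    # akka {
+    #   loglevel = "DEBUG"
+    #   actor.debug {
+    #     receive = on
+    #     lifecycle = on
+    #   }
+    # }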
+
+    # SECURITY BEST-PRACTICE is to disable java serialization for its multiple
+    # known attack surfaces.
+    #
+    # This setting is a short-cut to
+    # - using DisabledJavaSerializer instead of JavaSerializer
+    #
+    # Completely disable the use of `akka.serialization.JavaSerialization` by the
+    # Akka Serialization extension, instead DisabledJavaSerializer will
+    # be inserted which will fail explicitly if attempts to use java serialization are made.
+    #
+    # The log messages emitted by such serializer SHOULD be treated as potential
+    # attacks which the serializer prevented, as they MAY indicate an external operator
+    # attempting to send malicious messages intending to use java serialization as attack vector.
+    # The attempts are logged with the SECURITY marker.
+    #
+    # Please note that this option does not stop you from manually invoking java serialization
+    #
+    allow-java-serialization = on
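+
+    # Following the best practice above, applications that do not rely on Java
+    # serialization can disable it outright:
+    #
+    # akka.actor.allow-java-serialization = off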
+
+    # Log warnings when the Java serialization is used to serialize messages.
+    # Java serialization is not very performant and should not be used in production
+    # environments unless you don't care about performance and security. In that case
+    # you can turn this off.
+    warn-about-java-serializer-usage = on
+
+    # To be used with the above warn-about-java-serializer-usage.
+    # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off,
+    # warnings are suppressed for classes extending NoSerializationVerificationNeeded
+    # to reduce noise.
+    warn-on-no-serialization-verification = on
+
+    # Entries for pluggable serializers and their bindings.
+    serializers {
+      java = "akka.serialization.JavaSerializer"
+      bytes = "akka.serialization.ByteArraySerializer"
+      primitive-long = "akka.serialization.LongSerializer"
+      primitive-int = "akka.serialization.IntSerializer"
+      primitive-string = "akka.serialization.StringSerializer"
+      primitive-bytestring = "akka.serialization.ByteStringSerializer"
+      primitive-boolean = "akka.serialization.BooleanSerializer"
+    }
+
+    # Class to Serializer binding. You only need to specify the name of an
+    # interface or abstract base class of the messages. In case of ambiguity the
+    # most specific configured class is used; otherwise a warning is given and
+    # the "first" one is chosen.
+    #
+    # To disable one of the default serializers, assign its class to "none", like
+    # "java.io.Serializable" = none
+    serialization-bindings {
+      "[B" = bytes
+      "java.io.Serializable" = java
+
+      "java.lang.String" = primitive-string
+      "akka.util.ByteString$ByteString1C" = primitive-bytestring
+      "akka.util.ByteString$ByteString1" = primitive-bytestring
+      "akka.util.ByteString$ByteStrings" = primitive-bytestring
+      "java.lang.Long" = primitive-long
+      "scala.Long" = primitive-long
+      "java.lang.Integer" = primitive-int
+      "scala.Int" = primitive-int
+      "java.lang.Boolean" = primitive-boolean
+      "scala.Boolean" = primitive-boolean
+    }
+
+    # Configuration namespace of serialization identifiers.
+    # Each serializer implementation must have an entry in the following format:
+    # `akka.actor.serialization-identifiers."FQCN" = ID`
+    # where `FQCN` is fully qualified class name of the serializer implementation
+    # and `ID` is globally unique serializer identifier number.
+    # Identifier values from 0 to 40 are reserved for Akka internal usage.
+    serialization-identifiers {
+      "akka.serialization.JavaSerializer" = 1
+      "akka.serialization.ByteArraySerializer" = 4
+
+      primitive-long = 18
+      primitive-int = 19
+      primitive-string = 20
+      primitive-bytestring = 21
+      primitive-boolean = 35
+    }
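+
+    # Registering an application-level serializer ties the three sections above
+    # together; class names and the identifier below are hypothetical, and
+    # identifiers 0 to 40 are reserved for Akka:
+    #
+    # serializers {
+    #   my-proto = "com.example.MyProtobufSerializer"
+    # }
+    # serialization-bindings {
+    #   "com.example.MyMessage" = my-proto
+    # }
+    # serialization-identifiers {
+    #   "com.example.MyProtobufSerializer" = 1000
+    # }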
+
+  }
+
+  serialization.protobuf {
+    # deprecated, use `allowed-classes` instead
+    whitelist-class = [
+      "com.google.protobuf.GeneratedMessage",
+      "com.google.protobuf.GeneratedMessageV3",
+      "scalapb.GeneratedMessageCompanion",
+      "akka.protobuf.GeneratedMessage",
+      "akka.protobufv3.internal.GeneratedMessageV3"
+    ]
+
+    # Additional classes that are allowed even if they are not defined in `serialization-bindings`.
+    # It can be an exact class name or the name of a super class or interface (one level).
+    # This is useful when a class is no longer used for serialization and has therefore been
+    # removed from `serialization-bindings`, but it should still be possible to deserialize it.
+    allowed-classes = ${akka.serialization.protobuf.whitelist-class}
+
+  }
+
+  # Used to set the behavior of the scheduler.
+  # Changing the default values may change the system behavior drastically so make
+  # sure you know what you're doing! See the Scheduler section of the Akka
+  # Documentation for more details.
+  scheduler {
+    # The LightArrayRevolverScheduler is used as the default scheduler in the
+    # system. It does not execute the scheduled tasks at the exact time, but on every
+    # tick, it will run everything that is (over)due. You can increase or decrease
+    # the accuracy of the execution timing by specifying smaller or larger tick
+    # duration. If you are scheduling a lot of tasks you should consider increasing
+    # the ticks per wheel.
+    # Note that it might take up to 1 tick to stop the Timer, so setting the
+    # tick-duration to a high value will make shutting down the actor system
+    # take longer.
+    tick-duration = 10ms
+
+    # The timer uses a circular wheel of buckets to store the timer tasks.
+    # This should be set such that the majority of scheduled timeouts (for high
+    # scheduling frequency) will be shorter than one rotation of the wheel
+    # (ticks-per-wheel * tick-duration)
+    # THIS MUST BE A POWER OF TWO!
+    ticks-per-wheel = 512
+
+    # This setting selects the timer implementation which shall be loaded at
+    # system start-up.
+    # The class given here must implement the akka.actor.Scheduler interface
+    # and offer a public constructor which takes three arguments:
+    #  1) com.typesafe.config.Config
+    #  2) akka.event.LoggingAdapter
+    #  3) java.util.concurrent.ThreadFactory
+    implementation = akka.actor.LightArrayRevolverScheduler
+
+    # When shutting down the scheduler, there will typically be a thread which
+    # needs to be stopped, and this timeout determines how long to wait for
+    # that to happen. In case of timeout the shutdown of the actor system will
+    # proceed without running possibly still enqueued tasks.
+    shutdown-timeout = 5s
+  }
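+
+  # For instance, a system with only coarse-grained timeouts could use a larger
+  # tick to reduce timer wake-ups, at the cost of timing accuracy and a slower
+  # shutdown (illustrative values; ticks-per-wheel must remain a power of two):
+  #
+  # akka.scheduler {
+  #   tick-duration = 100ms
+  #   ticks-per-wheel = 512
+  # }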
+
+  io {
+
+    # By default the select loops run on dedicated threads, hence using a
+    # PinnedDispatcher
+    pinned-dispatcher {
+      type = "PinnedDispatcher"
+      executor = "thread-pool-executor"
+      thread-pool-executor.allow-core-timeout = off
+    }
+
+    tcp {
+
+      # The number of selectors to stripe the served channels over; each of
+      # these will use one select loop on the selector-dispatcher.
+      nr-of-selectors = 1
+
+      # Maximum number of open channels supported by this TCP module; there is
+      # no intrinsic general limit, this setting is meant to enable DoS
+      # protection by limiting the number of concurrently connected clients.
+      # Also note that this is a "soft" limit; in certain cases the implementation
+      # will accept a few connections more or a few less than the number configured
+      # here. Must be an integer > 0 or "unlimited".
+      max-channels = 256000
+
+      # When trying to assign a new connection to a selector and the chosen
+      # selector is at full capacity, retry selector choosing and assignment
+      # this many times before giving up
+      selector-association-retries = 10
+
+      # The maximum number of connections that are accepted in one go,
+      # higher numbers decrease latency, lower numbers increase fairness on
+      # the worker-dispatcher
+      batch-accept-limit = 10
+
+      # The number of bytes per direct buffer in the pool used to read or write
+      # network data from the kernel.
+      direct-buffer-size = 128 KiB
+
+      # The maximal number of direct buffers kept in the direct buffer pool for
+      # reuse.
+      direct-buffer-pool-limit = 1000
+
+      # The duration a connection actor waits for a `Register` message from
+      # its commander before aborting the connection.
+      register-timeout = 5s
+
+      # The maximum number of bytes delivered by a `Received` message. Before
+      # more data is read from the network the connection actor will try to
+      # do other work.
+      # The purpose of this setting is to impose a smaller limit than the
+      # configured receive buffer size. When using value 'unlimited' it will
+      # try to read all from the receive buffer.
+      max-received-message-size = unlimited
+
+      # Enable fine grained logging of what goes on inside the implementation.
+      # Be aware that this may log more than once per message sent to the actors
+      # of the tcp implementation.
+      trace-logging = off
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # to be used for running the select() calls in the selectors
+      selector-dispatcher = "akka.io.pinned-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # for the read/write worker actors
+      worker-dispatcher = "akka.actor.internal-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # for the selector management actors
+      management-dispatcher = "akka.actor.internal-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # on which file IO tasks are scheduled
+      file-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+
+      # The maximum number of bytes (or "unlimited") to transfer in one batch
+      # when using `WriteFile` command which uses `FileChannel.transferTo` to
+      # pipe files to a TCP socket. On some OS like Linux `FileChannel.transferTo`
+      # may block for a long time when network IO is faster than file IO.
+      # Decreasing the value may improve fairness while increasing may improve
+      # throughput.
+      file-io-transferTo-limit = 512 KiB
+
+      # The number of times to retry the `finishConnect` call after being notified about
+      # OP_CONNECT. Retries are needed if the OP_CONNECT notification doesn't imply that
+      # `finishConnect` will succeed, which is the case on Android.
+      finish-connect-retries = 5
+
+      # On Windows connection aborts are not reliably detected unless an OP_READ is
+      # registered on the selector _after_ the connection has been reset. This
+      # workaround enables an OP_CONNECT which forces the abort to be visible on Windows.
+      # Enabling this setting on other platforms than Windows will cause various failures
+      # and undefined behavior.
+      # Possible values of this key are on, off and auto where auto will enable the
+      # workaround if Windows is detected automatically.
+      windows-connection-abort-workaround-enabled = off
+    }
+
+    udp {
+
+      # The number of selectors to stripe the served channels over; each of
+      # these will use one select loop on the selector-dispatcher.
+      nr-of-selectors = 1
+
+      # Maximum number of open channels supported by this UDP module. Generally
+      # UDP does not require a large number of channels, therefore it is
+      # recommended to keep this setting low.
+      max-channels = 4096
+
+      # The select loop can be used in two modes:
+      # - setting "infinite" will select without a timeout, hogging a thread
+      # - setting a positive timeout will do a bounded select call,
+      #   enabling sharing of a single thread between multiple selectors
+      #   (in this case you will have to use a different configuration for the
+      #   selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
+      # - setting it to zero means polling, i.e. calling selectNow()
+      select-timeout = infinite
+
+      # When trying to assign a new connection to a selector and the chosen
+      # selector is at full capacity, retry selector choosing and assignment
+      # this many times before giving up
+      selector-association-retries = 10
+
+      # The maximum number of datagrams that are read in one go,
+      # higher numbers decrease latency, lower numbers increase fairness on
+      # the worker-dispatcher
+      receive-throughput = 3
+
+      # The number of bytes per direct buffer in the pool used to read or write
+      # network data from the kernel.
+      direct-buffer-size = 128 KiB
+
+      # The maximal number of direct buffers kept in the direct buffer pool for
+      # reuse.
+      direct-buffer-pool-limit = 1000
+
+      # Enable fine grained logging of what goes on inside the implementation.
+      # Be aware that this may log more than once per message sent to the actors
+      # of the udp implementation.
+      trace-logging = off
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # to be used for running the select() calls in the selectors
+      selector-dispatcher = "akka.io.pinned-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # for the read/write worker actors
+      worker-dispatcher = "akka.actor.internal-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # for the selector management actors
+      management-dispatcher = "akka.actor.internal-dispatcher"
+    }
+
+    udp-connected {
+
+      # The number of selectors to stripe the served channels over; each of
+      # these will use one select loop on the selector-dispatcher.
+      nr-of-selectors = 1
+
+      # Maximum number of open channels supported by this UDP module. Generally
+      # UDP does not require a large number of channels, therefore it is
+      # recommended to keep this setting low.
+      max-channels = 4096
+
+      # The select loop can be used in two modes:
+      # - setting "infinite" will select without a timeout, hogging a thread
+      # - setting a positive timeout will do a bounded select call,
+      #   enabling sharing of a single thread between multiple selectors
+      #   (in this case you will have to use a different configuration for the
+      #   selector-dispatcher, e.g. using "type=Dispatcher" with size 1)
+      # - setting it to zero means polling, i.e. calling selectNow()
+      select-timeout = infinite
+
+      # When trying to assign a new connection to a selector and the chosen
+      # selector is at full capacity, retry selector choosing and assignment
+      # this many times before giving up
+      selector-association-retries = 10
+
+      # The maximum number of datagrams that are read in one go,
+      # higher numbers decrease latency, lower numbers increase fairness on
+      # the worker-dispatcher
+      receive-throughput = 3
+
+      # The number of bytes per direct buffer in the pool used to read or write
+      # network data from the kernel.
+      direct-buffer-size = 128 KiB
+
+      # The maximal number of direct buffers kept in the direct buffer pool for
+      # reuse.
+      direct-buffer-pool-limit = 1000
+
+      # Enable fine grained logging of what goes on inside the implementation.
+      # Be aware that this may log more than once per message sent to the actors
+      # of the tcp implementation.
+      trace-logging = off
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # to be used for running the select() calls in the selectors
+      selector-dispatcher = "akka.io.pinned-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # for the read/write worker actors
+      worker-dispatcher = "akka.actor.internal-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # for the selector management actors
+      management-dispatcher = "akka.actor.internal-dispatcher"
+    }
+
+    dns {
+      # Fully qualified config path which holds the dispatcher configuration
+      # for the manager and resolver router actors.
+      # For actual router configuration see akka.actor.deployment./IO-DNS/*
+      dispatcher = "akka.actor.internal-dispatcher"
+
+      # Name of the subconfig at path akka.io.dns, see inet-address below
+      #
+      # Change to `async-dns` to use the new "native" DNS resolver,
+      # which is also capable of resolving SRV records.
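+      # Illustrative example only (the values are assumptions, not defaults):
+      # switching to the native resolver with explicit nameservers could look
+      # like this in application.conf:
+      #   akka.io.dns {
+      #     resolver = "async-dns"
+      #     async-dns.nameservers = [ "8.8.8.8", "8.8.4.4" ]
+      #   }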
+      resolver = "inet-address"
+
+      # To-be-deprecated DNS resolver implementation which uses the Java InetAddress to resolve DNS records.
+      # To be replaced by `akka.io.dns.async` which implements the DNS protocol natively and without blocking (unlike InetAddress, which does block)
+      inet-address {
+        # Must implement akka.io.DnsProvider
+        provider-object = "akka.io.InetAddressDnsProvider"
+
+        # To set the time to cache name resolutions
+        # Possible values:
+        # default: sun.net.InetAddressCachePolicy.get() and getNegative()
+        # forever: cache forever
+        # never: no caching
+        # n [time unit]: positive timeout with unit, for example 30s
+        positive-ttl = default
+        negative-ttl = default
+
+        # How often to sweep out expired cache entries.
+        # Note that this interval has nothing to do with TTLs
+        cache-cleanup-interval = 120s
+      }
+
+      async-dns {
+        provider-object = "akka.io.dns.internal.AsyncDnsProvider"
+
+        # Sets an upper bound for how long successfully resolved DNS entries
+        # are cached. If the DNS record has a smaller TTL value than this
+        # setting, the record's TTL will be used. Default is to use the record
+        # TTL with no cap.
+        # Possible values:
+        # forever: always use the minimum TTL from the found records
+        # never: never cache
+        # n [time unit] = cap the caching to this value
+        positive-ttl = forever
+
+        # Set how long the fact that a DNS record could not be found is
+        # cached. If a new resolution is done while the fact is cached it will
+        # be failed and not result in an actual DNS resolution. Default is
+        # to never cache.
+        # Possible values:
+        # never: never cache
+        # forever: cache a missing DNS record forever (you probably will not want to do this)
+        # n [time unit] = cache for this long
+        negative-ttl = never
+
+        # Configures nameservers to query during DNS resolution.
+        # Defaults to the nameservers that would be used by the JVM by default.
+        # Set to a list of IPs to override the servers, e.g. [ "8.8.8.8", "8.8.4.4" ] for Google's servers
+        # If multiple are defined then they are tried in order until one responds
+        nameservers = default
+
+        # The time that a request is allowed to live before being discarded
+        # given no reply. The lower bound of this should always be the amount
+        # of time to reasonably expect a DNS server to reply within.
+        # If multiple name servers are provided then each gets this long to respond before trying
+        # the next one
+        resolve-timeout = 5s
+
+        # How often to sweep out expired cache entries.
+        # Note that this interval has nothing to do with TTLs
+        cache-cleanup-interval = 120s
+
+        # Configures the list of search domains.
+        # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on
+        # other platforms, will not make any attempt to lookup the search domains). Set to a single domain, or
+        # a list of domains, e.g. [ "example.com", "example.net" ].
+        search-domains = default
+
+        # Any hosts that have a number of dots less than this will not be looked up directly, instead, a search on
+        # the search domains will be tried first. This corresponds to the ndots option in /etc/resolv.conf, see
+        # https://linux.die.net/man/5/resolver for more info.
+        # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on
+        # other platforms, will default to 1).
+        ndots = default
+      }
+    }
+  }
+
+
+  # CoordinatedShutdown is an extension that will perform registered
+  # tasks in the order that is defined by the phases. It is started
+  # by calling CoordinatedShutdown(system).run(). This can be triggered
+  # by different things, for example:
+  # - JVM shutdown hook will by default run CoordinatedShutdown
+  # - Cluster node will automatically run CoordinatedShutdown when it
+  #   sees itself as Exiting
+  # - A management console or other application specific command can
+  #   run CoordinatedShutdown
+  coordinated-shutdown {
+    # The timeout that will be used for a phase if not specified with
+    # 'timeout' in the phase
+    default-phase-timeout = 5 s
+
+    # Terminate the ActorSystem in the last phase actor-system-terminate.
+    terminate-actor-system = on
+
+    # Exit the JVM (System.exit(0)) in the last phase actor-system-terminate
+    # if this is set to 'on'. It is done after termination of the
+    # ActorSystem if terminate-actor-system=on, otherwise it is done
+    # immediately when the last phase is reached.
+    exit-jvm = off
+
+    # Exit status to use on System.exit(int) when 'exit-jvm' is 'on'.
+    exit-code = 0
+
+    # Run the coordinated shutdown when the JVM process exits, e.g.
+    # via kill SIGTERM signal (SIGINT ctrl-c doesn't work).
+    # This property is related to `akka.jvm-shutdown-hooks` above.
+    run-by-jvm-shutdown-hook = on
+
+    # Run the coordinated shutdown when ActorSystem.terminate is called.
+    # Enabling this and disabling terminate-actor-system is not a supported
+    # combination (will throw ConfigurationException at startup).
+    run-by-actor-system-terminate = on
+
+    # When Coordinated Shutdown is triggered an instance of `Reason` is
+    # required. That value can be used to override the default settings.
+    # Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be
+    # overridden depending on the reason.
+    reason-overrides {
+      # Overrides are applied using the `reason.getClass.getName`.
+      # Overrides the `exit-code` when the `Reason` is a cluster
+      # Downing or a Cluster Join Unsuccessful event
+      "akka.actor.CoordinatedShutdown$ClusterDowningReason$" {
+        exit-code = -1
+      }
+      "akka.actor.CoordinatedShutdown$ClusterJoinUnsuccessfulReason$" {
+        exit-code = -1
+      }
+    }
+
+    #//#coordinated-shutdown-phases
+    # CoordinatedShutdown is enabled by default and will run the tasks that
+    # are added to these phases by individual Akka modules and user logic.
+    #
+    # The phases are ordered as a DAG by defining the dependencies between the phases
+    # to make sure shutdown tasks are run in the right order.
+    #
+    # In general user tasks belong in the first few phases, but there may be use
+    # cases where you would want to hook in new phases or register tasks later in
+    # the DAG.
+    #
+    # Each phase is defined as a named config section with the
+    # following optional properties:
+    # - timeout=15s: Override the default-phase-timeout for this phase.
+    # - recover=off: If the phase fails the shutdown is aborted
+    #                and dependent phases will not be executed.
+    # - enabled=off: Skip all tasks registered in this phase. DO NOT use
+    #                this to disable phases unless you are absolutely sure what the
+    #                consequences are. Many of the built in tasks depend on other tasks
+    #                having been executed in earlier phases and may break if those are disabled.
+    # depends-on=[]: Run the phase after the given phases
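+    # As an illustrative sketch only (the phase name is hypothetical), an
+    # application.conf could register an additional phase using these
+    # properties:
+    #   akka.coordinated-shutdown.phases {
+    #     my-app-cleanup {
+    #       timeout = 15 s
+    #       depends-on = [before-service-unbind]
+    #     }
+    #   }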
+    phases {
+
+      # The first pre-defined phase that applications can add tasks to.
+      # Note that more phases can be added in the application's
+      # configuration by overriding this phase with an additional
+      # depends-on.
+      before-service-unbind {
+      }
+
+      # Stop accepting new incoming connections.
+      # This is where you can register tasks that make a server stop accepting new connections. Already
+      # established connections should be allowed to continue and complete if possible.
+      service-unbind {
+        depends-on = [before-service-unbind]
+      }
+
+      # Wait for requests that are in progress to be completed.
+      # This is where you register tasks that will wait for already established connections to complete, potentially
+      # also first telling them that it is time to close down.
+      service-requests-done {
+        depends-on = [service-unbind]
+      }
+
+      # Final shutdown of service endpoints.
+      # This is where you would add tasks that forcefully kill connections that are still around.
+      service-stop {
+        depends-on = [service-requests-done]
+      }
+
+      # Phase for custom application tasks that are to be run
+      # after service shutdown and before cluster shutdown.
+      before-cluster-shutdown {
+        depends-on = [service-stop]
+      }
+
+      # Graceful shutdown of the Cluster Sharding regions.
+      # This phase is not meant for users to add tasks to.
+      cluster-sharding-shutdown-region {
+        timeout = 10 s
+        depends-on = [before-cluster-shutdown]
+      }
+
+      # Emit the leave command for the node that is shutting down.
+      # This phase is not meant for users to add tasks to.
+      cluster-leave {
+        depends-on = [cluster-sharding-shutdown-region]
+      }
+
+      # Shutdown cluster singletons
+      # This is done as late as possible to allow the shard region shutdown triggered in
+      # the "cluster-sharding-shutdown-region" phase to complete before the shard coordinator is shut down.
+      # This phase is not meant for users to add tasks to.
+      cluster-exiting {
+        timeout = 10 s
+        depends-on = [cluster-leave]
+      }
+
+      # Wait until exiting has been completed
+      # This phase is not meant for users to add tasks to.
+      cluster-exiting-done {
+        depends-on = [cluster-exiting]
+      }
+
+      # Shutdown the cluster extension
+      # This phase is not meant for users to add tasks to.
+      cluster-shutdown {
+        depends-on = [cluster-exiting-done]
+      }
+
+      # Phase for custom application tasks that are to be run
+      # after cluster shutdown and before ActorSystem termination.
+      before-actor-system-terminate {
+        depends-on = [cluster-shutdown]
+      }
+
+      # Last phase. See terminate-actor-system and exit-jvm above.
+      # Don't add phases that depend on this phase because the
+      # dispatcher and scheduler of the ActorSystem have been shut down.
+      # This phase is not meant for users to add tasks to.
+      actor-system-terminate {
+        timeout = 10 s
+        depends-on = [before-actor-system-terminate]
+      }
+    }
+    #//#coordinated-shutdown-phases
+  }
+
+  #//#circuit-breaker-default
+  # Configuration for circuit breakers created with the APIs accepting an id to
+  # identify or look up the circuit breaker.
+  # Note: Circuit breakers created without ids are not affected by this configuration.
+  # A child configuration section with the same name as the circuit breaker identifier
+  # will be used, with fallback to the `akka.circuit-breaker.default` section.
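+  # Illustrative example only (the identifier "data-access" is hypothetical):
+  # settings for a single breaker could be overridden in application.conf as:
+  #   akka.circuit-breaker {
+  #     data-access {
+  #       max-failures = 5
+  #       call-timeout = 2s
+  #     }
+  #   }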
+  circuit-breaker {
+
+    # Default configuration that is used if a configuration section
+    # with the circuit breaker identifier is not defined.
+    default {
+      # Number of failures before opening the circuit.
+      max-failures = 10
+
+      # Duration of time after which to consider a call a failure.
+      call-timeout = 10s
+
+      # Duration of time in open state after which to attempt to close
+      # the circuit, by first entering the half-open state.
+      reset-timeout = 15s
+
+      # The upper bound of reset-timeout
+      max-reset-timeout = 36500d
+
+      # Exponential backoff
+      # For details see https://en.wikipedia.org/wiki/Exponential_backoff
+      exponential-backoff = 1.0
+
+      # Additional random delay based on this factor is added to backoff
+      # For example 0.2 adds up to 20% delay
+      # In order to skip this additional delay set it to 0
+      random-factor = 0.0
+
+      # An allowlist of FQCNs of Exceptions that the CircuitBreaker
+      # should not consider failures. By default all exceptions are
+      # considered failures.
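+      # For example (illustrative only, the class name is hypothetical):
+      #   exception-allowlist = ["com.example.myapp.NotFoundException"]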
+      exception-allowlist = []
+    }
+  }
+  #//#circuit-breaker-default
+
+}
diff --git a/akka/repackaged-akka-jar/src/main/resources/actor_typed_reference.conf b/akka/repackaged-akka-jar/src/main/resources/actor_typed_reference.conf
new file mode 100644 (file)
index 0000000..d34d52a
--- /dev/null
@@ -0,0 +1,129 @@
+akka.actor.typed {
+
+  # List FQCN of `akka.actor.typed.ExtensionId`s which shall be loaded at actor system startup.
+  # Should be in the format: 'extensions = ["com.example.MyExtId1", "com.example.MyExtId2"]' etc.
+  # See the Akka Documentation for more info about Extensions
+  extensions = []
+
+  # List FQCN of extensions which shall be loaded at actor system startup.
+  # Library extensions are regular extensions that are loaded at startup and are
+  # available for third party library authors to enable auto-loading of extensions when
+  # present on the classpath. This is done by appending entries:
+  # 'library-extensions += "Extension"' in the library `reference.conf`.
+  #
+  # Should not be set by end user applications in 'application.conf', use the extensions property for that
+  #
+  library-extensions = ${?akka.actor.typed.library-extensions} []
+
+  # Receptionist is started eagerly to allow clustered receptionist to gather remote registrations early on.
+  library-extensions += "akka.actor.typed.receptionist.Receptionist$"
+
+  # While an actor is restarted (waiting for backoff to expire and children to stop)
+  # incoming messages and signals are stashed, and delivered later to the newly restarted
+  # behavior. This property defines the capacity in number of messages of the stash
+  # buffer. If the capacity is exceeded then additional incoming messages are dropped.
+  restart-stash-capacity = 1000
+
+  # Typed mailbox defaults to the single consumer mailbox as the balancing dispatcher is not supported
+  default-mailbox {
+    mailbox-type = "akka.dispatch.SingleConsumerOnlyUnboundedMailbox"
+  }
+}
+
+# Load typed extensions by a classic extension.
+akka.library-extensions += "akka.actor.typed.internal.adapter.ActorSystemAdapter$LoadTypedExtensions"
+
+akka.actor {
+  serializers {
+    typed-misc = "akka.actor.typed.internal.MiscMessageSerializer"
+    service-key = "akka.actor.typed.internal.receptionist.ServiceKeySerializer"
+  }
+
+  serialization-identifiers {
+    "akka.actor.typed.internal.MiscMessageSerializer" = 24
+    "akka.actor.typed.internal.receptionist.ServiceKeySerializer" = 26
+  }
+
+  serialization-bindings {
+    "akka.actor.typed.ActorRef" = typed-misc
+    "akka.actor.typed.internal.adapter.ActorRefAdapter" = typed-misc
+    "akka.actor.typed.internal.receptionist.DefaultServiceKey" = service-key
+  }
+}
+
+# When using Akka Typed (having akka-actor-typed in classpath) the
+# akka.event.slf4j.Slf4jLogger is enabled instead of the DefaultLogger
+# even though it has not been explicitly defined in `akka.loggers`
+# configuration.
+#
+# Slf4jLogger will be used for all Akka classic logging via eventStream,
+# including logging from Akka internals. The Slf4jLogger is then using
+# an ordinary org.slf4j.Logger to emit the log events.
+#
+# The Slf4jLoggingFilter is also enabled automatically.
+#
+# This behavior can be disabled by setting this property to `off`.
+akka.use-slf4j = on
+
+akka.reliable-delivery {
+  producer-controller {
+
+    # To avoid head of line blocking from serialization and transfer
+    # of large messages this can be enabled.
+    # Large messages are chunked into pieces of the given size in bytes. The
+    # chunked messages are sent separately and assembled on the consumer side.
+    # Serialization and deserialization is performed by the ProducerController and
+    # ConsumerController respectively instead of in the remote transport layer.
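+    # For example (illustrative only; per the comment above the size is given
+    # in bytes): chunk-large-messages = 100000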
+    chunk-large-messages = off
+
+    durable-queue {
+      # The ProducerController uses this timeout for the requests to
+      # the durable queue. If there is no reply within the timeout it
+      # will be retried.
+      request-timeout = 3s
+
+      # The ProducerController retries requests to the durable queue this
+      # number of times before failing.
+      retry-attempts = 10
+
+      # The ProducerController retries sending the first message with this interval
+      # until it has been confirmed.
+      resend-first-interval = 1s
+    }
+  }
+
+  consumer-controller {
+    # Number of messages in flight between ProducerController and
+    # ConsumerController. The ConsumerController requests for more messages
+    # when half of the window has been used.
+    flow-control-window = 50
+
+    # The ConsumerController resends flow control messages to the
+    # ProducerController, starting with resend-interval-min and gradually
+    # increasing it to resend-interval-max when idle.
+    resend-interval-min = 2s
+    resend-interval-max = 30s
+
+    # If this is enabled lost messages will not be resent, but flow control is used.
+    # This can be more efficient since messages don't have to be
+    # kept in memory in the `ProducerController` until they have been
+    # confirmed, but the drawback is that lost messages will not be delivered.
+    only-flow-control = false
+  }
+
+  work-pulling {
+    producer-controller = ${akka.reliable-delivery.producer-controller}
+    producer-controller {
+      # Limit of how many messages that can be buffered when there
+      # is no demand from the consumer side.
+      buffer-size = 1000
+
+      # Ask timeout for sending message to worker until receiving Ack from worker
+      internal-ask-timeout = 60s
+
+      # Chunked messages not implemented for work-pulling yet. Override to not
+      # propagate property from akka.reliable-delivery.producer-controller.
+      chunk-large-messages = off
+    }
+  }
+}
diff --git a/akka/repackaged-akka-jar/src/main/resources/cluster_reference.conf b/akka/repackaged-akka-jar/src/main/resources/cluster_reference.conf
new file mode 100644 (file)
index 0000000..6fa0e94
--- /dev/null
@@ -0,0 +1,504 @@
+######################################
+# Akka Cluster Reference Config File #
+######################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+akka {
+
+  cluster {
+    # Initial contact points of the cluster.
+    # The nodes to join automatically at startup.
+    # Comma separated full URIs, each defined by a string of the form
+    # "akka://system@hostname:port"
+    # Leave as empty if the node is supposed to be joined manually.
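+    # For example (illustrative only; the system name, hosts and port are
+    # assumptions):
+    #   seed-nodes = [
+    #     "akka://MyClusterSystem@10.0.0.1:2550",
+    #     "akka://MyClusterSystem@10.0.0.2:2550"]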
+    seed-nodes = []
+
+    # How long to wait for one of the seed nodes to reply to initial join request.
+    # When this is the first seed node and there is no positive reply from the other
+    # seed nodes within this timeout it will join itself to bootstrap the cluster.
+    # When this is not the first seed node the join attempts will be performed with
+    # this interval.  
+    seed-node-timeout = 5s
+
+    # If a join request fails it will be retried after this period.
+    # Disable join retry by specifying "off".
+    retry-unsuccessful-join-after = 10s
+    
+    # The joining of given seed nodes will by default be retried indefinitely until
+    # a successful join. That process can be aborted if unsuccessful by defining this
+    # timeout. When aborted it will run CoordinatedShutdown, which by default will
+    # terminate the ActorSystem. CoordinatedShutdown can also be configured to exit
+    # the JVM. It is useful to define this timeout if the seed-nodes are assembled
+    # dynamically and a restart with new seed-nodes should be tried after unsuccessful
+    # attempts.   
+    shutdown-after-unsuccessful-join-seed-nodes = off
+
+    # Time margin after which shards or singletons that belonged to a downed/removed
+    # partition are created in surviving partition. The purpose of this margin is that
+    # in case of a network partition the persistent actors in the non-surviving partitions
+    # must be stopped before corresponding persistent actors are started somewhere else.
+    # This is useful if you implement downing strategies that handle network partitions,
+    # e.g. by keeping the larger side of the partition and shutting down the smaller side.
+    # Disable with "off" or specify a duration to enable.
+    #
+    # When using the `akka.cluster.sbr.SplitBrainResolver` as downing provider it will use
+    # the akka.cluster.split-brain-resolver.stable-after as the default down-removal-margin
+    # if this down-removal-margin is undefined.
+    down-removal-margin = off
+
+    # Pluggable support for downing of nodes in the cluster.
+    # If this setting is left empty the `NoDowning` provider is used and no automatic downing will be performed.
+    #
+    # If specified the value must be the fully qualified class name of a subclass of
+    # `akka.cluster.DowningProvider` having a public one argument constructor accepting an `ActorSystem`
+    downing-provider-class = ""
+
+    # Artery only setting
+    # When a node has been gracefully removed, let this time pass (to allow for example
+    # cluster singleton handover to complete) and then quarantine the removed node.
+    quarantine-removed-node-after = 5s
+
+    # If this is set to "off", the leader will not move 'Joining' members to 'Up' during a network
+    # split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
+    # so they become part of the cluster even during a network split. The leader will
+    # move `Joining` members to 'WeaklyUp' after this configured duration without convergence.
+    # The leader will move 'WeaklyUp' members to 'Up' status once convergence has been reached.
+    allow-weakly-up-members = 7s
+
+    # The roles of this member. List of strings, e.g. roles = ["A", "B"].
+    # The roles are part of the membership information and can be used by
+    # routers or other services to distribute work to certain member types,
+    # e.g. front-end and back-end nodes.
+    # Roles are not allowed to start with "dc-" as that is reserved for the
+    # special role assigned from the data-center a node belongs to (see the
+    # multi-data-center section below)
+    roles = []
+    
+    # Run the coordinated shutdown from phase 'cluster-shutdown' when the cluster
+    # is shutdown for other reasons than when leaving, e.g. when downing. This
+    # will terminate the ActorSystem when the cluster extension is shutdown.
+    run-coordinated-shutdown-when-down = on
+
+    role {
+      # Minimum required number of members of a certain role before the leader
+      # changes member status of 'Joining' members to 'Up'. Typically used together
+      # with 'Cluster.registerOnMemberUp' to defer some action, such as starting
+      # actors, until the cluster has reached a certain size.
+      # E.g. to require 2 nodes with role 'frontend' and 3 nodes with role 'backend':
+      #   frontend.min-nr-of-members = 2
+      #   backend.min-nr-of-members = 3
+      #<role-name>.min-nr-of-members = 1
+    }
+
+    # Application version of the deployment. Used by rolling update features
+    # to distinguish between old and new nodes. The typical convention is to use
+    # 3 digit version numbers `major.minor.patch`, but one or two digits are also
+    # supported.
+    #
+    # If no `.` is used it is interpreted as a single digit version number or as
+    # plain alphanumeric if it couldn't be parsed as a number.
+    #
+    # It may also have a qualifier at the end for 2 or 3 digit version numbers such
+    # as "1.2-RC1".
+    # For 1 digit with qualifier, 1-RC1, it is interpreted as plain alphanumeric.
+    #
+    # It has support for https://github.com/dwijnand/sbt-dynver format with `+` or
+    # `-` separator. The number of commits from the tag is handled as a numeric part.
+    # For example `1.0.0+3-73475dce26` is less than `1.0.10+10-ed316bd024` (3 < 10).
+    app-version = "0.0.0"
+
+    # Minimum required number of members before the leader changes member status
+    # of 'Joining' members to 'Up'. Typically used together with
+    # 'Cluster.registerOnMemberUp' to defer some action, such as starting actors,
+    # until the cluster has reached a certain size.
+    min-nr-of-members = 1
+
+    # Enable/disable info level logging of cluster events.
+    # These are logged with logger name `akka.cluster.Cluster`.
+    log-info = on
+
+    # Enable/disable verbose info-level logging of cluster events
+    # for temporary troubleshooting. Defaults to 'off'.
+    # These are logged with logger name `akka.cluster.Cluster`.
+    log-info-verbose = off
+
+    # Enable or disable JMX MBeans for management of the cluster
+    jmx.enabled = on
+
+    # Enable or disable multiple JMX MBeans in the same JVM
+    # If this is disabled, the MBean Object name is "akka:type=Cluster"
+    # If this is enabled, the MBean Object names become "akka:type=Cluster,port=$clusterPortNumber"
+    jmx.multi-mbeans-in-same-jvm = off
+
+    # how long should the node wait before starting the periodic maintenance tasks?
+    periodic-tasks-initial-delay = 1s
+
+    # how often should the node send out gossip information?
+    gossip-interval = 1s
+    
+    # discard incoming gossip messages if not handled within this duration
+    gossip-time-to-live = 2s
+
+    # how often should the leader perform maintenance tasks?
+    leader-actions-interval = 1s
+
+    # how often should the node move nodes, marked as unreachable by the failure
+    # detector, out of the membership ring?
+    unreachable-nodes-reaper-interval = 1s
+
+    # How often the current internal stats should be published.
+    # A value of 0s can be used to always publish the stats, when it happens.
+    # Disable with "off".
+    publish-stats-interval = off
+
+    # The id of the dispatcher to use for cluster actors.
+    # If specified you need to define the settings of the actual dispatcher.
+    use-dispatcher = "akka.actor.internal-dispatcher"
+
+    # Gossip to a random node with newer or older state information, if any,
+    # with this probability. Otherwise gossip to any random live node.
+    # Probability value is between 0.0 and 1.0. 0.0 means never, 1.0 means always.
+    gossip-different-view-probability = 0.8
+    
+    # Reduce the above probability when the number of nodes in the cluster is
+    # greater than this value.
+    reduce-gossip-different-view-probability = 400
+
+    # When a node is removed the removal is marked with a tombstone
+    # which is kept at least this long, after which it is pruned. If there is a partition
+    # longer than this it could lead to removed nodes being re-added to the cluster
+    prune-gossip-tombstones-after = 24h
+
+    # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
+    # [Hayashibara et al]) used by the cluster subsystem to detect unreachable
+    # members.
+    # The default PhiAccrualFailureDetector will trigger if there are no heartbeats within
+    # the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment,
+    # i.e. around 5.5 seconds with default settings.
+    failure-detector {
+
+      # FQCN of the failure detector implementation.
+      # It must implement akka.remote.FailureDetector and have
+      # a public constructor with a com.typesafe.config.Config and
+      # akka.actor.EventStream parameter.
+      implementation-class = "akka.remote.PhiAccrualFailureDetector"
+
+      # How often keep-alive heartbeat messages should be sent to each connection.
+      heartbeat-interval = 1 s
+
+      # Defines the failure detector threshold.
+      # A low threshold is prone to generate many wrong suspicions but ensures
+      # a quick detection in the event of a real crash. Conversely, a high
+      # threshold generates fewer mistakes but needs more time to detect
+      # actual crashes.
+      threshold = 8.0
+
+      # Number of the samples of inter-heartbeat arrival times to adaptively
+      # calculate the failure timeout for connections.
+      max-sample-size = 1000
+
+      # Minimum standard deviation to use for the normal distribution in
+      # AccrualFailureDetector. Too low standard deviation might result in
+      # too much sensitivity for sudden, but normal, deviations in heartbeat
+      # inter arrival times.
+      min-std-deviation = 100 ms
+
+      # Number of potentially lost/delayed heartbeats that will be
+      # accepted before considering it to be an anomaly.
+      # This margin is important to be able to survive sudden, occasional,
+      # pauses in heartbeat arrivals, due to for example garbage collect or
+      # network drop.
+      acceptable-heartbeat-pause = 3 s
+
+      # Number of member nodes that each member will send heartbeat messages to,
+      # i.e. each node will be monitored by this number of other nodes.
+      monitored-by-nr-of-members = 9
+      
+      # After the heartbeat request has been sent the first failure detection
+      # will start after this period, even though no heartbeat message has
+      # been received.
+      expected-response-after = 1 s
+
+    }
+
+    # Configures multi-dc specific heartbeating and other mechanisms. Many of
+    # them have a direct counter-part in "one datacenter mode", in which case
+    # these settings would not be used at all - they only apply if your cluster
+    # nodes are configured with at least 2 different `akka.cluster.data-center` values.
+    multi-data-center {
+
+      # Defines which data center this node belongs to. It is typically used to make islands of the
+      # cluster that are colocated. This can be used to make the cluster aware that it is running
+      # across multiple availability zones or regions. It can also be used for other logical
+      # grouping of nodes.
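+      # For example (illustrative only): self-data-center = "eu-west-1"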
+      self-data-center = "default"
+
+
+      # Try to limit the number of connections between data centers. Used for gossip and heartbeating.
+      # This will not limit connections created for the messaging of the application.
+      # If the cluster does not span multiple data centers, this value has no effect.
+      cross-data-center-connections = 5
+
+      # The n oldest nodes in a data center will choose to gossip to another data center with
+      # this probability. Must be a value between 0.0 and 1.0 where 0.0 means never, 1.0 means always.
+      # When a data center is first started (nodes < 5) a higher probability is used so other data
+      # centers find out about the new nodes more quickly
+      cross-data-center-gossip-probability = 0.2
+
+      failure-detector {
+        # FQCN of the failure detector implementation.
+        # It must implement akka.remote.FailureDetector and have
+        # a public constructor with a com.typesafe.config.Config and
+        # akka.actor.EventStream parameter.
+        implementation-class = "akka.remote.DeadlineFailureDetector"
+  
+        # Number of potentially lost/delayed heartbeats that will be
+        # accepted before considering it to be an anomaly.
+        # This margin is important to be able to survive sudden, occasional,
+        # pauses in heartbeat arrivals, due to for example garbage collect or
+        # network drop.
+        acceptable-heartbeat-pause = 10 s
+        
+        # How often keep-alive heartbeat messages should be sent to each connection.
+        heartbeat-interval = 3 s
+  
+        # After the heartbeat request has been sent the first failure detection
+        # will start after this period, even though no heartbeat message has
+        # been received.
+        expected-response-after = 1 s
+      }
+    }
+
+    # If the tick-duration of the default scheduler is longer than the
+    # tick-duration configured here a dedicated scheduler will be used for
+    # periodic tasks of the cluster, otherwise the default scheduler is used.
+    # See akka.scheduler settings for more details.
+    scheduler {
+      tick-duration = 33ms
+      ticks-per-wheel = 512
+    }
+
+    debug {
+      # Log heartbeat events (very verbose, useful mostly when debugging heartbeating issues).
+      # These are logged with logger name `akka.cluster.ClusterHeartbeat`.
+      verbose-heartbeat-logging = off
+
+      # log verbose details about gossip
+      verbose-gossip-logging = off
+    }
+
+    configuration-compatibility-check {
+
+      # Enforce configuration compatibility checks when joining a cluster.
+      # Set to off to allow joining nodes to join a cluster even when configuration incompatibilities are detected or
+      # when the cluster does not support this feature. Compatibility checks are always performed and warning and
+      # error messages are logged.
+      #
+      # This is particularly useful for rolling updates on clusters that do not support that feature. Since the old
+      # cluster won't be able to send the compatibility confirmation to the joining node, the joining node won't be able
+      # to 'know' if it's allowed to join.
+      enforce-on-join = on
+
+      # Add named entry to this section with fully qualified class name of the JoinConfigCompatChecker
+      # to enable.
+      # Checkers defined in reference.conf can be disabled by application by using empty string value
+      # for the named entry.
+      checkers {
+        akka-cluster = "akka.cluster.JoinConfigCompatCheckCluster"
+      }
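+      # For example (illustrative only), the built-in checker above could be
+      # disabled from application.conf with:
+      #   akka.cluster.configuration-compatibility-check.checkers.akka-cluster = ""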
+
+      # Some configuration properties might not be appropriate to transfer between nodes
+      # and such properties can be excluded from the configuration compatibility check by adding
+      # the paths of the properties to this list. Sensitive paths are grouped by key. Modules and third-party libraries
+      # can define their own set of sensitive paths without clashing with each other (as long they use unique keys).
+      #
+      # All properties starting with the paths defined here are excluded, i.e. you can add the path of a whole
+      # section here to skip everything inside that section.
+      sensitive-config-paths {
+        akka = [
+          "user.home", "user.name", "user.dir",
+          "socksNonProxyHosts", "http.nonProxyHosts", "ftp.nonProxyHosts",
+          "akka.remote.secure-cookie",
+          "akka.remote.classic.netty.ssl.security",
+          # Pre 2.6 path, keep around to avoid sending things misconfigured with old paths
+          "akka.remote.netty.ssl.security",
+          "akka.remote.artery.ssl"
+        ]
+      }
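+      # For example (illustrative only; the key and path are hypothetical), a
+      # module could contribute its own group from its reference.conf:
+      #   akka.cluster.configuration-compatibility-check.sensitive-config-paths {
+      #     my-module = [ "my-module.credentials" ]
+      #   }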
+
+    }
+  }
+
+  actor.deployment.default.cluster {
+    # enable cluster aware router that deploys to nodes in the cluster
+    enabled = off
+
+    # Maximum number of routees that will be deployed on each cluster
+    # member node.
+    # Note that max-total-nr-of-instances defines total number of routees, but
+    # number of routees per node will not be exceeded, i.e. if you
+    # define max-total-nr-of-instances = 50 and max-nr-of-instances-per-node = 2
+    # it will deploy 2 routees per new member in the cluster, up to
+    # 25 members.
+    max-nr-of-instances-per-node = 1
+    
+    # Maximum number of routees that will be deployed, in total
+    # on all nodes. See also description of max-nr-of-instances-per-node.
+    # For backwards compatibility reasons, nr-of-instances
+    # has the same purpose as max-total-nr-of-instances for cluster
+    # aware routers and nr-of-instances (if defined by user) takes
+    # precedence over max-total-nr-of-instances. 
+    max-total-nr-of-instances = 10000
+
+    # Defines if routees are allowed to be located on the same node as
+    # the head router actor, or only on remote nodes.
+    # Useful for master-worker scenario where all routees are remote.
+    allow-local-routees = on
+
+    # Use members with all specified roles, or all members if undefined or empty.
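+    # For example (illustrative only): use-roles = ["backend"]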
+    use-roles = []
+
+    # Deprecated, since Akka 2.5.4, replaced by use-roles
+    # Use members with specified role, or all members if undefined or empty.
+    use-role = ""
+  }
+
+  # Protobuf serializer for cluster messages
+  actor {
+    serializers {
+      akka-cluster = "akka.cluster.protobuf.ClusterMessageSerializer"
+    }
+
+    serialization-bindings {
+      "akka.cluster.ClusterMessage" = akka-cluster
+      "akka.cluster.routing.ClusterRouterPool" = akka-cluster
+    }
+    
+    serialization-identifiers {
+      "akka.cluster.protobuf.ClusterMessageSerializer" = 5
+    }
+    
+  }
+
+}
+
+#//#split-brain-resolver
+
+# To enable the split brain resolver you first need to enable the provider in your application.conf:
+# akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+akka.cluster.split-brain-resolver {
+  # Select one of the available strategies (see descriptions below):
+  # static-quorum, keep-majority, keep-oldest, down-all, lease-majority
+  active-strategy = keep-majority
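+  # Illustrative example only: enabling the resolver with a static quorum for a
+  # cluster planned for five nodes (at most quorum-size * 2 - 1 = 5 members)
+  # could look like this in application.conf:
+  #   akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+  #   akka.cluster.split-brain-resolver {
+  #     active-strategy = static-quorum
+  #     static-quorum.quorum-size = 3
+  #   }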
+
+  #//#stable-after
+  # Time margin after which shards or singletons that belonged to a downed/removed
+  # partition are created in surviving partition. The purpose of this margin is that
+  # in case of a network partition the persistent actors in the non-surviving partitions
+  # must be stopped before corresponding persistent actors are started somewhere else.
+  # This is useful if you implement downing strategies that handle network partitions,
+  # e.g. by keeping the larger side of the partition and shutting down the smaller side.
+  # Decision is taken by the strategy when there has been no membership or
+  # reachability changes for this duration, i.e. the cluster state is stable.
+  stable-after = 20s
+  #//#stable-after
+
+  # When reachability observations by the failure detector are changed the SBR decisions
+  # are deferred until there are no changes within the 'stable-after' duration.
+  # If this continues for too long it might be an indication of an unstable system/network
+  # and it could result in delayed or conflicting decisions on separate sides of a network
+  # partition.
+  # As a precaution for that scenario all nodes are downed if no decision is made within
+  # `stable-after + down-all-when-unstable` from the first unreachability event.
+  # The measurement is reset if all unreachable have been healed, downed or removed, or
+  # if there are no changes within `stable-after * 2`.
+  # The value can be on, off, or a duration.
+  # By default it is 'on' and then it is derived to be 3/4 of stable-after, but not less than
+  # 4 seconds.
+  down-all-when-unstable = on
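+  # For example (illustrative only), a fixed duration instead of the derived
+  # default: down-all-when-unstable = 15s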
+
+}
+#//#split-brain-resolver
+
+# Down the unreachable nodes if the number of remaining nodes are greater than or equal to
+# the given 'quorum-size'. Otherwise down the reachable nodes, i.e. it will shut down that
+# side of the partition. In other words, the 'size' defines the minimum number of nodes
+# that the cluster must have to be operational. If there are unreachable nodes when starting
+# up the cluster, before reaching this limit, the cluster may shut itself down immediately.
+# This is not an issue if you start all nodes at approximately the same time.
+#
+# Note that you must not add more members to the cluster than 'quorum-size * 2 - 1', because
+# then both sides may down each other and thereby form two separate clusters. For example,
+# quorum-size configured to 3 in a 6 node cluster may result in a split where each side
+# consists of 3 nodes each, i.e. each side thinks it has enough nodes to continue by
+# itself. A warning is logged if this recommendation is violated.
+#//#static-quorum
+akka.cluster.split-brain-resolver.static-quorum {
+  # minimum number of nodes that the cluster must have
+  quorum-size = undefined
+
+  # if the 'role' is defined the decision is based only on members with that 'role'
+  role = ""
+}
+#//#static-quorum
+
+# Down the unreachable nodes if the current node is in the majority part based on the last known
+# membership information. Otherwise down the reachable nodes, i.e. the own part. If the
+# parts are of equal size the part containing the node with the lowest address is kept.
+# Note that if there are more than two partitions and none is in majority each part
+# will shut itself down, terminating the whole cluster.
+#//#keep-majority
+akka.cluster.split-brain-resolver.keep-majority {
+  # if the 'role' is defined the decision is based only on members with that 'role'
+  role = ""
+}
+#//#keep-majority
+
+# Down the part that does not contain the oldest member (current singleton).
+#
+# There is one exception to this rule if 'down-if-alone' is defined to 'on'.
+# Then, if the oldest node has partitioned from all other nodes the oldest
+# will down itself and keep all other nodes running. The strategy will not
+# down the single oldest node when it is the only remaining node in the cluster.
+#
+# Note that if the oldest node crashes the others will remove it from the cluster
+# when 'down-if-alone' is 'on', otherwise they will down themselves if the
+# oldest node crashes, i.e. shutdown the whole cluster together with the oldest node.
+#//#keep-oldest
+akka.cluster.split-brain-resolver.keep-oldest {
+  # Enable downing of the oldest node when it is partitioned from all other nodes
+  down-if-alone = on
+
+  # if the 'role' is defined the decision is based only on members with that 'role',
+  # i.e. using the oldest member (singleton) within the nodes with that role
+  role = ""
+}
+#//#keep-oldest
+
+# Keep the part that can acquire the lease, and down the other part.
+# Best effort is to keep the side that has most nodes, i.e. the majority side.
+# This is achieved by adding a delay before trying to acquire the lease on the
+# minority side.
+#//#lease-majority
+akka.cluster.split-brain-resolver.lease-majority {
+  lease-implementation = ""
+
+  # The recommended format for the lease name is "<service-name>-akka-sbr".
+  # When lease-name is not defined, the name will be set to "<actor-system-name>-akka-sbr"
+  lease-name = ""
+
+  # This delay is used on the minority side before trying to acquire the lease,
+  # as a best effort to try to keep the majority side.
+  acquire-lease-delay-for-minority = 2s
+
+  # Release the lease after this duration.
+  release-after = 40s
+
+  # If the 'role' is defined the majority/minority is based only on members with that 'role'.
+  role = ""
+}
+#//#lease-majority
diff --git a/akka/repackaged-akka-jar/src/main/resources/cluster_tools_reference.conf b/akka/repackaged-akka-jar/src/main/resources/cluster_tools_reference.conf
new file mode 100644 (file)
index 0000000..783326f
--- /dev/null
@@ -0,0 +1,231 @@
+############################################
+# Akka Cluster Tools Reference Config File #
+############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+# //#pub-sub-ext-config
+# Settings for the DistributedPubSub extension
+akka.cluster.pub-sub {
+  # Actor name of the mediator actor, /system/distributedPubSubMediator
+  name = distributedPubSubMediator
+
+  # Start the mediator on members tagged with this role.
+  # All members are used if undefined or empty.
+  role = ""
+
+  # The routing logic to use for 'Send'
+  # Possible values: random, round-robin, broadcast
+  routing-logic = random
+
+  # How often the DistributedPubSubMediator should send out gossip information
+  gossip-interval = 1s
+
+  # Removed entries are pruned after this duration
+  removed-time-to-live = 120s
+
+  # Maximum number of elements to transfer in one message when synchronizing the registries.
+  # Next chunk will be transferred in next round of gossip.
+  max-delta-elements = 3000
+
+  # When a message is published to a topic with no subscribers send it to the dead letters.
+  send-to-dead-letters-when-no-subscribers = on
+
+  # The id of the dispatcher to use for DistributedPubSubMediator actors.
+  # If specified you need to define the settings of the actual dispatcher.
+  use-dispatcher = "akka.actor.internal-dispatcher"
+}
+# //#pub-sub-ext-config
+
+# Protobuf serializer for cluster DistributedPubSubMediator messages
+akka.actor {
+  serializers {
+    akka-pubsub = "akka.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer"
+  }
+  serialization-bindings {
+    "akka.cluster.pubsub.DistributedPubSubMessage" = akka-pubsub
+    "akka.cluster.pubsub.DistributedPubSubMediator$Internal$SendToOneSubscriber" = akka-pubsub
+  }
+  serialization-identifiers {
+    "akka.cluster.pubsub.protobuf.DistributedPubSubMessageSerializer" = 9
+  }
+}
+
+
+# //#receptionist-ext-config
+# Settings for the ClusterClientReceptionist extension
+akka.cluster.client.receptionist {
+  # Actor name of the ClusterReceptionist actor, /system/receptionist
+  name = receptionist
+
+  # Start the receptionist on members tagged with this role.
+  # All members are used if undefined or empty.
+  role = ""
+
+  # The receptionist will send this number of contact points to the client
+  number-of-contacts = 3
+
+  # The actor that tunnels response messages to the client will be stopped
+  # after this time of inactivity.
+  response-tunnel-receive-timeout = 30s
+
+  # The id of the dispatcher to use for ClusterReceptionist actors.
+  # If specified you need to define the settings of the actual dispatcher.
+  use-dispatcher = "akka.actor.internal-dispatcher"
+
+  # How often failure detection heartbeat messages should be received for
+  # each ClusterClient
+  heartbeat-interval = 2s
+
+  # Number of potentially lost/delayed heartbeats that will be
+  # accepted before considering it to be an anomaly.
+  # The ClusterReceptionist is using the akka.remote.DeadlineFailureDetector, which
+  # will trigger if there are no heartbeats within the duration
+  # heartbeat-interval + acceptable-heartbeat-pause, i.e. 15 seconds with
+  # the default settings.
+  acceptable-heartbeat-pause = 13s
+
+  # Failure detection checking interval for checking all ClusterClients
+  failure-detection-interval = 2s
+}
+# //#receptionist-ext-config
+
+# //#cluster-client-config
+# Settings for the ClusterClient
+akka.cluster.client {
+  # Actor paths of the ClusterReceptionist actors on the servers (cluster nodes)
+  # that the client will try to contact initially. It is mandatory to specify
+  # at least one initial contact.
+  # Comma separated full actor paths, each defined by a string of the form
+  # "akka://system@hostname:port/system/receptionist"
+  initial-contacts = []
+
+  # Interval at which the client retries to establish contact with one of
+  # ClusterReceptionist on the servers (cluster nodes)
+  establishing-get-contacts-interval = 3s
+
+  # Interval at which the client will ask the ClusterReceptionist for
+  # new contact points to be used for next reconnect.
+  refresh-contacts-interval = 60s
+
+  # How often failure detection heartbeat messages should be sent
+  heartbeat-interval = 2s
+
+  # Number of potentially lost/delayed heartbeats that will be
+  # accepted before considering it to be an anomaly.
+  # The ClusterClient is using the akka.remote.DeadlineFailureDetector, which
+  # will trigger if there are no heartbeats within the duration
+  # heartbeat-interval + acceptable-heartbeat-pause, i.e. 15 seconds with
+  # the default settings.
+  acceptable-heartbeat-pause = 13s
+
+  # If the connection to the receptionist is not established the client will buffer
+  # this number of messages and deliver them when the connection is established.
+  # When the buffer is full old messages will be dropped when new messages are sent
+  # via the client. Use 0 to disable buffering, i.e. messages will be dropped
+  # immediately if the connection to the receptionist has not been established.
+  # Maximum allowed buffer size is 10000.
+  buffer-size = 1000
+
+  # If the connection to the receptionist is lost and the client has not been
+  # able to acquire a new connection for this long the client will stop itself.
+  # This duration makes it possible to watch the cluster client and react on a more permanent
+  # loss of connection with the cluster, for example by accessing some kind of
+  # service registry for an updated set of initial contacts to start a new cluster client with.
+  # If this is not wanted it can be set to "off" to disable the timeout and retry
+  # forever.
+  reconnect-timeout = off
+}
+# //#cluster-client-config
+
+# Protobuf serializer for ClusterClient messages
+akka.actor {
+  serializers {
+    akka-cluster-client = "akka.cluster.client.protobuf.ClusterClientMessageSerializer"
+  }
+  serialization-bindings {
+    "akka.cluster.client.ClusterClientMessage" = akka-cluster-client
+  }
+  serialization-identifiers {
+    "akka.cluster.client.protobuf.ClusterClientMessageSerializer" = 15
+  }
+}
+
+# //#singleton-config
+akka.cluster.singleton {
+  # The actor name of the child singleton actor.
+  singleton-name = "singleton"
+
+  # Singleton among the nodes tagged with specified role.
+  # If the role is not specified it's a singleton among all nodes in the cluster.
+  role = ""
+
+  # When a node is becoming oldest it sends a hand-over request to the previous oldest,
+  # that might be leaving the cluster. This is retried with this interval until
+  # the previous oldest confirms that the hand over has started or the previous
+  # oldest member is removed from the cluster (+ akka.cluster.down-removal-margin).
+  hand-over-retry-interval = 1s
+
+  # The number of retries are derived from hand-over-retry-interval and
+  # akka.cluster.down-removal-margin (or ClusterSingletonManagerSettings.removalMargin),
+  # but it will never be less than this property.
+  # If, after the hand-over retries, it is still not able to exchange the hand-over messages
+  # with the previous oldest it will restart itself by throwing ClusterSingletonManagerIsStuck,
+  # to start from a clean state. After that it will still not start the singleton instance
+  # until the previous oldest node has been removed from the cluster.
+  # On the other side, on the previous oldest node, the same number of retries minus 3 is used
+  # and after that the singleton instance is stopped.
+  # For large clusters it might be necessary to increase this to avoid too early timeouts while
+  # gossip dissemination of the Leaving to Exiting phase occurs. For normal leaving scenarios
+  # reducing this value will not give a quicker hand-over, but in extreme failure scenarios
+  # the recovery might be faster.
+  min-number-of-hand-over-retries = 15
+
+  # Config path of the lease to be taken before creating the singleton actor.
+  # If the lease is lost then the actor is restarted and it will need to re-acquire the lease.
+  # The default is no lease.
+  use-lease = ""
+
+  # The interval between retries for acquiring the lease
+  lease-retry-interval = 5s
+}
+# //#singleton-config
+
+# //#singleton-proxy-config
+akka.cluster.singleton-proxy {
+  # The actor name of the singleton actor that is started by the ClusterSingletonManager
+  singleton-name = ${akka.cluster.singleton.singleton-name}
+
+  # The role of the cluster nodes where the singleton can be deployed.
+  # Corresponding to the role used by the `ClusterSingletonManager`. If the role is not
+  # specified it's a singleton among all nodes in the cluster, and the `ClusterSingletonManager`
+  # must then also be configured in same way.
+  role = ""
+
+  # Interval at which the proxy will try to resolve the singleton instance.
+  singleton-identification-interval = 1s
+
+  # If the location of the singleton is unknown the proxy will buffer this
+  # number of messages and deliver them when the singleton is identified.
+  # When the buffer is full old messages will be dropped when new messages are
+  # sent via the proxy.
+  # Use 0 to disable buffering, i.e. messages will be dropped immediately if
+  # the location of the singleton is unknown.
+  # Maximum allowed buffer size is 10000.
+  buffer-size = 1000
+}
+# //#singleton-proxy-config
+
+# Serializer for cluster ClusterSingleton messages
+akka.actor {
+  serializers {
+    akka-singleton = "akka.cluster.singleton.protobuf.ClusterSingletonMessageSerializer"
+  }
+  serialization-bindings {
+    "akka.cluster.singleton.ClusterSingletonMessage" = akka-singleton
+  }
+  serialization-identifiers {
+    "akka.cluster.singleton.protobuf.ClusterSingletonMessageSerializer" = 14
+  }
+}
diff --git a/akka/repackaged-akka-jar/src/main/resources/cluster_typed_reference.conf b/akka/repackaged-akka-jar/src/main/resources/cluster_typed_reference.conf
new file mode 100644 (file)
index 0000000..4cd45a5
--- /dev/null
@@ -0,0 +1,66 @@
+############################################
+# Akka Cluster Typed Reference Config File #
+############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+akka.cluster.typed.receptionist {
+  # Updates with Distributed Data are done with this consistency level.
+  # Possible values: local, majority, all, 2, 3, 4 (n)
+  write-consistency = local
+
+  # Periodic task to remove actor references that are hosted by removed nodes,
+  # in case of abrupt termination.
+  pruning-interval = 3 s
+
+  # The periodic task to remove actor references that are hosted by removed nodes
+  # will only remove entries older than this duration. The reason for this
+  # is to avoid removing entries of nodes that haven't been visible as joining.
+  prune-removed-older-than = 60 s
+
+  # Shard the services over this many Distributed Data keys. With a large number of different
+  # service keys, storing all of them in the same Distributed Data entry would lead to large
+  # updates etc.; instead the keys are sharded across this number of keys. This must be the same on all nodes
+  # in a cluster, changing it requires a full cluster restart (stopping all nodes before starting them again)
+  distributed-key-count = 5
+
+  # Settings for the Distributed Data replicator used by Receptionist.
+  # Same layout as akka.cluster.distributed-data.
+  distributed-data = ${akka.cluster.distributed-data}
+  # make sure that by default it's for all roles (Play loads config in a different way)
+  distributed-data.role = ""
+}
+
+akka.cluster.ddata.typed {
+  # The timeout to use for ask operations in ReplicatorMessageAdapter.
+  # This should be longer than the timeout given in Replicator.WriteConsistency and
+  # Replicator.ReadConsistency. The replicator will always send a reply within those
+  # timeouts so the unexpected ask timeout should not occur, but for cleanup in a
+  # failure situation it must still exist.
+  # If askUpdate, askGet or askDelete takes longer than this timeout a
+  # java.util.concurrent.TimeoutException will be thrown by the requesting actor and
+  # may be handled by supervision.
+  replicator-message-adapter-unexpected-ask-timeout = 20 s
+}
+
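+# Illustrative application.conf sketch (editor-added): when using longer
+# Replicator.WriteConsistency/ReadConsistency timeouts, raise this adapter
+# timeout above them.
+#
+# akka.cluster.ddata.typed.replicator-message-adapter-unexpected-ask-timeout = 40 s
+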
+akka {
+  actor {
+    serialization-identifiers {
+      "akka.cluster.typed.internal.AkkaClusterTypedSerializer" = 28
+      "akka.cluster.typed.internal.delivery.ReliableDeliverySerializer" = 36
+    }
+    serializers {
+      typed-cluster = "akka.cluster.typed.internal.AkkaClusterTypedSerializer"
+      reliable-delivery = "akka.cluster.typed.internal.delivery.ReliableDeliverySerializer"
+    }
+    serialization-bindings {
+      "akka.cluster.typed.internal.receptionist.ClusterReceptionist$Entry" = typed-cluster
+      "akka.actor.typed.internal.pubsub.TopicImpl$MessagePublished" = typed-cluster
+      "akka.actor.typed.delivery.internal.DeliverySerializable" = reliable-delivery
+    }
+  }
+  cluster.configuration-compatibility-check.checkers {
+    receptionist = "akka.cluster.typed.internal.receptionist.ClusterReceptionistConfigCompatChecker"
+  }
+}
diff --git a/akka/repackaged-akka-jar/src/main/resources/distributed_data_reference.conf b/akka/repackaged-akka-jar/src/main/resources/distributed_data_reference.conf
new file mode 100644 (file)
index 0000000..f716157
--- /dev/null
@@ -0,0 +1,159 @@
+###############################################
+# Akka Distributed Data Reference Config File #
+###############################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+
+#//#distributed-data
+# Settings for the DistributedData extension
+akka.cluster.distributed-data {
+  # Actor name of the Replicator actor, /system/ddataReplicator
+  name = ddataReplicator
+
+  # Replicas are running on members tagged with this role.
+  # All members are used if undefined or empty.
+  role = ""
+
+  # How often the Replicator should send out gossip information
+  gossip-interval = 2 s
+  
+  # How often the subscribers will be notified of changes, if any
+  notify-subscribers-interval = 500 ms
+
+  # Logging of data with payload size in bytes larger than
+  # this value. Maximum detected size per key is logged once,
+  # with an increase threshold of 10%.
+  # It can be disabled by setting the property to off.
+  log-data-size-exceeding = 10 KiB
+
+  # Maximum number of entries to transfer in one round of gossip exchange when
+  # synchronizing the replicas. Next chunk will be transferred in next round of gossip.
+  # The actual number of data entries in each Gossip message is dynamically
+  # adjusted to not exceed the maximum remote message size (maximum-frame-size).
+  max-delta-elements = 500
+  
+  # The id of the dispatcher to use for Replicator actors.
+  # If specified you need to define the settings of the actual dispatcher.
+  use-dispatcher = "akka.actor.internal-dispatcher"
+
+  # How often the Replicator checks for pruning of data associated with
+  # removed cluster nodes. If this is set to 'off' the pruning feature will
+  # be completely disabled.
+  pruning-interval = 120 s
+  
+  # How long it takes to spread the data to all other replica nodes.
+  # This is used when initiating and completing the pruning process of data associated
+  # with removed cluster nodes. The time measurement is stopped when any replica is
+  # unreachable, but it's still recommended to configure this with a certain margin.
+  # It should be in the magnitude of minutes even though typical dissemination time
+  # is shorter (it grows logarithmically with the number of nodes). There is no advantage
+  # in setting this too low. Setting it to a large value will delay the pruning process.
+  max-pruning-dissemination = 300 s
+  
+  # The markers that pruning has been performed for a removed node are kept for this
+  # time and thereafter removed. If an old data entry that was never pruned is somehow
+  # injected and merged with existing data after this time the value will not be correct.
+  # This would be possible (although unlikely) in the case of a long network partition.
+  # It should be in the magnitude of hours. For durable data it is configured by
+  # 'akka.cluster.distributed-data.durable.pruning-marker-time-to-live'.
+  pruning-marker-time-to-live = 6 h
+  
+  # Serialized Write and Read messages are cached when they are sent to
+  # several nodes. If there is no further activity they are removed from the
+  # cache after this duration.
+  serializer-cache-time-to-live = 10s
+
+  # Update and Get operations are sent to oldest nodes first.
+  # This is useful together with Cluster Singleton, which is running on oldest nodes.
+  prefer-oldest = off
+  
+  # Settings for delta-CRDT
+  delta-crdt {
+    # enable or disable delta-CRDT replication
+    enabled = on
+    
+    # Some complex deltas grow in size for each update and above this
+    # threshold such deltas are discarded and sent as full state instead.
+    # This is number of elements or similar size hint, not size in bytes.
+    max-delta-size = 50
+  }
+  
+  durable {
+    # List of keys that are durable. Prefix matching is supported by using * at the
+    # end of a key.  
+    keys = []
+    
+    # The markers that pruning has been performed for a removed node are kept for this
+    # time and thereafter removed. If an old data entry that was never pruned is
+    # injected and merged with existing data after this time the value will not be correct.
+    # This would be possible if a replica with durable data didn't participate in the pruning
+    # (e.g. it was shut down) and later started after this time. A durable replica should not
+    # be stopped for longer than this duration, and if it is joining again after this
+    # duration its data should first be manually removed (from the lmdb directory).
+    # It should be in the magnitude of days. Note that there is a corresponding setting
+    # for non-durable data: 'akka.cluster.distributed-data.pruning-marker-time-to-live'.
+    pruning-marker-time-to-live = 10 d
+    
+    # Fully qualified class name of the durable store actor. It must be a subclass
+    # of akka.actor.Actor and handle the protocol defined in 
+    # akka.cluster.ddata.DurableStore. The class must have a constructor with 
+    # com.typesafe.config.Config parameter.
+    store-actor-class = akka.cluster.ddata.LmdbDurableStore
+    
+    use-dispatcher = akka.cluster.distributed-data.durable.pinned-store
+    
+    pinned-store {
+      executor = thread-pool-executor
+      type = PinnedDispatcher
+    }
+    
+    # Config for the LmdbDurableStore
+    lmdb {
+      # Directory of LMDB file. There are two options:
+      # 1. A relative or absolute path to a directory that ends with 'ddata';
+      #    the full name of the directory will then contain the name of the
+      #    ActorSystem and its remote port.
+      # 2. Otherwise the path is used as is, as a relative or absolute path to
+      #    a directory.
+      #
+      # When running in production you may want to configure this to a specific
+      # path (alt 2), since the default directory contains the remote port of the
+      # actor system to make the name unique. If using a dynamically assigned 
+      # port (0) it will be different each time and the previously stored data 
+      # will not be loaded.
+      dir = "ddata"
+      
+      # Size in bytes of the memory mapped file.
+      map-size = 100 MiB
+      
+      # Accumulating changes before storing improves performance, with the
+      # risk of losing the last writes if the JVM crashes.
+      # The interval is by default set to 'off' to write each update immediately.
+      # Enabling write behind by specifying a duration, e.g. 200ms, is especially 
+      # efficient when performing many writes to the same key, because it is only 
+      # the last value for each key that will be serialized and stored.  
+      # write-behind-interval = 200 ms
+      write-behind-interval = off
+    }
+  }
+  
+}
+#//#distributed-data
+
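+# Illustrative application.conf sketch (editor-added, hypothetical key names
+# and path): make selected keys durable and pin the LMDB directory to a fixed
+# location.
+#
+# akka.cluster.distributed-data.durable {
+#   keys = ["settings", "cache-*"]
+#   lmdb.dir = "/var/lib/myapp/ddata"
+# }
+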
+# Protobuf serializer for cluster DistributedData messages
+akka.actor {
+  serializers {
+    akka-data-replication = "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer"
+    akka-replicated-data = "akka.cluster.ddata.protobuf.ReplicatedDataSerializer"
+  }
+  serialization-bindings {
+    "akka.cluster.ddata.Replicator$ReplicatorMessage" = akka-data-replication
+    "akka.cluster.ddata.ReplicatedDataSerialization" = akka-replicated-data
+  }
+  serialization-identifiers {
+    "akka.cluster.ddata.protobuf.ReplicatedDataSerializer" = 11
+    "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer" = 12
+  }
+}
diff --git a/akka/repackaged-akka-jar/src/main/resources/persistence_reference.conf b/akka/repackaged-akka-jar/src/main/resources/persistence_reference.conf
new file mode 100644 (file)
index 0000000..db9ae1e
--- /dev/null
@@ -0,0 +1,355 @@
+###########################################################
+# Akka Persistence Extension Reference Configuration File #
+###########################################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits in your application.conf in order to override these settings.
+
+# A directory of persistence journal and snapshot store plugins is available at the
+# Akka Community Projects page https://akka.io/community/
+
+# Default persistence extension settings.
+akka.persistence {
+
+    # When starting many persistent actors at the same time, the journal
+    # and its data store are protected from being overloaded by limiting the
+    # number of recoveries that can be in progress at the same time. When
+    # exceeding the limit the actors will wait until other recoveries have
+    # been completed.
+    max-concurrent-recoveries = 50
+
+    # Fully qualified class name providing a default internal stash overflow strategy.
+    # It needs to be a subclass of akka.persistence.StashOverflowStrategyConfigurator.
+    # The default strategy throws StashOverflowException.
+    internal-stash-overflow-strategy = "akka.persistence.ThrowExceptionConfigurator"
+    journal {
+        # Absolute path to the journal plugin configuration entry used by 
+        # persistent actor by default.
+        # Persistent actor can override `journalPluginId` method 
+        # in order to rely on a different journal plugin.
+        plugin = ""
+        # List of journal plugins to start automatically. Use "" for the default journal plugin.
+        auto-start-journals = []
+    }
+    snapshot-store {
+        # Absolute path to the snapshot plugin configuration entry used by
+        # persistent actor by default.
+        # Persistent actor can override `snapshotPluginId` method
+        # in order to rely on a different snapshot plugin.
+        # It is not mandatory to specify a snapshot store plugin.
+        # If you don't use snapshots you don't have to configure it.
+        # Note that Cluster Sharding is using snapshots, so if you
+        # use Cluster Sharding you need to define a snapshot store plugin. 
+        plugin = ""
+        # List of snapshot stores to start automatically. Use "" for the default snapshot store.
+        auto-start-snapshot-stores = []
+    }
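+
+    # Illustrative application.conf sketch (editor-added, not an upstream
+    # default): select the journal and snapshot store plugins defined later
+    # in this file.
+    #
+    # akka.persistence.journal.plugin = "akka.persistence.journal.leveldb"
+    # akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
+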
+    # used as the default snapshot store if no plugin is configured
+    # (see `akka.persistence.snapshot-store`)
+    no-snapshot-store {
+      class = "akka.persistence.snapshot.NoSnapshotStore"
+    }
+    # Default reliable delivery settings.
+    at-least-once-delivery {
+        # Interval between re-delivery attempts.
+        redeliver-interval = 5s
+        # Maximum number of unconfirmed messages that will be sent in one 
+        # re-delivery burst.
+        redelivery-burst-limit = 10000
+        # After this number of delivery attempts a
+        # `ReliableRedelivery.UnconfirmedWarning` message will be sent to the actor.
+        warn-after-number-of-unconfirmed-attempts = 5
+        # Maximum number of unconfirmed messages that an actor with 
+        # AtLeastOnceDelivery is allowed to hold in memory.
+        max-unconfirmed-messages = 100000
+    }
+    # Default persistent extension thread pools.
+    dispatchers {
+        # Dispatcher used by every plugin which does not declare explicit
+        # `plugin-dispatcher` field.
+        default-plugin-dispatcher {
+            type = PinnedDispatcher
+            executor = "thread-pool-executor"
+        }
+        # Default dispatcher for message replay.
+        default-replay-dispatcher {
+            type = Dispatcher
+            executor = "fork-join-executor"
+            fork-join-executor {
+                parallelism-min = 2
+                parallelism-max = 8
+            }
+        }
+        # Default dispatcher for streaming snapshot IO
+        default-stream-dispatcher {
+            type = Dispatcher
+            executor = "fork-join-executor"
+            fork-join-executor {
+                parallelism-min = 2
+                parallelism-max = 8
+            }
+        }
+    }
+
+    # Fallback settings for journal plugin configurations.
+    # These settings are used if they are not defined in plugin config section.
+    journal-plugin-fallback {
+
+      # Fully qualified class name providing journal plugin api implementation.
+      # It is mandatory to specify this property.
+      # The class must have a constructor without parameters or constructor with
+      # one `com.typesafe.config.Config` parameter.
+      class = ""
+
+      # Dispatcher for the plugin actor.
+      plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+
+      # Dispatcher for message replay.
+      replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
+
+      # Removed: used to be the maximum size of a persistent message batch written to the journal.
+      # This setting is now without function; PersistentActor will write as many messages
+      # as it has accumulated since the last write.
+      max-message-batch-size = 200
+
+      # If more time than this elapses between individual events read from the
+      # journal during recovery, the recovery will fail.
+      # Note that it also affects reading the snapshot before replaying events on
+      # top of it, even though it is configured for the journal.
+      recovery-event-timeout = 30s
+
+      circuit-breaker {
+        max-failures = 10
+        call-timeout = 10s
+        reset-timeout = 30s
+      }
+
+      # The replay filter can detect a corrupt event stream by inspecting
+      # sequence numbers and writerUuid when replaying events.
+      replay-filter {
+        # What the filter should do when detecting invalid events.
+        # Supported values:
+        # `repair-by-discard-old` : discard events from old writers,
+        #                           warning is logged
+        # `fail` : fail the replay, error is logged
+        # `warn` : log warning but emit events untouched
+        # `off` : disable this feature completely
+        mode = repair-by-discard-old
+
+        # It uses a look ahead buffer for analyzing the events.
+        # This defines the size (in number of events) of the buffer.
+        window-size = 100
+
+        # How many old writerUuids to remember
+        max-old-writers = 10
+
+        # Set this to `on` to enable detailed debug logging of each
+        # replayed event.
+        debug = off
+      }
+    }
+
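+    # Illustrative application.conf sketch (editor-added): the fallback values
+    # above can be overridden per plugin, e.g. a stricter replay filter and a
+    # longer recovery timeout for the LevelDB journal defined later in this file.
+    #
+    # akka.persistence.journal.leveldb {
+    #   replay-filter.mode = fail
+    #   recovery-event-timeout = 60s
+    # }
+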
+    # Fallback settings for snapshot store plugin configurations
+    # These settings are used if they are not defined in plugin config section.
+    snapshot-store-plugin-fallback {
+
+      # Fully qualified class name providing snapshot store plugin api
+      # implementation. It is mandatory to specify this property if
+      # snapshot store is enabled.
+      # The class must have a constructor without parameters or constructor with
+      # one `com.typesafe.config.Config` parameter.
+      class = ""
+
+      # Dispatcher for the plugin actor.
+      plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+
+      circuit-breaker {
+        max-failures = 5
+        call-timeout = 20s
+        reset-timeout = 60s
+      }
+
+      # Set this to true if successful loading of a snapshot is not necessary.
+      # This can be useful when it is alright to ignore a snapshot in case of,
+      # for example, deserialization errors. When snapshot loading fails it will
+      # instead recover by replaying all events.
+      # Don't set to true if events are deleted because that would
+      # result in wrong recovered state if snapshot load fails.
+      snapshot-is-optional = false
+
+    }
+
+  fsm {
+    # PersistentFSM saves snapshots after this number of persistent
+    # events. Snapshots are used to reduce recovery times.
+    # To disable this feature, specify snapshot-after = off.
+    # To enable the feature, specify a number like snapshot-after = 1000
+    # which means a snapshot is taken after persisting every 1000 events.
+    snapshot-after = off
+  }
+
+  # DurableStateStore settings
+  state {
+    # Absolute path to the KeyValueStore plugin configuration entry used by
+    # DurableStateBehavior actors by default.
+    # DurableStateBehavior can override `durableStateStorePluginId` method (`withDurableStateStorePluginId`)
+    # in order to rely on a different plugin.
+    plugin = ""
+  }
+
+  # Fallback settings for DurableStateStore plugin configurations
+  # These settings are used if they are not defined in plugin config section.
+  state-plugin-fallback {
+    recovery-timeout = 30s
+  }
+}
+
+# Protobuf serialization for the persistent extension messages.
+akka.actor {
+    serializers {
+        akka-persistence-message = "akka.persistence.serialization.MessageSerializer"
+        akka-persistence-snapshot = "akka.persistence.serialization.SnapshotSerializer"
+    }
+    serialization-bindings {
+        "akka.persistence.serialization.Message" = akka-persistence-message
+        "akka.persistence.serialization.Snapshot" = akka-persistence-snapshot
+    }
+    serialization-identifiers {
+        "akka.persistence.serialization.MessageSerializer" = 7
+        "akka.persistence.serialization.SnapshotSerializer" = 8
+    }
+}
+
+
+###################################################
+# Persistence plugins included with the extension #
+###################################################
+
+# In-memory journal plugin.
+akka.persistence.journal.inmem {
+    # Class name of the plugin.
+    class = "akka.persistence.journal.inmem.InmemJournal"
+    # Dispatcher for the plugin actor.
+    plugin-dispatcher = "akka.actor.default-dispatcher"
+
+    # Turn this on to test serialization of the events
+    test-serialization = off
+}
+
+# Local file system snapshot store plugin.
+akka.persistence.snapshot-store.local {
+    # Class name of the plugin.
+    class = "akka.persistence.snapshot.local.LocalSnapshotStore"
+    # Dispatcher for the plugin actor.
+    plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+    # Dispatcher for streaming snapshot IO.
+    stream-dispatcher = "akka.persistence.dispatchers.default-stream-dispatcher"
+    # Storage location of snapshot files.
+    dir = "snapshots"
+    # Number of load attempts when recovery from the latest snapshot fails
+    # but older snapshot files are available. Each recovery attempt will try
+    # to recover using a snapshot file older than the one that previously failed
+    # (if any are present). If all attempts fail the recovery will fail and
+    # the persistent actor will be stopped.
+    max-load-attempts = 3
+}
+
+# LevelDB journal plugin.
+# Note: this plugin requires explicit LevelDB dependency, see below. 
+akka.persistence.journal.leveldb {
+    # Class name of the plugin.
+    class = "akka.persistence.journal.leveldb.LeveldbJournal"
+    # Dispatcher for the plugin actor.
+    plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+    # Dispatcher for message replay.
+    replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
+    # Storage location of LevelDB files.
+    dir = "journal"
+    # Use fsync on write.
+    fsync = on
+    # Verify checksum on read.
+    checksum = off
+    # Native LevelDB (via JNI) or LevelDB Java port.
+    native = on
+    # Number of deleted messages per persistence id that will trigger journal compaction
+    compaction-intervals {
+    }
+}
+
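+# Illustrative application.conf sketch (editor-added; the per-id/wildcard form
+# follows the upstream LevelDB plugin documentation): compact after a given
+# number of deletions, per persistence id.
+#
+# akka.persistence.journal.leveldb.compaction-intervals {
+#   "my-persistence-id" = 100
+#   "*" = 250
+# }
+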
+# Shared LevelDB journal plugin (for testing only).
+# Note: this plugin requires explicit LevelDB dependency, see below. 
+akka.persistence.journal.leveldb-shared {
+    # Class name of the plugin.
+    class = "akka.persistence.journal.leveldb.SharedLeveldbJournal"
+    # Dispatcher for the plugin actor.
+    plugin-dispatcher = "akka.actor.default-dispatcher"
+    # Timeout for async journal operations.
+    timeout = 10s
+    store {
+        # Dispatcher for shared store actor.
+        store-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+        # Dispatcher for message replay.
+        replay-dispatcher = "akka.persistence.dispatchers.default-replay-dispatcher"
+        # Storage location of LevelDB files.
+        dir = "journal"
+        # Use fsync on write.
+        fsync = on
+        # Verify checksum on read.
+        checksum = off
+        # Native LevelDB (via JNI) or LevelDB Java port.
+        native = on
+        # Number of deleted messages per persistence id that will trigger journal compaction
+        compaction-intervals {
+        }
+    }
+}
+
+akka.persistence.journal.proxy {
+  # Class name of the plugin.
+  class = "akka.persistence.journal.PersistencePluginProxy"
+  # Dispatcher for the plugin actor.
+  plugin-dispatcher = "akka.actor.default-dispatcher"
+  # Set this to on in the configuration of the ActorSystem
+  # that will host the target journal
+  start-target-journal = off
+  # The journal plugin config path to use for the target journal
+  target-journal-plugin = ""
+  # The address of the proxy to connect to from other nodes. Optional setting.
+  target-journal-address = ""
+  # Initialization timeout of target lookup
+  init-timeout = 10s
+}
+
+akka.persistence.snapshot-store.proxy {
+  # Class name of the plugin.
+  class = "akka.persistence.journal.PersistencePluginProxy"
+  # Dispatcher for the plugin actor.
+  plugin-dispatcher = "akka.actor.default-dispatcher"
+  # Set this to on in the configuration of the ActorSystem
+  # that will host the target snapshot-store
+  start-target-snapshot-store = off
+  # The snapshot-store plugin config path to use for the target snapshot-store
+  target-snapshot-store-plugin = ""
+  # The address of the proxy to connect to from other nodes. Optional setting.
+  target-snapshot-store-address = ""
+  # Initialization timeout of target lookup
+  init-timeout = 10s
+}
+
+# LevelDB persistence requires the following dependency declarations:
+#
+# SBT:
+#       "org.iq80.leveldb"            % "leveldb"          % "0.7"
+#       "org.fusesource.leveldbjni"   % "leveldbjni-all"   % "1.8"
+#
+# Maven:
+#        <dependency>
+#            <groupId>org.iq80.leveldb</groupId>
+#            <artifactId>leveldb</artifactId>
+#            <version>0.7</version>
+#        </dependency>
+#        <dependency>
+#            <groupId>org.fusesource.leveldbjni</groupId>
+#            <artifactId>leveldbjni-all</artifactId>
+#            <version>1.8</version>
+#        </dependency>
diff --git a/akka/repackaged-akka-jar/src/main/resources/reference.conf b/akka/repackaged-akka-jar/src/main/resources/reference.conf
new file mode 100644 (file)
index 0000000..7e60da8
--- /dev/null
@@ -0,0 +1,9 @@
+include "actor_reference.conf"
+include "actor_typed_reference.conf"
+include "cluster_reference.conf"
+include "cluster_tools_reference.conf"
+include "cluster_typed_reference.conf"
+include "distributed_data_reference.conf"
+include "persistence_reference.conf"
+include "remote_reference.conf"
+include "stream_reference.conf"
diff --git a/akka/repackaged-akka-jar/src/main/resources/remote_reference.conf b/akka/repackaged-akka-jar/src/main/resources/remote_reference.conf
new file mode 100644 (file)
index 0000000..a30bce7
--- /dev/null
@@ -0,0 +1,1261 @@
+#//#shared
+#####################################
+# Akka Remote Reference Config File #
+#####################################
+
+# This is the reference config file that contains all the default settings.
+# Make your edits/overrides in your application.conf.
+
+# Comments about akka.actor settings are left out where they are already in
+# akka-actor.jar, because otherwise they would be repeated in config rendering.
+#
+# For the configuration of the new remoting implementation (Artery) please look
+# at the bottom section of this file as it is listed separately.
+
+akka {
+
+  actor {
+
+    serializers {
+      akka-containers = "akka.remote.serialization.MessageContainerSerializer"
+      akka-misc = "akka.remote.serialization.MiscMessageSerializer"
+      artery = "akka.remote.serialization.ArteryMessageSerializer"
+      proto = "akka.remote.serialization.ProtobufSerializer"
+      daemon-create = "akka.remote.serialization.DaemonMsgCreateSerializer"
+      akka-system-msg = "akka.remote.serialization.SystemMessageSerializer"
+    }
+
+    serialization-bindings {
+      "akka.actor.ActorSelectionMessage" = akka-containers
+
+      "akka.remote.DaemonMsgCreate" = daemon-create
+
+      "akka.remote.artery.ArteryMessage" = artery
+
+      # Since akka.protobuf.Message does not extend Serializable but
+      # GeneratedMessage does, need to use the more specific one here in order
+      # to avoid ambiguity.
+      # This is only loaded if akka-protobuf is on the classpath
+      # It should not be used and users should migrate to using the protobuf classes
+      # directly
+      # Remove in 2.7
+      "akka.protobuf.GeneratedMessage" = proto
+
+      "akka.protobufv3.internal.GeneratedMessageV3" = proto
+
+      # Since com.google.protobuf.Message does not extend Serializable but
+      # GeneratedMessage does, need to use the more specific one here in order
+      # to avoid ambiguity.
+      # This com.google.protobuf serialization binding is only used if the class can be loaded,
+      # i.e. com.google.protobuf dependency has been added in the application project.
+      "com.google.protobuf.GeneratedMessage" = proto
+      "com.google.protobuf.GeneratedMessageV3" = proto
+
+      "akka.actor.Identify" = akka-misc
+      "akka.actor.ActorIdentity" = akka-misc
+      "scala.Some" = akka-misc
+      "scala.None$" = akka-misc
+      "java.util.Optional" = akka-misc
+      "akka.actor.Status$Success" = akka-misc
+      "akka.actor.Status$Failure" = akka-misc
+      "akka.actor.ActorRef" = akka-misc
+      "akka.actor.PoisonPill$" = akka-misc
+      "akka.actor.Kill$" = akka-misc
+      "akka.remote.RemoteWatcher$Heartbeat$" = akka-misc
+      "akka.remote.RemoteWatcher$HeartbeatRsp" = akka-misc
+      "akka.Done" = akka-misc
+      "akka.NotUsed" = akka-misc
+      "akka.actor.Address" = akka-misc
+      "akka.remote.UniqueAddress" = akka-misc
+
+      "akka.actor.ActorInitializationException" = akka-misc
+      "akka.actor.IllegalActorStateException" = akka-misc
+      "akka.actor.ActorKilledException" = akka-misc
+      "akka.actor.InvalidActorNameException" = akka-misc
+      "akka.actor.InvalidMessageException" = akka-misc
+      "java.util.concurrent.TimeoutException" = akka-misc
+      "akka.remote.serialization.ThrowableNotSerializableException" = akka-misc
+
+      "akka.actor.LocalScope$" = akka-misc
+      "akka.remote.RemoteScope" = akka-misc
+
+      "com.typesafe.config.impl.SimpleConfig" = akka-misc
+      "com.typesafe.config.Config" = akka-misc
+
+      "akka.routing.FromConfig" = akka-misc
+      "akka.routing.DefaultResizer" = akka-misc
+      "akka.routing.BalancingPool" = akka-misc
+      "akka.routing.BroadcastGroup" = akka-misc
+      "akka.routing.BroadcastPool" = akka-misc
+      "akka.routing.RandomGroup" = akka-misc
+      "akka.routing.RandomPool" = akka-misc
+      "akka.routing.RoundRobinGroup" = akka-misc
+      "akka.routing.RoundRobinPool" = akka-misc
+      "akka.routing.ScatterGatherFirstCompletedGroup" = akka-misc
+      "akka.routing.ScatterGatherFirstCompletedPool" = akka-misc
+      "akka.routing.SmallestMailboxPool" = akka-misc
+      "akka.routing.TailChoppingGroup" = akka-misc
+      "akka.routing.TailChoppingPool" = akka-misc
+      "akka.remote.routing.RemoteRouterConfig" = akka-misc
+
+      "akka.pattern.StatusReply" = akka-misc
+
+      "akka.dispatch.sysmsg.SystemMessage" = akka-system-msg
+
+      # Java Serializer is by default used for exceptions and will by default
+      # not be allowed to be serialized, but in certain cases they are replaced
+      # by `akka.remote.serialization.ThrowableNotSerializableException` if
+      # no specific serializer has been defined:
+      # - when wrapped in `akka.actor.Status.Failure` for ask replies
+      # - when wrapped in system messages for exceptions from remote deployed child actors
+      #
+      # It's recommended that you implement a custom serializer for exceptions that are
+      # sent remotely. You can add a binding to akka-misc (MiscMessageSerializer) for
+      # exceptions that have a constructor with a single message String, or a constructor
+      # with the message String as the first parameter and the cause Throwable as the
+      # second parameter. Note that it's not safe to add this binding for general
+      # exceptions such as IllegalArgumentException because they may have a subclass
+      # without the required constructor.
+      "java.lang.Throwable" = java
+    }
+
+    serialization-identifiers {
+      "akka.remote.serialization.ProtobufSerializer" = 2
+      "akka.remote.serialization.DaemonMsgCreateSerializer" = 3
+      "akka.remote.serialization.MessageContainerSerializer" = 6
+      "akka.remote.serialization.MiscMessageSerializer" = 16
+      "akka.remote.serialization.ArteryMessageSerializer" = 17
+
+      "akka.remote.serialization.SystemMessageSerializer" = 22
+
+      # deprecated in 2.6.0, moved to akka-actor
+      "akka.remote.serialization.LongSerializer" = 18
+      # deprecated in 2.6.0, moved to akka-actor
+      "akka.remote.serialization.IntSerializer" = 19
+      # deprecated in 2.6.0, moved to akka-actor
+      "akka.remote.serialization.StringSerializer" = 20
+      # deprecated in 2.6.0, moved to akka-actor
+      "akka.remote.serialization.ByteStringSerializer" = 21
+    }
+
+    deployment {
+
+      default {
+
+        # if this is set to a valid remote address, the named actor will be
+        # deployed at that node e.g. "akka://sys@host:port"
+        remote = ""
+
+        target {
+
+          # A list of hostnames and ports for instantiating the children of a
+          # router
+          #   The format should be "akka://sys@host:port", where:
+          #    - sys is the remote actor system name
+          #    - host can be either a hostname or an IP address the remote actor
+          #      should connect to
+          #    - port should be the port for the remote server on the other node
+          # The number of actor instances to be spawned is still taken from the
+          # nr-of-instances setting as for local routers; the instances will be
+          # distributed round-robin among the given nodes.
+          nodes = []
+
+        }
+      }
+    }
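+
+    # Illustrative application.conf sketch (editor-added, hypothetical actor
+    # path and node addresses): deploy a router's routees on the remote nodes
+    # listed under target.nodes.
+    #
+    # akka.actor.deployment {
+    #   /parent/remotePool {
+    #     router = round-robin-pool
+    #     nr-of-instances = 10
+    #     target.nodes = ["akka://sys@host1:2552", "akka://sys@host2:2552"]
+    #   }
+    # }
+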
+  }
+
+  remote {
+    ### Settings shared by classic remoting and Artery (the new implementation of remoting)
+
+    # Using remoting directly is typically not desirable, so a warning will
+    # be shown to make this clear. Set this setting to 'off' to suppress that
+    # warning.
+    warn-about-direct-use = on
+
+
+    # If Cluster is not used, remote watch and deployment are disabled.
+    # To optionally use them while not using Cluster, set to 'on'.
+    use-unsafe-remote-features-outside-cluster = off
+
+    # A warning will be logged on remote watch attempts if Cluster
+    # is not in use and 'use-unsafe-remote-features-outside-cluster'
+    # is 'off'. Set this to 'off' to suppress these.
+    warn-unsafe-watch-outside-cluster = on
+
+    # Settings for the Phi accrual failure detector (http://www.jaist.ac.jp/~defago/files/pdf/IS_RR_2004_010.pdf
+    # [Hayashibara et al]) used for remote death watch.
+    # The default PhiAccrualFailureDetector will trigger if there are no heartbeats within
+    # the duration heartbeat-interval + acceptable-heartbeat-pause + threshold_adjustment,
+    # i.e. around 12.5 seconds with default settings.
+    watch-failure-detector {
+
+      # FQCN of the failure detector implementation.
+      # It must implement akka.remote.FailureDetector and have
+      # a public constructor with a com.typesafe.config.Config and
+      # akka.actor.EventStream parameter.
+      implementation-class = "akka.remote.PhiAccrualFailureDetector"
+
+      # How often keep-alive heartbeat messages should be sent to each connection.
+      heartbeat-interval = 1 s
+
+      # Defines the failure detector threshold.
+      # A low threshold is prone to generate many wrong suspicions but ensures
+      # a quick detection in the event of a real crash. Conversely, a high
+      # threshold generates fewer mistakes but needs more time to detect
+      # actual crashes.
+      threshold = 10.0
+
+      # Number of the samples of inter-heartbeat arrival times to adaptively
+      # calculate the failure timeout for connections.
+      max-sample-size = 200
+
+      # Minimum standard deviation to use for the normal distribution in
+      # AccrualFailureDetector. Too low standard deviation might result in
+      # too much sensitivity for sudden, but normal, deviations in heartbeat
+      # inter arrival times.
+      min-std-deviation = 100 ms
+
+      # Number of potentially lost/delayed heartbeats that will be
+      # accepted before considering it to be an anomaly.
+      # This margin is important to be able to survive sudden, occasional,
+      # pauses in heartbeat arrivals, due to for example garbage collect or
+      # network drop.
+      acceptable-heartbeat-pause = 10 s
+
+
+      # How often to check for nodes marked as unreachable by the failure
+      # detector
+      unreachable-nodes-reaper-interval = 1s
+
+      # After the heartbeat request has been sent the first failure detection
+      # will start after this period, even though no heartbeat message has
+      # been received.
+      expected-response-after = 1 s
+
+    }
+
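+    # Illustrative application.conf sketch (editor-added): tolerate longer GC
+    # pauses for remote death watch by widening the acceptable heartbeat pause.
+    #
+    # akka.remote.watch-failure-detector {
+    #   acceptable-heartbeat-pause = 20 s
+    # }
+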
+    # remote deployment configuration section
+    deployment {
+      # deprecated, use `enable-allow-list`
+      enable-whitelist = off
+
+      # If true, only the specific classes listed in `allowed-actor-classes` may be instantiated on this
+      # system via remote deployment
+      enable-allow-list = ${akka.remote.deployment.enable-whitelist}
+
+
+      # deprecated, use `allowed-actor-classes`
+      whitelist = []
+
+      allowed-actor-classes = ${akka.remote.deployment.whitelist}
+    }
+
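+    # Illustrative application.conf sketch (editor-added, hypothetical class
+    # name): restrict remote deployment to an explicit allow list.
+    #
+    # akka.remote.deployment {
+    #   enable-allow-list = on
+    #   allowed-actor-classes = ["com.example.SafeWorker"]
+    # }
+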
+    ### Default dispatcher for the remoting subsystem
+    default-remote-dispatcher {
+      type = Dispatcher
+      executor = "fork-join-executor"
+      fork-join-executor {
+        parallelism-min = 2
+        parallelism-factor = 0.5
+        parallelism-max = 16
+      }
+      throughput = 10
+    }
+    #//#shared
+  }
+
+}
+
+akka {
+
+  remote {
+  #//#classic
+    classic {
+
+      ### Configuration for classic remoting. Classic remoting is deprecated, use artery.
+
+
+      # If set to a nonempty string remoting will use the given dispatcher for
+      # its internal actors, otherwise the default dispatcher is used. Please note
+      # that since remoting can load arbitrary 3rd party drivers (see the
+      # "enabled-transports" and "adapters" entries) it is not guaranteed that
+      # every module will respect this setting.
+      use-dispatcher = "akka.remote.default-remote-dispatcher"
+
+      # Settings for the failure detector to monitor connections.
+      # For TCP it is not important to have fast failure detection, since
+      # most connection failures are captured by TCP itself.
+      # The default DeadlineFailureDetector will trigger if there are no heartbeats within
+      # the duration heartbeat-interval + acceptable-heartbeat-pause, i.e. 124 seconds
+      # with the default settings.
+      transport-failure-detector {
+
+        # FQCN of the failure detector implementation.
+        # It must implement akka.remote.FailureDetector and have
+        # a public constructor with a com.typesafe.config.Config and
+        # akka.actor.EventStream parameter.
+        implementation-class = "akka.remote.DeadlineFailureDetector"
+
+        # How often keep-alive heartbeat messages should be sent to each connection.
+        heartbeat-interval = 4 s
+
+        # Number of potentially lost/delayed heartbeats that will be
+        # accepted before considering it to be an anomaly.
+        # A margin to the `heartbeat-interval` is important to be able to survive sudden,
+        # occasional, pauses in heartbeat arrivals, due to for example garbage collect or
+        # network drop.
+        acceptable-heartbeat-pause = 120 s
+      }
+
+
+      # Timeout after which the startup of the remoting subsystem is considered
+      # to be failed. Increase this value if your transport drivers (see the
+      # enabled-transports section) need longer time to be loaded.
+      startup-timeout = 10 s
+
+      # Timeout after which the graceful shutdown of the remoting subsystem is
+      # considered to be failed. After the timeout the remoting system is
+      # forcefully shut down. Increase this value if your transport drivers
+      # (see the enabled-transports section) need longer time to stop properly.
+      shutdown-timeout = 10 s
+
+      # Before shutting down the drivers, the remoting subsystem attempts to flush
+      # all pending writes. This setting controls the maximum time the remoting is
+      # willing to wait before moving on to shut down the drivers.
+      flush-wait-on-shutdown = 2 s
+
+      # Reuse inbound connections for outbound messages
+      use-passive-connections = on
+
+      # Controls the backoff interval after a refused write is reattempted.
+      # (Transports may refuse writes if their internal buffer is full)
+      backoff-interval = 5 ms
+
+      # Acknowledgment timeout of management commands sent to the transport stack.
+      command-ack-timeout = 30 s
+
+      # The timeout for outbound associations to perform the handshake.
+      # If the transport is akka.remote.classic.netty.tcp or akka.remote.classic.netty.ssl
+      # the configured connection-timeout for the transport will be used instead.
+      handshake-timeout = 15 s
+
+      ### Security settings
+
+      # Enable untrusted mode for full security of server-managed actors; it prevents
+      # system messages from being sent by clients, e.g. messages like 'Create',
+      # 'Suspend', 'Resume', 'Terminate', 'Supervise', 'Link' etc.
+      untrusted-mode = off
+
+      # When 'untrusted-mode=on' inbound actor selections are by default discarded.
+      # Actors with paths defined in this list are granted permission to receive actor
+      # selection messages.
+      # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
+      trusted-selection-paths = []
+
+      ### Logging
+
+      # If this is "on", Akka will log all inbound messages at DEBUG level,
+      # if off then they are not logged
+      log-received-messages = off
+
+      # If this is "on", Akka will log all outbound messages at DEBUG level,
+      # if off then they are not logged
+      log-sent-messages = off
+
+      # Sets the log granularity level at which Akka logs remoting events. This setting
+      # can take the values OFF, ERROR, WARNING, INFO, DEBUG, or ON. For compatibility
+      # reasons the setting "on" will default to "debug" level. Please note that the effective
+      # logging level is still determined by the global logging level of the actor system:
+      # for example debug level remoting events will be only logged if the system
+      # is running with debug level logging.
+      # Failures to deserialize received messages also fall under this flag.
+      log-remote-lifecycle-events = on
+
+      # Logging of message types with payload size in bytes larger than
+      # this value. Maximum detected size per message type is logged once,
+      # with an increase threshold of 10%.
+      # By default this feature is turned off. Activate it by setting the property to
+      # a value in bytes, such as 1000b. Note that for all messages larger than this
+      # limit there will be extra performance and scalability cost.
+      log-frame-size-exceeding = off
+
+      # Log warning if the number of messages in the backoff buffer in the endpoint
+      # writer exceeds this limit. It can be disabled by setting the value to off.
+      log-buffer-size-exceeding = 50000
+
+      # After failing to establish an outbound connection, the remoting will mark the
+      # address as failed. This configuration option controls how much time should
+      # be elapsed before reattempting a new connection. While the address is
+      # gated, all messages sent to the address are delivered to dead-letters.
+      # Since this setting limits the rate of reconnects, setting it to a
+      # very short interval (i.e. less than a second) may result in a storm of
+      # reconnect attempts.
+      retry-gate-closed-for = 5 s
+
+      # After catastrophic communication failures that result in the loss of system
+      # messages or after the remote DeathWatch triggers the remote system gets
+      # quarantined to prevent inconsistent behavior.
+      # This setting controls how long the Quarantine marker will be kept around
+      # before being removed to avoid long-term memory leaks.
+      # WARNING: DO NOT change this to a small value to re-enable communication with
+      # quarantined nodes. Such a feature is not supported and any behavior between
+      # the affected systems after lifting the quarantine is undefined.
+      prune-quarantine-marker-after = 5 d
+
+      # If system messages have been exchanged between two systems (i.e. remote death
+      # watch or remote deployment has been used) a remote system will be marked as
+      # quarantined after the two systems have no active association, and no
+      # communication happens during the time configured here.
+      # The only purpose of this setting is to avoid storing system message redelivery
+      # data (sequence number state, etc.) for an undefined amount of time leading to long
+      # term memory leak. Instead, if a system has been gone for this period,
+      # or more exactly
+      # - there is no association between the two systems (TCP connection, if TCP transport is used)
+      # - neither side has been attempting to communicate with the other
+      # - there are no pending system messages to deliver
+      # for the amount of time configured here, the remote system will be quarantined and all state
+      # associated with it will be dropped.
+      #
+      # Maximum value depends on the scheduler's max limit (default 248 days) and if configured
+      # to a longer duration this feature will effectively be disabled. Setting the value to
+      # 'off' will also disable the feature. Note that if disabled there is a risk of a long
+      # term memory leak.
+      quarantine-after-silence = 2 d
+
+      # This setting defines the maximum number of unacknowledged system messages
+      # allowed for a remote system. If this limit is reached the remote system is
+      # declared to be dead and its UID marked as tainted.
+      system-message-buffer-size = 20000
+
+      # This setting defines the maximum idle time after an individual
+      # acknowledgement for system messages is sent. System message delivery
+      # is guaranteed by explicit acknowledgement messages. These acks are
+      # piggybacked on ordinary traffic messages. If no traffic is detected
+      # during the time period configured here, the remoting will send out
+      # an individual ack.
+      system-message-ack-piggyback-timeout = 0.3 s
+
+      # This setting defines the time after which internal management signals
+      # between actors (used for DeathWatch and supervision) that have not been
+      # explicitly acknowledged or negatively acknowledged are resent.
+      # Messages that were negatively acknowledged are always immediately
+      # resent.
+      resend-interval = 2 s
+
+      # Maximum number of unacknowledged system messages that will be resent
+      # each 'resend-interval'. If you watch many (> 1000) remote actors you can
+      # increase this value to, for example, 600, but too large a limit (e.g. 10000)
+      # may flood the connection and might cause false failure detection to trigger.
+      # Test such a configuration by watching all actors at the same time and stop
+      # all watched actors at the same time.
+      resend-limit = 200
+
+      # WARNING: this setting should not be changed unless all of its consequences
+      # are properly understood, which assumes experience with remoting internals
+      # or expert advice.
+      # This setting defines the time after which redelivery attempts of internal management
+      # signals are stopped to a remote system that has not previously been confirmed to be
+      # alive by this system.
+      initial-system-message-delivery-timeout = 3 m
+
+      ### Transports and adapters
+
+      # List of the transport drivers that will be loaded by the remoting.
+      # A list of fully qualified config paths must be provided where
+      # the given configuration path contains a transport-class key
+      # pointing to an implementation class of the Transport interface.
+      # If multiple transports are provided, the address of the first
+      # one will be used as a default address.
+      enabled-transports = ["akka.remote.classic.netty.tcp"]
+
+      # Transport drivers can be augmented with adapters by adding their
+      # name to the applied-adapters setting in the configuration of a
+      # transport. The available adapters should be configured in this
+      # section by providing a name, and the fully qualified name of
+      # their corresponding implementation. The class given here
+      # must implement akka.remote.transport.TransportAdapterProvider
+      # and have a public constructor without parameters.
+      adapters {
+        gremlin = "akka.remote.transport.FailureInjectorProvider"
+        trttl = "akka.remote.transport.ThrottlerProvider"
+      }
+
+      ### Default configuration for the Netty based transport drivers
+
+      netty.tcp {
+        # The class given here must implement the akka.remote.transport.Transport
+        # interface and offer a public constructor which takes two arguments:
+        #  1) akka.actor.ExtendedActorSystem
+        #  2) com.typesafe.config.Config
+        transport-class = "akka.remote.transport.netty.NettyTransport"
+
+        # Transport drivers can be augmented with adapters by adding their
+        # name to the applied-adapters list. The last adapter in the
+        # list is the adapter immediately above the driver, while
+        # the first one is the top of the stack below the standard
+        # Akka protocol
+        applied-adapters = []
+
+        # The default remote server port clients should connect to.
+        # Default is 2552 (AKKA), use 0 if you want a random available port
+        # This port needs to be unique for each actor system on the same machine.
+        port = 2552
+
+        # The hostname or ip clients should connect to.
+        # InetAddress.getLocalHost.getHostAddress is used if empty
+        hostname = ""
+
+        # Use this setting to bind a network interface to a different port
+        # than the remoting protocol expects messages at. This may be used
+        # when running Akka nodes in separated networks (under NATs or in Docker containers).
+        # Use 0 if you want a random available port. Examples:
+        #
+        # akka.remote.classic.netty.tcp.port = 2552
+        # akka.remote.classic.netty.tcp.bind-port = 2553
+        # Network interface will be bound to the 2553 port, but remoting protocol will
+        # expect messages sent to port 2552.
+        #
+        # akka.remote.classic.netty.tcp.port = 0
+        # akka.remote.classic.netty.tcp.bind-port = 0
+        # Network interface will be bound to a random port, and remoting protocol will
+        # expect messages sent to the bound port.
+        #
+        # akka.remote.classic.netty.tcp.port = 2552
+        # akka.remote.classic.netty.tcp.bind-port = 0
+        # Network interface will be bound to a random port, but remoting protocol will
+        # expect messages sent to port 2552.
+        #
+        # akka.remote.classic.netty.tcp.port = 0
+        # akka.remote.classic.netty.tcp.bind-port = 2553
+        # Network interface will be bound to the 2553 port, and remoting protocol will
+        # expect messages sent to the bound port.
+        #
+        # akka.remote.classic.netty.tcp.port = 2552
+        # akka.remote.classic.netty.tcp.bind-port = ""
+        # Network interface will be bound to the 2552 port, and remoting protocol will
+        # expect messages sent to the bound port.
+        #
+        # akka.remote.classic.netty.tcp.port if empty
+        bind-port = ""
+
+        # Use this setting to bind a network interface to a different hostname or ip
+        # than remoting protocol expects messages at.
+        # Use "0.0.0.0" to bind to all interfaces.
+        # akka.remote.classic.netty.tcp.hostname if empty
+        bind-hostname = ""
+
+        # Enables SSL support on this transport
+        enable-ssl = false
+
+        # Sets the connectTimeoutMillis of all outbound connections,
+        # i.e. how long a connect may take until it is timed out
+        connection-timeout = 15 s
+
+        # If set to "<id.of.dispatcher>" then the specified dispatcher
+        # will be used to accept inbound connections, and perform IO. If "" then
+        # dedicated threads will be used.
+        # Please note that the Netty driver only uses this configuration and does
+        # not read the "akka.remote.use-dispatcher" entry. Instead it has to be
+        # configured manually to point to the same dispatcher if needed.
+        use-dispatcher-for-io = ""
+
+        # Sets the high water mark for the in and outbound sockets,
+        # set to 0b for platform default
+        write-buffer-high-water-mark = 0b
+
+        # Sets the low water mark for the in and outbound sockets,
+        # set to 0b for platform default
+        write-buffer-low-water-mark = 0b
+
+        # Sets the send buffer size of the Sockets,
+        # set to 0b for platform default
+        send-buffer-size = 256000b
+
+        # Sets the receive buffer size of the Sockets,
+        # set to 0b for platform default
+        receive-buffer-size = 256000b
+
+        # Maximum message size the transport will accept, but at least
+        # 32000 bytes.
+        # Please note that UDP does not support arbitrary large datagrams,
+        # so this setting has to be chosen carefully when using UDP.
+        # Both the send-buffer-size and receive-buffer-size settings have to
+        # be adjusted to be able to buffer messages of maximum size.
+        maximum-frame-size = 128000b
+
+        # Sets the size of the connection backlog
+        backlog = 4096
+
+        # Enables the TCP_NODELAY flag, i.e. disables Nagle’s algorithm
+        tcp-nodelay = on
+
+        # Enables TCP Keepalive, subject to the O/S kernel’s configuration
+        tcp-keepalive = on
+
+        # Enables SO_REUSEADDR, which determines when an ActorSystem can open
+        # the specified listen port (the meaning differs between *nix and Windows)
+        # Valid values are "on", "off" and "off-for-windows"
+        # due to the following Windows bug: https://bugs.java.com/bugdatabase/view_bug.do?bug_id=4476378
+        # "off-for-windows" of course means that it's "on" for all other platforms
+        tcp-reuse-addr = off-for-windows
+
+        # Used to configure the number of I/O worker threads on server sockets
+        server-socket-worker-pool {
+          # Min number of threads to cap factor-based number to
+          pool-size-min = 2
+
+          # The pool size factor is used to determine thread pool size
+          # using the following formula: ceil(available processors * factor).
+          # Resulting size is then bounded by the pool-size-min and
+          # pool-size-max values.
+          pool-size-factor = 1.0
+
+          # Max number of threads to cap factor-based number to
+          pool-size-max = 2
+        }
+
+        # Used to configure the number of I/O worker threads on client sockets
+        client-socket-worker-pool {
+          # Min number of threads to cap factor-based number to
+          pool-size-min = 2
+
+          # The pool size factor is used to determine thread pool size
+          # using the following formula: ceil(available processors * factor).
+          # Resulting size is then bounded by the pool-size-min and
+          # pool-size-max values.
+          pool-size-factor = 1.0
+
+          # Max number of threads to cap factor-based number to
+          pool-size-max = 2
+        }
+
+
+      }
+
+      netty.ssl = ${akka.remote.classic.netty.tcp}
+      netty.ssl = {
+        # Enable SSL/TLS encryption.
+        # This must be enabled on both the client and server to work.
+        enable-ssl = true
+
+        # Factory of SSLEngine.
+        # Must implement akka.remote.transport.netty.SSLEngineProvider and have a public
+        # constructor with an ActorSystem parameter.
+        # The default ConfigSSLEngineProvider is configured by properties in section
+        # akka.remote.classic.netty.ssl.security
+        #
+        # The SSLEngineProvider can also be defined via ActorSystemSetup with
+        # SSLEngineProviderSetup  when starting the ActorSystem. That is useful when
+        # the SSLEngineProvider implementation requires other external constructor
+        # parameters or is created before the ActorSystem is created.
+        # If such SSLEngineProviderSetup is defined this config property is not used.
+        ssl-engine-provider = akka.remote.transport.netty.ConfigSSLEngineProvider
+
+        security {
+          # This is the Java Key Store used by the server connection
+          key-store = "keystore"
+
+          # This password is used for decrypting the key store
+          key-store-password = "changeme"
+
+          # This password is used for decrypting the key
+          key-password = "changeme"
+
+          # This is the Java Key Store used by the client connection
+          trust-store = "truststore"
+
+          # This password is used for decrypting the trust store
+          trust-store-password = "changeme"
+
+          # Protocol to use for SSL encryption.
+          protocol = "TLSv1.2"
+
+          # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", 
+          #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+          #   "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+          #   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+          # When doing rolling upgrades, make sure to include both the algorithm used 
+          # by old nodes and the preferred algorithm.
+          # If you use a JDK 8 prior to 8u161 you need to install
+          # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+          # More info here:
+          # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+          enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+            "TLS_RSA_WITH_AES_128_CBC_SHA"]
+
+          # There are two options, and the default SecureRandom is recommended:
+          # "" or "SecureRandom" => (default)
+          # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+          #
+          # Setting a value here may require you to supply the appropriate cipher
+          # suite (see enabled-algorithms section above)
+          random-number-generator = ""
+
+          # Require mutual authentication between TLS peers
+          #
+          # Without mutual authentication only the peer that actively establishes a connection (TLS client side)
+          # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on,
+          # the passive side will also request and verify a certificate from the connecting peer.
+          #
+          # To prevent man-in-the-middle attacks this setting is enabled by default.
+          #
+          # Note: Nodes that are configured with this setting to 'on' might not be able to receive messages from nodes that
+          # run on older versions of akka-remote. This is because in versions of Akka < 2.4.12 the active side of the remoting
+          # connection will not send over certificates even if asked.
+          #
+          # However, starting with Akka 2.4.12, even with this setting "off", the active side (TLS client side)
+          # will use the given key-store to send over a certificate if asked. A rolling upgrade from versions of
+          # Akka < 2.4.12 can therefore work like this:
+          #   - upgrade all nodes to an Akka version >= 2.4.12, in the best case the latest version, but keep this setting at "off"
+          #   - then switch this flag to "on" and perform another rolling upgrade of all nodes
+          # The first step ensures that all nodes will send over a certificate when asked to. The second
+          # step will ensure that all nodes finally enforce the secure checking of client certificates.
+          require-mutual-authentication = on
+        }
+      }
+
+      ### Default configuration for the failure injector transport adapter
+
+      gremlin {
+        # Enable debug logging of the failure injector transport adapter
+        debug = off
+      }
+
+      backoff-remote-dispatcher {
+        type = Dispatcher
+        executor = "fork-join-executor"
+        fork-join-executor {
+          # Min number of threads to cap factor-based parallelism number to
+          parallelism-min = 2
+          parallelism-max = 2
+        }
+      }
+    }
+  }
+}
+#//#classic
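The netty.ssl.security block in the classic section above uses standard JSSE keystore conventions. The following hypothetical Java sketch shows how such settings are typically turned into an SSLContext, only to clarify what key-store, key-password, trust-store and protocol refer to; it is not the ConfigSSLEngineProvider implementation, and the paths and passwords are the placeholders from the config above:

    import java.io.FileInputStream;
    import java.security.KeyStore;
    import java.security.SecureRandom;
    import javax.net.ssl.KeyManagerFactory;
    import javax.net.ssl.SSLContext;
    import javax.net.ssl.TrustManagerFactory;

    public final class SslContextSketch {
        public static void main(String[] args) throws Exception {
            // "key-store" / "key-store-password": the server-side JKS key store.
            KeyStore keyStore = KeyStore.getInstance("JKS");
            try (FileInputStream in = new FileInputStream("keystore")) {
                keyStore.load(in, "changeme".toCharArray());
            }
            KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
            kmf.init(keyStore, "changeme".toCharArray()); // "key-password"

            // "trust-store" / "trust-store-password": certificates trusted when verifying peers.
            KeyStore trustStore = KeyStore.getInstance("JKS");
            try (FileInputStream in = new FileInputStream("truststore")) {
                trustStore.load(in, "changeme".toCharArray());
            }
            TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
            tmf.init(trustStore);

            // "protocol" selects the SSLContext protocol version.
            SSLContext context = SSLContext.getInstance("TLSv1.2");
            context.init(kmf.getKeyManagers(), tmf.getTrustManagers(), new SecureRandom());
            // 'context' would then be used to create SSLEngine instances for the transport.
        }
    }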
+
+#//#artery
+akka {
+
+  remote {
+
+    ### Configuration for Artery, the new implementation of remoting
+    artery {
+
+      # Disable artery with this flag
+      enabled = on
+
+      # Select the underlying transport implementation.
+      #
+      # Possible values: aeron-udp, tcp, tls-tcp
+      # See https://doc.akka.io/docs/akka/current/remoting-artery.html#selecting-a-transport for the tradeoffs
+      # for each transport
+      transport = tcp
+
+      # Canonical address is the address other clients should connect to.
+      # Artery transport will expect messages to this address.
+      canonical {
+
+        # The default remote server port clients should connect to.
+        # Default is 25520, use 0 if you want a random available port
+        # This port needs to be unique for each actor system on the same machine.
+        port = 25520
+
+        # Hostname clients should connect to. Can be set to an ip, hostname
+        # or one of the following special values:
+        #   "<getHostAddress>"   InetAddress.getLocalHost.getHostAddress
+        #   "<getHostName>"      InetAddress.getLocalHost.getHostName
+        #
+        hostname = "<getHostAddress>"
+      }
+
+      # Use these settings to bind a network interface to a different address
+      # than artery expects messages at. This may be used when running Akka
+      # nodes in separated networks (under NATs or in containers). If canonical
+      # and bind addresses are different, then network configuration that relays
+      # communications from canonical to bind addresses is expected.
+      bind {
+
+        # Port to bind a network interface to. Can be set to a port number
+        # or one of the following special values:
+        #   0    random available port
+        #   ""   akka.remote.artery.canonical.port
+        #
+        port = ""
+
+        # Hostname to bind a network interface to. Can be set to an ip, hostname
+        # or one of the following special values:
+        #   "0.0.0.0"            all interfaces
+        #   ""                   akka.remote.artery.canonical.hostname
+        #   "<getHostAddress>"   InetAddress.getLocalHost.getHostAddress
+        #   "<getHostName>"      InetAddress.getLocalHost.getHostName
+        #
+        hostname = ""
+
+        # Time to wait for Aeron/TCP to bind
+        bind-timeout = 3s
+      }
+
+
+      # Actor paths to use the large message stream for when a message
+      # is sent to them over remoting. The dedicated large message stream
+      # is separate from "normal" and system messages so that sending a
+      # large message does not interfere with them.
+      # Entries should be the full path to the actor. Wildcards in the form of "*"
+      # can be supplied at any place and matches any name at that segment -
+      # "/user/supervisor/actor/*" will match any direct child to actor,
+      # while "/supervisor/*/child" will match any grandchild to "supervisor" that
+      # has the name "child"
+      # Entries have to be specified on both the sending and receiving side.
+      # Messages sent to ActorSelections will not be passed through the large message
+      # stream; to pass such messages through the large message stream, the selections
+      # must be resolved to ActorRefs first.
+      large-message-destinations = []
+
+      # Enable untrusted mode, which discards inbound system messages, PossiblyHarmful and
+      # ActorSelection messages. E.g. remote watch and remote deployment will not work.
+      # ActorSelection messages can be enabled for specific paths with the trusted-selection-paths
+      untrusted-mode = off
+
+      # When 'untrusted-mode=on' inbound actor selections are by default discarded.
+      # Actors with paths defined in this list are granted permission to receive actor
+      # selections messages.
+      # E.g. trusted-selection-paths = ["/user/receptionist", "/user/namingService"]
+      trusted-selection-paths = []
+
+      # If this is "on", all inbound remote messages will be logged at DEBUG level,
+      # if off then they are not logged
+      log-received-messages = off
+
+      # If this is "on", all outbound remote messages will be logged at DEBUG level,
+      # if off then they are not logged
+      log-sent-messages = off
+
+      # Logging of message types with payload size in bytes larger than
+      # this value. Maximum detected size per message type is logged once,
+      # with an increase threshold of 10%.
+      # By default this feature is turned off. Activate it by setting the property to
+      # a value in bytes, such as 1000b. Note that for all messages larger than this
+      # limit there will be extra performance and scalability cost.
+      log-frame-size-exceeding = off
+
+      advanced {
+
+        # Maximum serialized message size, including header data.
+        maximum-frame-size = 256 KiB
+
+        # Direct byte buffers are reused in a pool with this maximum size.
+        # Each buffer has the size of 'maximum-frame-size'.
+        # This is not a hard upper limit on number of created buffers. Additional
+        # buffers will be created if needed, e.g. when using many outbound
+        # associations at the same time. Such additional buffers will be garbage
+        # collected, which is not as efficient as reusing buffers in the pool.
+        buffer-pool-size = 128
+
+        # Maximum serialized message size for the large messages, including header data.
+        # If the value of akka.remote.artery.transport is set to aeron-udp, it is currently
+        # restricted to 1/8th the size of a term buffer that can be configured by setting the
+        # 'aeron.term.buffer.length' system property.
+        # See 'large-message-destinations'.
+        maximum-large-frame-size = 2 MiB
+
+        # Direct byte buffers for the large messages are reused in a pool with this maximum size.
+        # Each buffer has the size of 'maximum-large-frame-size'.
+        # See 'large-message-destinations'.
+        # This is not a hard upper limit on number of created buffers. Additional
+        # buffers will be created if needed, e.g. when using many outbound
+        # associations at the same time. Such additional buffers will be garbage
+        # collected, which is not as efficient as reusing buffers in the pool.
+        large-buffer-pool-size = 32
+
+        # For enabling testing features, such as blackhole in akka-remote-testkit.
+        test-mode = off
+
+        # Settings for the materializer that is used for the remote streams.
+        materializer = ${akka.stream.materializer}
+
+        # Remoting will use the given dispatcher for the ordinary and large message
+        # streams.
+        use-dispatcher = "akka.remote.default-remote-dispatcher"
+
+        # Remoting will use the given dispatcher for the control stream.
+        # It can be good to not use the same dispatcher for the control stream as
+        # the dispatcher for the ordinary message stream so that heartbeat messages
+        # are not disturbed.
+        use-control-stream-dispatcher = "akka.actor.internal-dispatcher"
+
+
+        # Total number of inbound lanes, shared among all inbound associations. A value
+        # greater than 1 means that deserialization can be performed in parallel for
+        # different destination actors. The selection of lane is based on consistent
+        # hashing of the recipient ActorRef to preserve message ordering per receiver.
+        # Lowest latency can be achieved with inbound-lanes=1 because of one less
+        # asynchronous boundary.
+        inbound-lanes = 4
+
+        # Number of outbound lanes for each outbound association. A value greater than 1
+        # means that serialization and other work can be performed in parallel for different
+        # destination actors. The selection of lane is based on consistent hashing of the
+        # recipient ActorRef to preserve message ordering per receiver. Note that messages
+        # for different destination systems (hosts) are handled by different streams also
+        # when outbound-lanes=1. Lowest latency can be achieved with outbound-lanes=1
+        # because of one less asynchronous boundary.
+        outbound-lanes = 1
+
+        # Size of the send queue for outgoing messages. Messages will be dropped if
+        # the queue becomes full. This may happen if you send a burst of many messages
+        # without end-to-end flow control. Note that there is one such queue per
+        # outbound association. The trade-off of using a larger queue size is that
+        # it consumes more memory, since the queue is based on preallocated array with
+        # fixed size.
+        outbound-message-queue-size = 3072
+
+        # Size of the send queue for outgoing control messages, such as system messages.
+        # If this limit is reached the remote system is declared to be dead and its UID
+        # marked as quarantined. Note that there is one such queue per outbound association.
+        # It is a linked queue so it will not use more memory than needed but by increasing
+        # too much you may risk OutOfMemoryError in the worst case.
+        outbound-control-queue-size = 20000
+
+        # Size of the send queue for outgoing large messages. Messages will be dropped if
+        # the queue becomes full. This may happen if you send a burst of many messages
+        # without end-to-end flow control. Note that there is one such queue per
+        # outbound association.
+        # It is a linked queue so it will not use more memory than needed but by increasing
+        # too much you may risk OutOfMemoryError, especially since the message payload
+        # of these messages may be large.
+        outbound-large-message-queue-size = 256
+
+        # This setting defines the maximum number of unacknowledged system messages
+        # allowed for a remote system. If this limit is reached the remote system is
+        # declared to be dead and its UID marked as quarantined.
+        system-message-buffer-size = 20000
+
+        # unacknowledged system messages are re-delivered with this interval
+        system-message-resend-interval = 1 second
+
+
+
+        # The timeout for outbound associations to perform the initial handshake.
+        # This timeout must be greater than the 'image-liveness-timeout' when
+        # transport is aeron-udp.
+        handshake-timeout = 20 seconds
+
+        # incomplete initial handshake attempt is retried with this interval
+        handshake-retry-interval = 1 second
+
+        # Handshake requests are performed periodically with this interval,
+        # also after the handshake has been completed to be able to establish
+        # a new session with a restarted destination system.
+        inject-handshake-interval = 1 second
+
+
+        # System messages that are not acknowledged after re-sending for this period are
+        # dropped and will trigger quarantine. The value should be longer than the length
+        # of a network partition that you need to survive.
+        give-up-system-message-after = 6 hours
+
+        # Outbound streams are stopped when they haven't been used for this duration.
+        # They are started again when new messages are sent.
+        stop-idle-outbound-after = 5 minutes
+
+        # Outbound streams are quarantined when they haven't been used for this duration
+        # to cleanup resources used by the association, such as compression tables.
+        # This will clean up associations to crashed systems that didn't announce their
+        # termination.
+        # The value should be longer than the length of a network partition that you
+        # need to survive.
+        # The value must also be greater than stop-idle-outbound-after.
+        # Once every 1/10 of this duration an extra handshake message will be sent.
+        # Therefore it's also recommended to use a value that is greater than 10 times
+        # the stop-idle-outbound-after, since otherwise the idle streams will not be
+        # stopped.
+        quarantine-idle-outbound-after = 6 hours
+
+        # Stop outbound stream of a quarantined association after this idle timeout, i.e.
+        # when not used any more.
+        stop-quarantined-after-idle = 3 seconds
+
+        # After catastrophic communication failures that could result in the loss of system
+        # messages or after the remote DeathWatch triggers the remote system gets
+        # quarantined to prevent inconsistent behavior.
+        # This setting controls how long the quarantined association will be kept around
+        # before being removed to avoid long-term memory leaks. It must be quarantined
+        # and also unused for this duration before it's removed. When removed the historical
+        # information about which UIDs were quarantined for that hostname:port is
+        # gone which could result in communication with a previously quarantined node
+        # if it wakes up again. Therefore this shouldn't be set too low.
+        remove-quarantined-association-after = 1 h
+
+        # during ActorSystem termination the remoting will wait this long for
+        # an acknowledgment by the destination system that flushing of outstanding
+        # remote messages has been completed
+        shutdown-flush-timeout = 1 second
+
+        # Before sending notification of a terminated actor (DeathWatchNotification) other messages
+        # will be flushed to make sure that the Terminated message arrives after other messages.
+        # It will wait this long for the flush acknowledgement before continuing.
+        # The flushing can be disabled by setting this to `off`.
+        death-watch-notification-flush-timeout = 3 seconds
+
+        # See 'inbound-max-restarts'
+        inbound-restart-timeout = 5 seconds
+
+        # Max number of restarts within 'inbound-restart-timeout' for the inbound streams.
+        # If more restarts occur the ActorSystem will be terminated.
+        inbound-max-restarts = 5
+
+        # Retry outbound connection after this backoff.
+        # Only used when transport is tcp or tls-tcp.
+        outbound-restart-backoff = 1 second
+
+        # See 'outbound-max-restarts'
+        outbound-restart-timeout = 5 seconds
+
+        # Max number of restarts within 'outbound-restart-timeout' for the outbound streams.
+        # If more restarts occur the ActorSystem will be terminated.
+        outbound-max-restarts = 5
+
+        # compression of common strings in remoting messages, like actor destinations, serializers etc
+        compression {
+
+          actor-refs {
+            # Max number of compressed actor-refs
+            # Note that compression tables are "rolling" (i.e. a new table replaces the old
+            # compression table once in a while), and this setting is only about the total number
+            # of compressions within a single such table.
+            # Must be a positive natural number. Can be disabled with "off".
+            max = 256
+
+            # interval between new table compression advertisements.
+            # this means the time during which we collect heavy-hitter data and then turn it into a compression table.
+            advertisement-interval = 1 minute
+          }
+          manifests {
+            # Max number of compressed manifests
+            # Note that compression tables are "rolling" (i.e. a new table replaces the old
+            # compression table once in a while), and this setting is only about the total number
+            # of compressions within a single such table.
+            # Must be a positive natural number. Can be disabled with "off".
+            max = 256
+
+            # interval between new table compression advertisements.
+            # this means the time during which we collect heavy-hitter data and then turn it into a compression table.
+            advertisement-interval = 1 minute
+          }
+        }
+
+        # List of fully qualified class names of remote instruments which should
+        # be initialized and used for monitoring of remote messages.
+        # The class must extend akka.remote.artery.RemoteInstrument and
+        # have a public constructor with empty parameters or one ExtendedActorSystem
+        # parameter.
+        # A new instance of RemoteInstrument will be created for each encoder and decoder.
+        # It's only called from the stage, so if it doesn't delegate to any shared instance
+        # it doesn't have to be thread-safe.
+        # Refer to `akka.remote.artery.RemoteInstrument` for more information.
+        instruments = ${?akka.remote.artery.advanced.instruments} []
+
+        # Only used when transport is aeron-udp
+        aeron {
+          # Periodically log out all Aeron counters. See https://github.com/real-logic/aeron/wiki/Monitoring-and-Debugging#counters
+          # Only used when transport is aeron-udp.
+          log-aeron-counters = false
+
+          # Controls whether to start the Aeron media driver in the same JVM or use external
+          # process. Set to 'off' when using external media driver, and then also set the
+          # 'aeron-dir'.
+          # Only used when transport is aeron-udp.
+          embedded-media-driver = on
+
+          # Directory used by the Aeron media driver. It's mandatory to define the 'aeron-dir'
+          # if using external media driver, i.e. when 'embedded-media-driver = off'.
+          # The embedded media driver will use this directory, or a temporary directory if this
+          # property is not defined (empty).
+          # Only used when transport is aeron-udp.
+          aeron-dir = ""
+
+          # Whether to delete aeron embedded driver directory upon driver stop.
+          # Only used when transport is aeron-udp.
+          delete-aeron-dir = yes
+
+          # Level of CPU time used, on a scale between 1 and 10, during backoff/idle.
+          # The tradeoff is that to have low latency more CPU time must be used to be
+          # able to react quickly on incoming messages or send as fast as possible after
+          # backoff backpressure.
+          # Level 1 strongly prefers low CPU consumption over low latency.
+          # Level 10 strongly prefers low latency over low CPU consumption.
+          # Only used when transport is aeron-udp.
+          idle-cpu-level = 5
+
+          # messages that are not accepted by Aeron are dropped after retrying for this period
+          # Only used when transport is aeron-udp.
+          give-up-message-after = 60 seconds
+
+          # Timeout after which aeron driver has not had keepalive messages
+          # from a client before it considers the client dead.
+          # Only used when transport is aeron-udp.
+          client-liveness-timeout = 20 seconds
+
+          # Timeout after which an uncommitted publication will be unblocked
+          # Only used when transport is aeron-udp.
+          publication-unblock-timeout = 40 seconds
+
+          # Timeout for each of the INACTIVE and LINGER stages an aeron image
+          # will be retained for when it is no longer referenced.
+          # This timeout must be less than the 'handshake-timeout'.
+          # Only used when transport is aeron-udp.
+          image-liveness-timeout = 10 seconds
+
+          # Timeout after which the aeron driver is considered dead
+          # if it does not update its C'n'C timestamp.
+          # Only used when transport is aeron-udp.
+          driver-timeout = 20 seconds
+        }
+
+        # Only used when transport is tcp or tls-tcp.
+        tcp {
+          # Timeout of establishing outbound connections.
+          connection-timeout = 5 seconds
+
+          # The local address that is used for the client side of the TCP connection.
+          outbound-client-hostname = ""
+        }
+
+      }
+
+      # SSL configuration that is used when transport=tls-tcp.
+      ssl {
+        # Factory of SSLEngine.
+        # Must implement akka.remote.artery.tcp.SSLEngineProvider and have a public
+        # constructor with an ActorSystem parameter.
+        # The default ConfigSSLEngineProvider is configured by properties in section
+        # akka.remote.artery.ssl.config-ssl-engine
+        ssl-engine-provider = akka.remote.artery.tcp.ConfigSSLEngineProvider
+
+        # Config of akka.remote.artery.tcp.ConfigSSLEngineProvider
+        config-ssl-engine {
+
+          # This is the Java Key Store used by the server connection
+          key-store = "keystore"
+
+          # This password is used for decrypting the key store
+          # Use substitution from environment variables for passwords. Don't define
+          # real passwords in config files. key-store-password=${SSL_KEY_STORE_PASSWORD}
+          key-store-password = "changeme"
+
+          # This password is used for decrypting the key
+          # Use substitution from environment variables for passwords. Don't define
+          # real passwords in config files. key-password=${SSL_KEY_PASSWORD}
+          key-password = "changeme"
+
+          # This is the Java Key Store used by the client connection
+          trust-store = "truststore"
+
+          # This password is used for decrypting the trust store
+          # Use substitution from environment variables for passwords. Don't define
+          # real passwords in config files. trust-store-password=${SSL_TRUST_STORE_PASSWORD}
+          trust-store-password = "changeme"
+
+          # Protocol to use for SSL encryption.
+          protocol = "TLSv1.2"
+
+          # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", 
+          #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+          #   "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+          #   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+          # When doing rolling upgrades, make sure to include both the algorithm used 
+          # by old nodes and the preferred algorithm.
+          # If you use a JDK 8 prior to 8u161 you need to install
+          # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+          # More info here:
+          # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+          enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+            "TLS_RSA_WITH_AES_128_CBC_SHA"]
+
+          # There are two options, and the default SecureRandom is recommended:
+          # "" or "SecureRandom" => (default)
+          # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+          #
+          # Setting a value here may require you to supply the appropriate cipher
+          # suite (see enabled-algorithms section above)
+          random-number-generator = ""
+
+          # Require mutual authentication between TLS peers
+          #
+          # Without mutual authentication only the peer that actively establishes a connection (TLS client side)
+          # checks if the passive side (TLS server side) sends over a trusted certificate. With the flag turned on,
+          # the passive side will also request and verify a certificate from the connecting peer.
+          #
+          # To prevent man-in-the-middle attacks this setting is enabled by default.
+          require-mutual-authentication = on
+
+          # Set this to `on` to verify hostnames with sun.security.util.HostnameChecker
+          # If possible it is recommended to have this enabled. Hostname verification is designed for
+          # situations where things locate each other by hostname; in scenarios where host names are dynamic
+          # and not known up front it can make sense to have this disabled.
+          hostname-verification = off
+        }
+
+        # Config of akka.remote.artery.tcp.ssl.RotatingKeysSSLEngineProvider
+        # This engine provider reads PEM files from a mount point shared with the secret
+        # manager. The constructed SSLContext is cached for some time (configurable) so when
+        # the credentials rotate the new credentials are eventually picked up.
+        # By default mTLS is enabled.
+        # This provider also includes a verification phase that runs after the TLS handshake
+        # phase. In this verification, both peers run an authorization and verify they are
+        # part of the same Akka cluster. The verification compares the subject
+        # names in the peer's certificate with the names on its own certificate, so if you
+        # use this SSLEngineProvider you should make sure all nodes in the cluster include
+        # at least one common subject name (CN or SAN).
+        # The Key setup this implementation supports has some limitations:
+        #   1. the private key must be provided in a PKCS#1 or a non-encrypted PKCS#8 PEM-formatted file
+        #   2. the private key must be of an algorithm supported by `akka-pki` tools (e.g. "RSA", not "EC")
+        #   3. the node certificate must be issued by a root CA (not an intermediate CA)
+        #   4. both the node and the CA certificates must be provided in PEM-formatted files
+        rotating-keys-engine {
+
+          # This is a convention that people may follow if they wish to save themselves some configuration
+          secret-mount-point = /var/run/secrets/akka-tls/rotating-keys-engine
+
+          # The absolute path to the PEM file with the private key.
+          key-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.key
+          # The absolute path to the PEM file of the certificate for the private key above.
+          cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/tls.crt
+          # The absolute path to the PEM file of the certificate of the CA that emitted
+          # the node certificate above.
+          ca-cert-file = ${akka.remote.artery.ssl.rotating-keys-engine.secret-mount-point}/ca.crt
+
+          # There are two options, and the default SecureRandom is recommended:
+          # "" or "SecureRandom" => (default)
+          # "SHA1PRNG" => Can be slow because of blocking issues on Linux
+          #
+          # Setting a value here may require you to supply the appropriate cipher
+          # suite (see enabled-algorithms section)
+          random-number-generator = ""
+
+          # Example: ["TLS_DHE_RSA_WITH_AES_128_GCM_SHA256",
+          #   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+          #   "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384",
+          #   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+          # If you use a JDK 8 prior to 8u161 you need to install
+          # the JCE Unlimited Strength Jurisdiction Policy Files to use AES 256.
+          # More info here:
+          # https://www.oracle.com/java/technologies/javase-jce-all-downloads.html
+          enabled-algorithms = ["TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"]
+
+          # Protocol to use for SSL encryption.
+          protocol = "TLSv1.2"
+
+          # How long should an SSLContext instance be cached. When rotating keys and certificates,
+          # there must be a time overlap between the old certificate/key and the new ones. The
+          # value of this setting should be lower than the duration of that overlap.
+          ssl-context-cache-ttl = 5m
+        }
+      }
+    }
+  }
+
+}
+#//#artery
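The artery settings above are normally overridden per deployment rather than edited in this reference file. A minimal Java sketch using the Typesafe Config API to override the transport and canonical address before creating an ActorSystem; the system name, hostname and port below are placeholders, not values from this change:

    import akka.actor.ActorSystem;
    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    public final class ArteryOverrideSketch {
        public static void main(String[] args) {
            // Override a few artery settings; everything else falls back to the
            // packaged reference.conf defaults shown above.
            Config overrides = ConfigFactory.parseString(
                "akka.remote.artery.transport = tls-tcp\n"
                + "akka.remote.artery.canonical.hostname = \"10.0.0.1\"\n"
                + "akka.remote.artery.canonical.port = 2550\n");

            ActorSystem system = ActorSystem.create("example", overrides.withFallback(ConfigFactory.load()));
            system.terminate();
        }
    }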
diff --git a/akka/repackaged-akka-jar/src/main/resources/stream_reference.conf b/akka/repackaged-akka-jar/src/main/resources/stream_reference.conf
new file mode 100644 (file)
index 0000000..66d9130
--- /dev/null
@@ -0,0 +1,200 @@
+#####################################
+# Akka Stream Reference Config File #
+#####################################
+
+# eager creation of the system wide materializer
+akka.library-extensions += "akka.stream.SystemMaterializer$"
+akka {
+  stream {
+
+    # Default materializer settings
+    materializer {
+
+      # Initial size of buffers used in stream elements
+      initial-input-buffer-size = 4
+      # Maximum size of buffers used in stream elements
+      max-input-buffer-size = 16
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # or full dispatcher configuration to be used by ActorMaterializer when creating Actors.
+      dispatcher = "akka.actor.default-dispatcher"
+
+      # Fully qualified config path which holds the dispatcher configuration
+      # or full dispatcher configuration to be used by stream operators that
+      # perform blocking operations
+      blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+
+      # Cleanup leaked publishers and subscribers when they are not used within a given
+      # deadline
+      subscription-timeout {
+        # when the subscription timeout is reached, one of the following strategies is
+        # applied to the "stale" publisher:
+        # cancel - cancel it (via `onError` or subscribing to the publisher and
+        #          `cancel()`ing the subscription right away)
+        # warn   - log a warning statement about the stale element (then drop the
+        #          reference to it)
+        # noop   - do nothing (not recommended)
+        mode = cancel
+
+        # time after which a subscriber / publisher is considered stale and eligible
+        # for cancelation (see `akka.stream.subscription-timeout.mode`)
+        timeout = 5s
+      }
+
+      # Enable additional troubleshooting logging at DEBUG log level
+      debug-logging = off
+
+      # Maximum number of elements emitted in batch if downstream signals large demand
+      output-burst-limit = 1000
+
+      # Enable automatic fusing of all graphs that are run. For short-lived streams
+      # this may cause an initial runtime overhead, but most of the time fusing is
+      # desirable since it reduces the number of Actors that are created.
+      # Deprecated, since Akka 2.5.0, setting does not have any effect.
+      auto-fusing = on
+
+      # Those stream elements which have explicit buffers (like mapAsync, mapAsyncUnordered,
+      # buffer, flatMapMerge, Source.actorRef, Source.queue, etc.) will preallocate a fixed
+      # buffer upon stream materialization if the requested buffer size is less than this
+      # configuration parameter. The default is very high because failing early is better
+      # than failing under load.
+      #
+      # Buffers sized larger than this will dynamically grow/shrink and consume more memory
+      # per element than the fixed size buffers.
+      max-fixed-buffer-size = 1000000000
+
+      # Maximum number of sync messages that actor can process for stream to substream communication.
+      # The parameter allows interrupting synchronous processing to get upstream/downstream messages.
+      # This accelerates message processing that happens within the same actor while keeping the system responsive.
+      sync-processing-limit = 1000
+
+      debug {
+        # Enables the fuzzing mode which increases the chance of race conditions
+        # by aggressively reordering events and making certain operations more
+        # concurrent than usual.
+        # This setting is for testing purposes, NEVER enable this in a production
+        # environment!
+        # To get the best results, try combining this setting with a throughput
+        # of 1 on the corresponding dispatchers.
+        fuzzing-mode = off
+      }
+
+      io.tcp {
+        # The outgoing bytes are accumulated in a buffer while waiting for acknowledgment
+        # of pending write. This improves throughput for small messages (frames) without
+        # sacrificing latency. While waiting for the ack the stage will eagerly pull
+        # from upstream until the buffer exceeds this size. That means that the buffer may hold
+        # slightly more bytes than this limit (at most one element more). It can be set to 0
+        # to disable the usage of the buffer.
+        write-buffer-size = 16 KiB
+
+        # In addition to the buffering described for property write-buffer-size, try to collect
+        # more consecutive writes from the upstream stream producers.
+        #
+        # The rationale is to increase write efficiency by avoiding separate small
+        # writes to the network which is expensive to do. Merging those writes together
+        # (up to `write-buffer-size`) improves throughput for small writes.
+        #
+        # The idea is that a running stream may produce multiple small writes consecutively
+        # in one go without waiting for any external input. To probe the stream for
+        # data, this feature delays sending a write immediately by probing the stream
+        # for more writes. This works by rescheduling the TCP connection stage via the
+        # actor mailbox of the underlying actor. Thus, before the stage is reactivated
+        # the upstream gets another opportunity to emit writes.
+        #
+        # When the stage is reactivated and if new writes are detected another round-trip
+        # is scheduled. The loop repeats until either the number of round trips given in this
+        # setting is reached, the buffer reaches `write-buffer-size`, or no new writes
+        # were detected during the last round-trip.
+        #
+        # This mechanism ensures that a write is guaranteed to be sent when the remaining stream
+        # becomes idle waiting for external signals.
+        #
+        # In most cases, the extra latency this mechanism introduces should be negligible,
+        # but depending on the stream setup it may introduce a noticeable delay,
+        # if the upstream continuously produces small amounts of writes in a
+        # blocking (CPU-bound) way.
+        #
+        # In that case, the feature can either be disabled, or the producing CPU-bound
+        # work can be taken off-stream to avoid excessive delays (e.g. using `mapAsync` instead of `map`).
+        #
+        # A value of 0 disables this feature.
+        coalesce-writes = 10
+      }
+
+      # Time to wait for async materializer creation before throwing an exception
+      creation-timeout = 20 seconds
+
+      //#stream-ref
+      # configure defaults for SourceRef and SinkRef
+      stream-ref {
+        # Buffer of a SinkRef that is used to batch Request elements from the other side of the stream ref
+        #
+        # An attempt is made to eagerly fill the buffer even while the local stage has not requested elements,
+        # because the delay of requesting over network boundaries is much higher.
+        buffer-capacity = 32
+
+        # Demand is signalled by sending a cumulative demand message ("requesting messages until the n-th sequence number")
+        # Using a cumulative demand model allows us to re-deliver the demand message in case of message loss (which should
+        # be very rare in any case, yet possible -- mostly under connection break-down and re-establishment).
+        #
+        # The semantics of handling and updating the demand however are in-line with what Reactive Streams dictates.
+        #
+        # In normal operation, demand is signalled in response to arriving elements, however if no new elements arrive
+        # within `demand-redelivery-interval` a re-delivery of the demand will be triggered, assuming that it may have gotten lost.
+        demand-redelivery-interval = 1 second
+
+        # Subscription timeout, during which the "remote side" MUST subscribe (materialize) the handed out stream ref.
+        # This timeout does not have to be very low in normal situations, since the remote side may also need to
+        # prepare things before it is ready to materialize the reference. However the timeout is needed to avoid leaking
+        # inactive streams which are never subscribed to.
+        subscription-timeout = 30 seconds
+
+        # In order to guard the receiving end of a stream ref from never terminating (since it is awaiting a Completion or Failed
+        # message) after / before a Terminated is seen, a special timeout is applied once Terminated is received by it.
+        # This allows us to terminate stream refs that have been targeted to other nodes which are Downed, and as such the
+        # other side of the stream ref would never send the "final" terminal message.
+        #
+        # The timeout specifically means the time between the Terminated signal being received and when the local SourceRef
+        # determines to fail itself, assuming there was message loss or a complete partition of the completion signal.
+        final-termination-signal-deadline = 2 seconds
+      }
+      //#stream-ref
+    }
+
+    # Deprecated, left here to not break Akka HTTP which refers to it
+    blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+
+    # Deprecated, will not be used unless user code refers to it; use 'akka.stream.materializer.blocking-io-dispatcher'
+    # instead, or if from code, prefer the 'ActorAttributes.IODispatcher' attribute
+    default-blocking-io-dispatcher = "akka.actor.default-blocking-io-dispatcher"
+  }
+
+  # configure overrides to ssl-configuration here (to be used by akka-streams, and akka-http – i.e. when serving https connections)
+  ssl-config {
+    protocol = "TLSv1.2"
+  }
+
+  actor {
+
+    serializers {
+      akka-stream-ref = "akka.stream.serialization.StreamRefSerializer"
+    }
+
+    serialization-bindings {
+      "akka.stream.SinkRef"                           = akka-stream-ref
+      "akka.stream.SourceRef"                         = akka-stream-ref
+      "akka.stream.impl.streamref.StreamRefsProtocol" = akka-stream-ref
+    }
+
+    serialization-identifiers {
+      "akka.stream.serialization.StreamRefSerializer" = 30
+    }
+  }
+}
+
+# ssl configuration
+# folded in from former ssl-config-akka module
+ssl-config {
+  logger = "com.typesafe.sslconfig.akka.util.AkkaLoggerBridge"
+}
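The materializer buffer settings above (initial-input-buffer-size and max-input-buffer-size) can also be tuned per stream through attributes. Below is a small sketch assuming Akka 2.6-style javadsl APIs; the buffer bounds simply echo the defaults from this file and the system name is a placeholder:

    import akka.actor.ActorSystem;
    import akka.stream.Attributes;
    import akka.stream.javadsl.Sink;
    import akka.stream.javadsl.Source;

    public final class StreamBufferSketch {
        public static void main(String[] args) {
            ActorSystem system = ActorSystem.create("streams");

            // Per-stream override of the input buffer bounds documented above
            // (initial = 4, max = 16 are the reference defaults).
            Source.range(1, 100)
                .withAttributes(Attributes.inputBuffer(4, 16))
                .runWith(Sink.foreach(element -> System.out.println(element)), system)
                .thenRun(system::terminate);
        }
    }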
diff --git a/akka/repackaged-akka/pom.xml b/akka/repackaged-akka/pom.xml
new file mode 100644 (file)
index 0000000..cc22218
--- /dev/null
@@ -0,0 +1,233 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>bundle-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../../bundle-parent</relativePath>
+    </parent>
+
+    <artifactId>repackaged-akka</artifactId>
+    <packaging>bundle</packaging>
+    <name>${project.artifactId}</name>
+
+    <properties>
+        <!-- We are just juggling classes here -->
+        <odlparent.modernizer.skip>true</odlparent.modernizer.skip>
+        <odlparent.spotbugs.skip>true</odlparent.spotbugs.skip>
+
+        <!-- We do not want to generate javadoc -->
+        <maven.javadoc.skip>true</maven.javadoc.skip>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka-jar</artifactId>
+            <version>${project.version}</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>ssl-config-core_2.13</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.aeron</groupId>
+            <artifactId>aeron-client</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.aeron</groupId>
+            <artifactId>aeron-driver</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty</artifactId>
+            <version>3.10.6.Final</version>
+        </dependency>
+        <dependency>
+            <groupId>org.agrona</groupId>
+            <artifactId>agrona</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.reactivestreams</groupId>
+            <artifactId>reactive-streams</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.lmdbjava</groupId>
+            <artifactId>lmdbjava</artifactId>
+            <version>0.7.0</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>com.github.jnr</groupId>
+                    <artifactId>jffi</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.github.jnr</groupId>
+                    <artifactId>jnr-ffi</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>com.github.jnr</groupId>
+                    <artifactId>jnr-constants</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-reflect</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang.modules</groupId>
+            <artifactId>scala-java8-compat_2.13</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang.modules</groupId>
+            <artifactId>scala-parser-combinators_2.13</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>unpack-license</id>
+                        <configuration>
+                            <!-- Akka is Apache-2.0 licensed -->
+                            <skip>true</skip>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>unpack</id>
+                        <phase>compile</phase>
+                        <goals>
+                            <goal>unpack</goal>
+                        </goals>
+                        <configuration>
+                            <artifactItems>
+                                <artifactItem>
+                                    <groupId>org.opendaylight.controller</groupId>
+                                    <artifactId>repackaged-akka-jar</artifactId>
+                                    <version>${project.version}</version>
+                                </artifactItem>
+                                <artifactItem>
+                                    <groupId>com.hierynomus</groupId>
+                                    <artifactId>asn-one</artifactId>
+                                    <version>0.4.0</version>
+                                </artifactItem>
+                            </artifactItems>
+                            <overWriteReleases>false</overWriteReleases>
+                            <overWriteSnapshots>true</overWriteSnapshots>
+                            <outputDirectory>${project.build.directory}/classes</outputDirectory>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>unpack-sources</id>
+                        <phase>prepare-package</phase>
+                        <goals>
+                            <goal>unpack-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <classifier>sources</classifier>
+                            <includeArtifactIds>repackaged-akka-jar</includeArtifactIds>
+                            <outputDirectory>${project.build.directory}/shaded-sources</outputDirectory>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-antrun-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>move-resources</id>
+                        <phase>prepare-package</phase>
+                        <goals>
+                            <goal>run</goal>
+                        </goals>
+                        <configuration>
+                            <target>
+                                <move todir="${project.build.directory}/resources">
+                                    <fileset dir="${project.build.directory}/classes">
+                                        <include name="*.conf"/>
+                                    </fileset>
+                                </move>
+                            </target>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>shaded-sources</id>
+                        <phase>prepare-package</phase>
+                        <goals>
+                           <goal>add-source</goal>
+                        </goals>
+                        <configuration>
+                            <sources>${project.build.directory}/shaded-sources</sources>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>shaded-resources</id>
+                        <phase>prepare-package</phase>
+                        <goals>
+                           <goal>add-resource</goal>
+                        </goals>
+                        <configuration>
+                            <resources>
+                                <resource>
+                                    <directory>${project.build.directory}/resources</directory>
+                                </resource>
+                            </resources>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <extensions>true</extensions>
+                <configuration>
+                    <instructions>
+                        <Automatic-Module-Name>org.opendaylight.controller.repackaged.akka</Automatic-Module-Name>
+                        <Export-Package>
+                            akka.*,
+                            com.typesafe.sslconfig.akka.*,
+                            jdk.jfr,
+                        </Export-Package>
+                        <Import-Package>
+                            sun.misc;resolution:=optional,
+                            sun.reflect;resolution:=optional,
+                            org.fusesource.leveldbjni;resolution:=optional,
+                            org.iq80.leveldb;resolution:=optional,
+                            org.iq80.leveldb.impl;resolution:=optional,
+                            *
+                        </Import-Package>
+                    </instructions>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
similarity index 81%
rename from opendaylight/md-sal/mdsal-artifacts/pom.xml
rename to artifacts/pom.xml
index 51aaca2e3a1f33b8feeed910d5e10cd30263d354..79e8d88f08ab0a1c147aa77d8801def96d6ac31b 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent-lite</artifactId>
-        <version>4.0.9</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-artifacts</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <artifactId>controller-artifacts</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
 
     <dependencyManagement>
         <dependencies>
-            <!-- Core API/implementation -->
+            <!-- Repackaged Akka -->
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>sal-common-api</artifactId>
+                <artifactId>repackaged-akka</artifactId>
                 <version>${project.version}</version>
             </dependency>
+
+            <!-- Atomix -->
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>sal-common-impl</artifactId>
+                <artifactId>atomix-storage</artifactId>
                 <version>${project.version}</version>
             </dependency>
+
+            <!-- Core API/implementation -->
             <dependency>
                 <groupId>${project.groupId}</groupId>
                 <artifactId>sal-common-util</artifactId>
                 <version>${project.version}</version>
             </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-core-api</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-core-spi</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-core-compat</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-broker-impl</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-binding-api</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-binding-broker-impl</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-binding-util</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-connector-api</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>sal-inmemory-datastore</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>mdsal-trace-api</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>mdsal-trace-dom-impl</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>mdsal-trace-binding-impl</artifactId>
-                <version>${project.version}</version>
-            </dependency>
 
             <!-- Test support -->
             <dependency>
             </dependency>
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>sal-binding-broker-impl</artifactId>
+                <artifactId>mdsal-it-base</artifactId>
                 <version>${project.version}</version>
-                <type>test-jar</type>
                 <scope>test</scope>
             </dependency>
 
-            <!-- Features -->
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>features-mdsal</artifactId>
-                <version>${project.version}</version>
-                <classifier>features</classifier>
-                <type>xml</type>
-                <scope>runtime</scope>
-            </dependency>
-            <dependency>
-              <groupId>${project.groupId}</groupId>
-              <artifactId>features-restconf</artifactId>
-              <version>${project.version}</version>
-              <classifier>features</classifier>
-              <type>xml</type>
-              <scope>runtime</scope>
-            </dependency>
-            <dependency>
-              <groupId>${project.groupId}</groupId>
-              <artifactId>features-mdsal-trace</artifactId>
-              <version>${project.version}</version>
-              <classifier>features</classifier>
-              <type>xml</type>
-              <scope>runtime</scope>
-            </dependency>
-
-            <!-- Base model augmentations -->
-            <dependency>
-                <groupId>org.opendaylight.controller.model</groupId>
-                <artifactId>model-inventory</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>org.opendaylight.controller.model</groupId>
-                <artifactId>model-topology</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-
             <!-- Clustered implementation -->
             <dependency>
                 <groupId>${project.groupId}</groupId>
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
                 <artifactId>cds-access-api</artifactId>
-                <version>1.6.0-SNAPSHOT</version>
+                <version>${project.version}</version>
             </dependency>
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
                 <artifactId>cds-access-client</artifactId>
-                <version>1.6.0-SNAPSHOT</version>
+                <version>${project.version}</version>
             </dependency>
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
                 <artifactId>cds-access-client</artifactId>
-                <version>1.6.0-SNAPSHOT</version>
+                <version>${project.version}</version>
                 <type>test-jar</type>
                 <scope>test</scope>
             </dependency>
                 <artifactId>sal-cluster-admin-impl</artifactId>
                 <version>${project.version}</version>
             </dependency>
+            <dependency>
+                <groupId>org.opendaylight.controller</groupId>
+                <artifactId>sal-cluster-admin-karaf-cli</artifactId>
+                <version>${project.version}</version>
+            </dependency>
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
                 <artifactId>cds-dom-api</artifactId>
-                <version>1.6.0-SNAPSHOT</version>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.opendaylight.controller</groupId>
+                <artifactId>cds-mgmt-api</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>eos-dom-akka</artifactId>
+                <version>${project.version}</version>
             </dependency>
 
             <!-- Toaster -->
                 <scope>runtime</scope>
             </dependency>
 
-            <!-- MessageBus -->
+            <!-- Clustering system test support -->
             <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>messagebus-api</artifactId>
+                <groupId>org.opendaylight.controller.samples</groupId>
+                <artifactId>clustering-it-config</artifactId>
                 <version>${project.version}</version>
             </dependency>
             <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>messagebus-spi</artifactId>
+                <groupId>org.opendaylight.controller.samples</groupId>
+                <artifactId>clustering-it-model</artifactId>
                 <version>${project.version}</version>
             </dependency>
             <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>messagebus-impl</artifactId>
+                <groupId>org.opendaylight.controller.samples</groupId>
+                <artifactId>clustering-it-provider</artifactId>
                 <version>${project.version}</version>
             </dependency>
             <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>messagebus-util</artifactId>
+                <groupId>org.opendaylight.controller.samples</groupId>
+                <artifactId>clustering-it-karaf-cli</artifactId>
                 <version>${project.version}</version>
             </dependency>
 
-            <!-- Clustering system test support -->
+            <!-- Config files -->
             <dependency>
-                <groupId>org.opendaylight.controller.samples</groupId>
-                <artifactId>clustering-it-config</artifactId>
+                <!-- finalname="configuration/initial/akka.conf" -->
+                <groupId>${project.groupId}</groupId>
+                <artifactId>sal-clustering-config</artifactId>
                 <version>${project.version}</version>
+                <type>xml</type>
+                <classifier>akkaconf</classifier>
             </dependency>
             <dependency>
-                <groupId>org.opendaylight.controller.samples</groupId>
-                <artifactId>clustering-it-model</artifactId>
+                <!-- finalname="configuration/factory/akka.conf" override="true" -->
+                <groupId>${project.groupId}</groupId>
+                <artifactId>sal-clustering-config</artifactId>
                 <version>${project.version}</version>
+                <type>xml</type>
+                <classifier>factoryakkaconf</classifier>
             </dependency>
             <dependency>
-                <groupId>org.opendaylight.controller.samples</groupId>
-                <artifactId>clustering-it-provider</artifactId>
+                <!-- finalname="configuration/initial/module-shards.conf" -->
+                <groupId>${project.groupId}</groupId>
+                <artifactId>sal-clustering-config</artifactId>
+                <version>${project.version}</version>
+                <type>xml</type>
+                <classifier>moduleshardconf</classifier>
+            </dependency>
+            <dependency>
+                <!-- finalname="configuration/initial/modules.conf" -->
+                <groupId>${project.groupId}</groupId>
+                <artifactId>sal-clustering-config</artifactId>
                 <version>${project.version}</version>
+                <type>xml</type>
+                <classifier>moduleconf</classifier>
+            </dependency>
+            <dependency>
+                <!-- finalname="etc/org.opendaylight.controller.cluster.datastore.cfg" -->
+                <groupId>${project.groupId}</groupId>
+                <artifactId>sal-clustering-config</artifactId>
+                <version>${project.version}</version>
+                <type>cfg</type>
+                <classifier>datastore</classifier>
             </dependency>
 
-            <!-- Features -->
+            <!-- Production Features -->
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>odl-mdsal-broker</artifactId>
+                <artifactId>features-controller</artifactId>
+                <version>${project.version}</version>
+                <classifier>features</classifier>
+                <type>xml</type>
+            </dependency>
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>odl-controller-akka</artifactId>
                 <version>${project.version}</version>
                 <type>xml</type>
                 <classifier>features</classifier>
             </dependency>
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>odl-mdsal-broker-local</artifactId>
+                <artifactId>odl-controller-scala</artifactId>
                 <version>${project.version}</version>
                 <type>xml</type>
                 <classifier>features</classifier>
             </dependency>
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>odl-mdsal-clustering</artifactId>
+                <artifactId>odl-mdsal-broker</artifactId>
+                <version>${project.version}</version>
+                <type>xml</type>
+                <classifier>features</classifier>
+            </dependency>
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>odl-controller-broker-local</artifactId>
                 <version>${project.version}</version>
                 <type>xml</type>
                 <classifier>features</classifier>
                 <type>xml</type>
                 <classifier>features</classifier>
             </dependency>
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>odl-controller-blueprint</artifactId>
+                <version>${project.version}</version>
+                <type>xml</type>
+                <classifier>features</classifier>
+            </dependency>
             <dependency>
                 <groupId>${project.groupId}</groupId>
                 <artifactId>odl-mdsal-distributed-datastore</artifactId>
             </dependency>
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>odl-message-bus-collector</artifactId>
+                <artifactId>odl-toaster</artifactId>
                 <version>${project.version}</version>
                 <type>xml</type>
                 <classifier>features</classifier>
             </dependency>
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>odl-mdsal-model-inventory</artifactId>
+                <artifactId>odl-jolokia</artifactId>
                 <version>${project.version}</version>
                 <type>xml</type>
                 <classifier>features</classifier>
             </dependency>
+
+            <!-- CSIT and other testing Features -->
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>odl-controller-model-topology</artifactId>
+                <artifactId>features-controller-testing</artifactId>
                 <version>${project.version}</version>
-                <type>xml</type>
                 <classifier>features</classifier>
+                <type>xml</type>
             </dependency>
             <dependency>
                 <groupId>${project.groupId}</groupId>
-                <artifactId>odl-toaster</artifactId>
+                <artifactId>odl-mdsal-benchmark</artifactId>
                 <version>${project.version}</version>
                 <type>xml</type>
                 <classifier>features</classifier>
                 <classifier>features</classifier>
             </dependency>
 
+            <!-- Benchmarks -->
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>benchmark-api</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>dsbenchmark</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>ntfbenchmark</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>rpcbenchmark</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+
+            <!-- Experimental Features -->
+            <dependency>
+                <groupId>${project.groupId}</groupId>
+                <artifactId>features-controller-experimental</artifactId>
+                <version>${project.version}</version>
+                <classifier>features</classifier>
+                <type>xml</type>
+            </dependency>
         </dependencies>
     </dependencyManagement>
 </project>
diff --git a/atomix-storage/LICENSE b/atomix-storage/LICENSE
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/atomix-storage/pom.xml b/atomix-storage/pom.xml
new file mode 100644 (file)
index 0000000..8fc7ca3
--- /dev/null
@@ -0,0 +1,153 @@
+<!--
+  ~ Copyright 2017-2021 Open Networking Foundation
+  ~ Copyright 2023 PANTHEON.tech, s.r.o.
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>bundle-parent</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
+    <relativePath>../bundle-parent</relativePath>
+  </parent>
+
+  <artifactId>atomix-storage</artifactId>
+  <name>Atomix Storage</name>
+  <packaging>bundle</packaging>
+
+  <properties>
+    <odlparent.checkstyle.skip>true</odlparent.checkstyle.skip>
+    <odlparent.spotbugs.enforce>false</odlparent.spotbugs.enforce>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-buffer</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>kryo</artifactId>
+      <version>4.0.3</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>minlog</artifactId>
+      <version>1.3.1</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.esotericsoftware</groupId>
+      <artifactId>reflectasm</artifactId>
+      <version>1.11.9</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.objenesis</groupId>
+      <artifactId>objenesis</artifactId>
+      <version>2.6</version>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava-testlib</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <!-- This project has a different license -->
+      <plugin>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>unpack-license</id>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>copy-license</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <copy file="LICENSE" tofile="${project.build.directory}/classes/LICENSE"/>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>check-license</id>
+            <goals>
+              <goal>check</goal>
+            </goals>
+            <configuration>
+              <skip>true</skip>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <Export-Package>
+              io.atomix.storage.journal
+            </Export-Package>
+            <Import-Package>
+              sun.nio.ch;resolution:=optional,
+              sun.misc;resolution:=optional,
+              !COM.newmonics.*,
+              !android.os,
+              *
+            </Import-Package>
+
+            <!-- Kryo is using ancient objenesis, so let's embed it to prevent duplicates -->
+            <Embed-Dependency>
+                *;inline=true;groupId=com.esotericsoftware,
+                *;inline=true;groupId=org.objenesis,
+            </Embed-Dependency>
+          </instructions>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/BufferCleaner.java b/atomix-storage/src/main/java/io/atomix/storage/journal/BufferCleaner.java
new file mode 100644 (file)
index 0000000..8244e57
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2019-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Objects;
+
+import static java.lang.invoke.MethodHandles.constant;
+import static java.lang.invoke.MethodHandles.dropArguments;
+import static java.lang.invoke.MethodHandles.filterReturnValue;
+import static java.lang.invoke.MethodHandles.guardWithTest;
+import static java.lang.invoke.MethodHandles.lookup;
+import static java.lang.invoke.MethodType.methodType;
+
+/**
+ * Utility class which allows explicit calls to the DirectByteBuffer cleaner method instead of relying on GC.
+ */
+public class BufferCleaner {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(BufferCleaner.class);
+
+  /**
+   * Reference to a Cleaner that does unmapping; no-op if not supported.
+   */
+  private static final Cleaner CLEANER;
+
+  static {
+    final Object hack = AccessController.doPrivileged((PrivilegedAction<Object>) BufferCleaner::unmapHackImpl);
+    if (hack instanceof Cleaner) {
+      CLEANER = (Cleaner) hack;
+      LOGGER.debug("java.nio.DirectByteBuffer.cleaner(): available");
+    } else {
+      CLEANER = (ByteBuffer buffer) -> {
+        // noop
+      };
+      LOGGER.debug("java.nio.DirectByteBuffer.cleaner(): unavailable: {}", hack);
+    }
+  }
+
+  private static Object unmapHackImpl() {
+    final MethodHandles.Lookup lookup = lookup();
+    try {
+      try {
+        // *** sun.misc.Unsafe unmapping (Java 9+) ***
+        final Class<?> unsafeClass = Class.forName("sun.misc.Unsafe");
+        // first check if Unsafe has the right method, otherwise we can give up
+        // without doing any security critical stuff:
+        final MethodHandle unmapper = lookup.findVirtual(unsafeClass, "invokeCleaner",
+            methodType(void.class, ByteBuffer.class));
+        // fetch the unsafe instance and bind it to the virtual MH:
+        final Field f = unsafeClass.getDeclaredField("theUnsafe");
+        f.setAccessible(true);
+        final Object theUnsafe = f.get(null);
+        return newBufferCleaner(ByteBuffer.class, unmapper.bindTo(theUnsafe));
+      } catch (SecurityException se) {
+        // rethrow to report errors correctly (we need to catch it here, as we also catch RuntimeException below!):
+        throw se;
+      } catch (ReflectiveOperationException | RuntimeException e) {
+        // *** sun.misc.Cleaner unmapping (Java 8) ***
+        final Class<?> directBufferClass = Class.forName("java.nio.DirectByteBuffer");
+
+        final Method m = directBufferClass.getMethod("cleaner");
+        m.setAccessible(true);
+        final MethodHandle directBufferCleanerMethod = lookup.unreflect(m);
+        final Class<?> cleanerClass = directBufferCleanerMethod.type().returnType();
+
+        /* "Compile" a MH that basically is equivalent to the following code:
+         * void unmapper(ByteBuffer byteBuffer) {
+         *   sun.misc.Cleaner cleaner = ((java.nio.DirectByteBuffer) byteBuffer).cleaner();
+         *   if (Objects.nonNull(cleaner)) {
+         *     cleaner.clean();
+         *   } else {
+         *     noop(cleaner); // the noop is needed because MethodHandles#guardWithTest always needs ELSE
+         *   }
+         * }
+         */
+        final MethodHandle cleanMethod = lookup.findVirtual(cleanerClass, "clean", methodType(void.class));
+        final MethodHandle nonNullTest = lookup.findStatic(Objects.class, "nonNull", methodType(boolean.class, Object.class))
+            .asType(methodType(boolean.class, cleanerClass));
+        final MethodHandle noop = dropArguments(constant(Void.class, null).asType(methodType(void.class)), 0, cleanerClass);
+        final MethodHandle unmapper = filterReturnValue(directBufferCleanerMethod, guardWithTest(nonNullTest, cleanMethod, noop))
+            .asType(methodType(void.class, ByteBuffer.class));
+        return newBufferCleaner(directBufferClass, unmapper);
+      }
+    } catch (SecurityException se) {
+      return "Unmapping is not supported, because not all required permissions are given to the Lucene JAR file: "
+          + se + " [Please grant at least the following permissions: RuntimePermission(\"accessClassInPackage.sun.misc\") "
+          + " and ReflectPermission(\"suppressAccessChecks\")]";
+    } catch (ReflectiveOperationException | RuntimeException e) {
+      return "Unmapping is not supported on this platform, because internal Java APIs are not compatible with this Atomix version: " + e;
+    }
+  }
+
+  private static Cleaner newBufferCleaner(final Class<?> unmappableBufferClass, final MethodHandle unmapper) {
+    return (ByteBuffer buffer) -> {
+      if (!buffer.isDirect()) {
+        return;
+      }
+      if (!unmappableBufferClass.isInstance(buffer)) {
+        throw new IllegalArgumentException("buffer is not an instance of " + unmappableBufferClass.getName());
+      }
+      final Throwable error = AccessController.doPrivileged((PrivilegedAction<Throwable>) () -> {
+        try {
+          unmapper.invokeExact(buffer);
+          return null;
+        } catch (Throwable t) {
+          return t;
+        }
+      });
+      if (error != null) {
+        throw new IOException("Unable to unmap the mapped buffer", error);
+      }
+    };
+  }
+
+  /**
+   * Free {@link ByteBuffer} if possible.
+   */
+  public static void freeBuffer(ByteBuffer buffer) throws IOException {
+    CLEANER.freeBuffer(buffer);
+  }
+}
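
A minimal usage sketch, not part of this patch, of how calling code might release a memory-mapped region eagerly through BufferCleaner instead of waiting for garbage collection. The demo class and the file-path parameter are hypothetical; freeBuffer() degrades to a no-op on platforms where the unmapping hack is unavailable.

    import io.atomix.storage.journal.BufferCleaner;

    import java.io.IOException;
    import java.nio.MappedByteBuffer;
    import java.nio.channels.FileChannel;
    import java.nio.file.Path;
    import java.nio.file.StandardOpenOption;

    class BufferCleanerDemo {
        // Hypothetical helper: map a file, consume it, then unmap it eagerly.
        static void readAndRelease(final Path logFile) throws IOException {
            try (FileChannel channel = FileChannel.open(logFile, StandardOpenOption.READ)) {
                final MappedByteBuffer mapped = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
                // ... consume 'mapped' here ...
                // Explicit unmap; BufferCleaner silently does nothing if the platform hack is unavailable.
                BufferCleaner.freeBuffer(mapped);
            }
        }
    }
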
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Cleaner.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Cleaner.java
new file mode 100644 (file)
index 0000000..d812680
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+@FunctionalInterface
+interface Cleaner {
+
+  /**
+   * Free {@link ByteBuffer} if possible.
+   */
+  void freeBuffer(ByteBuffer buffer) throws IOException;
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/CommitsSegmentJournalReader.java
new file mode 100644 (file)
index 0000000..65f4de6
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * A {@link JournalReader} traversing only committed entries.
+ */
+final class CommitsSegmentJournalReader<E> extends SegmentedJournalReader<E> {
+    CommitsSegmentJournalReader(SegmentedJournal<E> journal, JournalSegment segment) {
+        super(journal, segment);
+    }
+
+    @Override
+    public Indexed<E> tryNext() {
+        return getNextIndex() <= journal.getCommitIndex() ? super.tryNext() : null;
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskFileReader.java
new file mode 100644 (file)
index 0000000..311d16b
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * A {@link StorageLevel#DISK} implementation of {@link FileReader}. Maintains an internal buffer.
+ */
+final class DiskFileReader extends FileReader {
+    /**
+     * Just do not bother with IO smaller than this many bytes.
+     */
+    private static final int MIN_IO_SIZE = 8192;
+
+    private final FileChannel channel;
+    private final ByteBuffer buffer;
+
+    // tracks where memory's first available byte maps to in terms of FileChannel.position()
+    private int bufferPosition;
+
+    DiskFileReader(final Path path, final FileChannel channel, final int maxSegmentSize, final int maxEntrySize) {
+        this(path, channel, allocateBuffer(maxSegmentSize, maxEntrySize));
+    }
+
+    // Note: take ownership of the buffer
+    DiskFileReader(final Path path, final FileChannel channel, final ByteBuffer buffer) {
+        super(path);
+        this.channel = requireNonNull(channel);
+        this.buffer = buffer.flip();
+        bufferPosition = 0;
+    }
+
+    static ByteBuffer allocateBuffer(final int maxSegmentSize, final int maxEntrySize) {
+        return ByteBuffer.allocate(chooseBufferSize(maxSegmentSize, maxEntrySize));
+    }
+
+    private static int chooseBufferSize(final int maxSegmentSize, final int maxEntrySize) {
+        if (maxSegmentSize <= MIN_IO_SIZE) {
+            // just buffer the entire segment
+            return maxSegmentSize;
+        }
+
+        // one full entry plus its header, or MIN_IO_SIZE, which benefits reading many small entries
+        final int minBufferSize = maxEntrySize + SegmentEntry.HEADER_BYTES;
+        return minBufferSize <= MIN_IO_SIZE ? MIN_IO_SIZE : minBufferSize;
+    }
+
+    @Override
+    void invalidateCache() {
+        buffer.clear().flip();
+        bufferPosition = 0;
+    }
+
+    @Override
+    ByteBuffer read(final int position, final int size) {
+        // calculate logical seek distance between buffer's first byte and position and split flow between
+        // forward-moving and backwards-moving code paths.
+        final int seek = bufferPosition - position;
+        return seek >= 0 ? forwardAndRead(seek, position, size) : rewindAndRead(-seek, position, size);
+    }
+
+    private @NonNull ByteBuffer forwardAndRead(final int seek, final int position, final int size) {
+        final int missing = buffer.limit() - seek - size;
+        if (missing <= 0) {
+            // fast path: we have the requested region
+            return buffer.slice(seek, size).asReadOnlyBuffer();
+        }
+
+        // We need to read more data, but let's salvage what we can:
+        // - set buffer position to seek, which means it points to the same as position
+        // - run compact, which moves everything between position and limit onto the beginning of buffer and
+        //   sets it up to receive more bytes
+        // - start the read accounting for the seek
+        buffer.position(seek).compact();
+        readAtLeast(position + seek, missing);
+        return setAndSlice(position, size);
+    }
+
+    private @NonNull ByteBuffer rewindAndRead(final int rewindBy, final int position, final int size) {
+        // TODO: Lazy solution. To be super crisp, we want to find out how much of the buffer we can salvage and
+        //       do all the limit/position fiddling before and after read. Right now let's just flow the buffer up and
+        //       read it.
+        buffer.clear();
+        readAtLeast(position, size);
+        return setAndSlice(position, size);
+    }
+
+    private void readAtLeast(final int readPosition, final int readAtLeast) {
+        final int bytesRead;
+        try {
+            bytesRead = channel.read(buffer, readPosition);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+        verify(bytesRead >= readAtLeast, "Short read %s, expected %s", bytesRead, readAtLeast);
+        buffer.flip();
+    }
+
+    private @NonNull ByteBuffer setAndSlice(final int position, final int size) {
+        bufferPosition = position;
+        return buffer.slice(0, size).asReadOnlyBuffer();
+    }
+}
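
The compact-then-refill step in forwardAndRead() relies only on standard ByteBuffer semantics. The standalone sketch below, with hypothetical names and not part of this patch, shows the same pattern in isolation: retain the still-valid tail of a buffer that is in read mode, append more bytes from the channel, and flip back to read mode.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    class CompactRefillSketch {
        // Keeps the bytes in [tailStart, limit) of a read-mode buffer, reads more bytes from the
        // channel after them, and returns the buffer flipped back into read mode.
        static ByteBuffer refill(final FileChannel channel, final ByteBuffer buffer, final int tailStart,
                final long nextFilePosition) throws IOException {
            buffer.position(tailStart).compact();   // move the salvageable tail to the front, switch to write mode
            channel.read(buffer, nextFilePosition); // append the missing bytes after the salvaged tail
            return buffer.flip();                   // back to read mode: [0, limit) is coherent again
        }
    }
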
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/DiskJournalSegmentWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/DiskJournalSegmentWriter.java
new file mode 100644 (file)
index 0000000..54feee1
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES;
+
+import io.atomix.storage.journal.index.JournalIndex;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+
+/**
+ * Segment writer.
+ * <p>
+ * The format of an entry in the log is as follows:
+ * <ul>
+ * <li>64-bit index</li>
+ * <li>8-bit boolean indicating whether a term change is contained in the entry</li>
+ * <li>64-bit optional term</li>
+ * <li>32-bit signed entry length, including the entry type ID</li>
+ * <li>8-bit signed entry type ID</li>
+ * <li>n-bit entry bytes</li>
+ * </ul>
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+final class DiskJournalSegmentWriter extends JournalSegmentWriter {
+    private static final ByteBuffer ZERO_ENTRY_HEADER = ByteBuffer.wrap(new byte[HEADER_BYTES]);
+
+    private final JournalSegmentReader reader;
+    private final ByteBuffer buffer;
+
+    DiskJournalSegmentWriter(final FileChannel channel, final JournalSegment segment, final int maxEntrySize,
+            final JournalIndex index) {
+        super(channel, segment, maxEntrySize, index);
+
+        buffer = DiskFileReader.allocateBuffer(maxSegmentSize, maxEntrySize);
+        reader = new JournalSegmentReader(segment,
+            new DiskFileReader(segment.file().file().toPath(), channel, buffer), maxEntrySize);
+        reset(0);
+    }
+
+    DiskJournalSegmentWriter(final JournalSegmentWriter previous) {
+        super(previous);
+
+        buffer = DiskFileReader.allocateBuffer(maxSegmentSize, maxEntrySize);
+        reader = new JournalSegmentReader(segment,
+            new DiskFileReader(segment.file().file().toPath(), channel, buffer), maxEntrySize);
+    }
+
+    @Override
+    MappedByteBuffer buffer() {
+        return null;
+    }
+
+    @Override
+    MappedJournalSegmentWriter toMapped() {
+        return new MappedJournalSegmentWriter(this);
+    }
+
+    @Override
+    DiskJournalSegmentWriter toFileChannel() {
+        return this;
+    }
+
+    @Override
+    JournalSegmentReader reader() {
+        return reader;
+    }
+
+    @Override
+    ByteBuffer startWrite(final int position, final int size) {
+        return buffer.clear().slice(0, size);
+    }
+
+    @Override
+    void commitWrite(final int position, final ByteBuffer entry) {
+        try {
+            channel.write(entry, position);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    void writeEmptyHeader(final int position) {
+        try {
+            channel.write(ZERO_ENTRY_HEADER.asReadOnlyBuffer(), position);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    void flush() {
+        try {
+            if (channel.isOpen()) {
+                channel.force(true);
+            }
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    void close() {
+        flush();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/FileReader.java
new file mode 100644 (file)
index 0000000..fdc0597
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * An abstraction over how to read a {@link JournalSegmentFile}.
+ */
+abstract sealed class FileReader permits DiskFileReader, MappedFileReader {
+    private final Path path;
+
+    FileReader(final Path path) {
+        this.path = requireNonNull(path);
+    }
+
+    /**
+     * Invalidate any cache that is present, so that the next read is coherent with the backing file.
+     */
+    abstract void invalidateCache();
+
+    /**
+     * Reads {@code size} bytes starting at the specified position. The sum of position and size is guaranteed
+     * not to exceed the maximum segment size nor the maximum entry size.
+     *
+     * @param position position of the entry header
+     * @param size number of bytes to read
+     * @return resulting buffer
+     */
+    abstract @NonNull ByteBuffer read(int position, int size);
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("path", path).toString();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Indexed.java
new file mode 100644 (file)
index 0000000..5bf7e6f
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+
+/**
+ * Indexed journal entry.
+ *
+ * @param <E> entry type
+ * @param index the entry index
+ * @param entry the indexed entry
+ * @param size the serialized entry size
+ */
+// FIXME: it seems 'index' has to be non-zero, we should enforce that if that really is the case
+// FIXME: it seems 'size' has to be non-zero, we should enforce that if that really is the case
+@NonNullByDefault
+public record Indexed<E>(long index, E entry, int size) {
+    public Indexed {
+        requireNonNull(entry);
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).add("index", index).add("entry", entry).toString();
+    }
+}
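
Since Indexed is a plain record, constructing one for illustration is straightforward; the payload and serialized size below are hypothetical values, not taken from this patch.

    import io.atomix.storage.journal.Indexed;

    class IndexedDemo {
        public static void main(final String[] args) {
            // Hypothetical entry payload and serialized size, purely for illustration.
            final Indexed<String> indexed = new Indexed<>(1L, "hello", 5);
            System.out.println(indexed); // toString() reports the index and the entry
        }
    }
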
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java b/atomix-storage/src/main/java/io/atomix/storage/journal/Journal.java
new file mode 100644 (file)
index 0000000..5e37c12
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.Closeable;
+
+/**
+ * Journal.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface Journal<E> extends Closeable {
+
+  /**
+   * Returns the journal writer.
+   *
+   * @return The journal writer.
+   */
+  JournalWriter<E> writer();
+
+  /**
+   * Opens a new journal reader.
+   *
+   * @param index The index at which to start the reader.
+   * @return A new journal reader.
+   */
+  JournalReader<E> openReader(long index);
+
+  /**
+   * Opens a new journal reader.
+   *
+   * @param index The index at which to start the reader.
+   * @param mode the reader mode
+   * @return A new journal reader.
+   */
+  JournalReader<E> openReader(long index, JournalReader.Mode mode);
+
+  /**
+   * Returns a boolean indicating whether the journal is open.
+   *
+   * @return Indicates whether the journal is open.
+   */
+  boolean isOpen();
+
+  @Override
+  void close();
+}
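
A hedged sketch of how a caller might combine Journal and JournalReader to walk committed entries. The demo class is hypothetical and the journal instance is assumed to have been constructed and opened elsewhere; only methods declared by the two interfaces are used.

    import io.atomix.storage.journal.Indexed;
    import io.atomix.storage.journal.Journal;
    import io.atomix.storage.journal.JournalReader;

    class JournalDumpDemo {
        // Prints every committed entry; index 1 is used as an illustrative starting point.
        static <E> void dumpCommitted(final Journal<E> journal) {
            try (JournalReader<E> reader = journal.openReader(1, JournalReader.Mode.COMMITS)) {
                Indexed<E> indexed;
                while ((indexed = reader.tryNext()) != null) {
                    System.out.println(indexed.index() + " -> " + indexed.entry());
                }
            }
        }
    }
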
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalReader.java
new file mode 100644 (file)
index 0000000..700f40d
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Log reader.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface JournalReader<E> extends AutoCloseable {
+    /**
+     * Raft log reader mode.
+     */
+    enum Mode {
+        /**
+         * Reads all entries from the log.
+         */
+        ALL,
+        /**
+         * Reads committed entries from the log.
+         */
+        COMMITS,
+    }
+
+    /**
+     * Returns the first index in the journal.
+     *
+     * @return the first index in the journal
+     */
+    long getFirstIndex();
+
+    /**
+     * Returns the last read entry.
+     *
+     * @return The last read entry.
+     */
+    Indexed<E> getCurrentEntry();
+
+    /**
+     * Returns the next reader index.
+     *
+     * @return The next reader index.
+     */
+    long getNextIndex();
+
+    /**
+     * Try to move to the next entry.
+     *
+     * @return The next entry in the reader, or {@code null} if there is no next entry.
+     */
+    @Nullable Indexed<E> tryNext();
+
+    /**
+     * Resets the reader to the start.
+     */
+    void reset();
+
+    /**
+     * Resets the reader to the given index.
+     *
+     * @param index The index to which to reset the reader.
+     */
+    void reset(long index);
+
+    @Override
+    void close();
+}
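
As a complement, a hypothetical recovery-style sketch showing reset(long) together with tryNext(): rewind an existing reader to just past a snapshot boundary and apply everything that follows. The reader is assumed to come from Journal.openReader(...); entry application is application-specific and omitted.

    import io.atomix.storage.journal.Indexed;
    import io.atomix.storage.journal.JournalReader;

    class ReplayDemo {
        // Returns the index of the last entry seen after replaying from snapshotIndex + 1.
        static <E> long replayAfter(final JournalReader<E> reader, final long snapshotIndex) {
            reader.reset(snapshotIndex + 1);
            long lastApplied = snapshotIndex;
            Indexed<E> next;
            while ((next = reader.tryNext()) != null) {
                // apply next.entry() to the application state machine (omitted)
                lastApplied = next.index();
            }
            return lastApplied;
        }
    }
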
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegment.java
new file mode 100644 (file)
index 0000000..45405aa
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.index.JournalIndex;
+import io.atomix.storage.journal.index.Position;
+import io.atomix.storage.journal.index.SparseJournalIndex;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Log segment.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+final class JournalSegment implements AutoCloseable {
+  private final JournalSegmentFile file;
+  private final JournalSegmentDescriptor descriptor;
+  private final StorageLevel storageLevel;
+  private final int maxEntrySize;
+  private final JournalIndex journalIndex;
+  private final Set<JournalSegmentReader> readers = ConcurrentHashMap.newKeySet();
+  private final AtomicInteger references = new AtomicInteger();
+  private final FileChannel channel;
+
+  private JournalSegmentWriter writer;
+  private boolean open = true;
+
+  JournalSegment(
+      JournalSegmentFile file,
+      JournalSegmentDescriptor descriptor,
+      StorageLevel storageLevel,
+      int maxEntrySize,
+      double indexDensity) {
+    this.file = file;
+    this.descriptor = descriptor;
+    this.storageLevel = storageLevel;
+    this.maxEntrySize = maxEntrySize;
+    journalIndex = new SparseJournalIndex(indexDensity);
+    try {
+      channel = FileChannel.open(file.file().toPath(),
+        StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+    writer = switch (storageLevel) {
+        case DISK -> new DiskJournalSegmentWriter(channel, this, maxEntrySize, journalIndex);
+        case MAPPED -> new MappedJournalSegmentWriter(channel, this, maxEntrySize, journalIndex).toFileChannel();
+    };
+  }
+
+  /**
+   * Returns the segment's starting index.
+   *
+   * @return The segment's starting index.
+   */
+  long firstIndex() {
+    return descriptor.index();
+  }
+
+  /**
+   * Returns the last index in the segment.
+   *
+   * @return The last index in the segment.
+   */
+  long lastIndex() {
+    return writer.getLastIndex();
+  }
+
+  /**
+   * Returns the size of the segment.
+   *
+   * @return the size of the segment
+   */
+  int size() {
+    try {
+      return (int) channel.size();
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  /**
+   * Returns the segment file.
+   *
+   * @return The segment file.
+   */
+  JournalSegmentFile file() {
+    return file;
+  }
+
+  /**
+   * Returns the segment descriptor.
+   *
+   * @return The segment descriptor.
+   */
+  JournalSegmentDescriptor descriptor() {
+    return descriptor;
+  }
+
+  /**
+   * Looks up the position of the given index.
+   *
+   * @param index the index to lookup
+   * @return the position of the given index or a lesser index, or {@code null}
+   */
+  @Nullable Position lookup(long index) {
+    return journalIndex.lookup(index);
+  }
+
+  /**
+   * Acquires a reference to the log segment.
+   */
+  private void acquire() {
+    if (references.getAndIncrement() == 0 && storageLevel == StorageLevel.MAPPED) {
+      writer = writer.toMapped();
+    }
+  }
+
+  /**
+   * Releases a reference to the log segment.
+   */
+  private void release() {
+    if (references.decrementAndGet() == 0) {
+      if (storageLevel == StorageLevel.MAPPED) {
+        writer = writer.toFileChannel();
+      }
+      if (!open) {
+        finishClose();
+      }
+    }
+  }
+
+  /**
+   * Acquires a reference to the segment writer.
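+   * Callers are expected to eventually call {@link #releaseWriter()}; a typical pairing looks like this
+   * (illustrative sketch only):
+   * <pre>{@code
+   * JournalSegmentWriter writer = segment.acquireWriter();
+   * try {
+   *     // use the writer
+   * } finally {
+   *     segment.releaseWriter();
+   * }
+   * }</pre>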
+   *
+   * @return The segment writer.
+   */
+  JournalSegmentWriter acquireWriter() {
+    checkOpen();
+    acquire();
+
+    return writer;
+  }
+
+  /**
+   * Releases the reference to the segment writer.
+   */
+  void releaseWriter() {
+    release();
+  }
+
+  /**
+   * Creates a new segment reader.
+   *
+   * @return A new segment reader.
+   */
+  JournalSegmentReader createReader() {
+    checkOpen();
+    acquire();
+
+    final var buffer = writer.buffer();
+    final var path = file.file().toPath();
+    final var fileReader = buffer != null ? new MappedFileReader(path, buffer)
+        : new DiskFileReader(path, channel, descriptor.maxSegmentSize(), maxEntrySize);
+    final var reader = new JournalSegmentReader(this, fileReader, maxEntrySize);
+    reader.setPosition(JournalSegmentDescriptor.BYTES);
+    readers.add(reader);
+    return reader;
+  }
+
+  /**
+   * Closes a segment reader.
+   *
+   * @param reader the closed segment reader
+   */
+  void closeReader(JournalSegmentReader reader) {
+    if (readers.remove(reader)) {
+      release();
+    }
+  }
+
+  /**
+   * Checks whether the segment is open.
+   */
+  private void checkOpen() {
+    if (!open) {
+      throw new IllegalStateException("Segment not open");
+    }
+  }
+
+  /**
+   * Returns a boolean indicating whether the segment is open.
+   *
+   * @return {@code true} if the segment is open
+   */
+  public boolean isOpen() {
+    return open;
+  }
+
+  /**
+   * Closes the segment.
+   */
+  @Override
+  public void close() {
+    if (!open) {
+      return;
+    }
+
+    open = false;
+    readers.forEach(JournalSegmentReader::close);
+    if (references.get() == 0) {
+      finishClose();
+    }
+  }
+
+  private void finishClose() {
+    writer.close();
+    try {
+      channel.close();
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  /**
+   * Deletes the segment.
+   */
+  void delete() {
+    try {
+      Files.deleteIfExists(file.file().toPath());
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return MoreObjects.toStringHelper(this)
+        .add("id", descriptor.id())
+        .add("version", descriptor.version())
+        .add("index", firstIndex())
+        .toString();
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentDescriptor.java
new file mode 100644 (file)
index 0000000..757ca3a
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.nio.ByteBuffer;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Stores information about a {@link JournalSegment} of the log.
+ * <p>
+ * The segment descriptor manages metadata related to a single segment of the log. Descriptors are stored within the
+ * first {@code 64} bytes of each segment in the following order:
+ * <ul>
+ * <li>{@code id} (64-bit signed integer) - A unique segment identifier. This is a monotonically increasing number within
+ * each log. Segments with in-sequence identifiers should contain in-sequence indexes.</li>
+ * <li>{@code index} (64-bit signed integer) - The effective first index of the segment. This indicates the index at which
+ * the first entry should be written to the segment. Indexes are monotonically increasing thereafter.</li>
+ * <li>{@code version} (64-bit signed integer) - The version of the segment. Versions are monotonically increasing
+ * starting at {@code 1}. Versions will only be incremented whenever the segment is rewritten to another memory/disk
+ * space, e.g. after log compaction.</li>
+ * <li>{@code maxSegmentSize} (32-bit unsigned integer) - The maximum number of bytes allowed in the segment.</li>
+ * <li>{@code maxEntries} (32-bit signed integer) - The total number of expected entries in the segment. This is the final
+ * number of entries allowed within the segment both before and after compaction. This entry count is used to determine
+ * the count of internal indexing and deduplication facilities.</li>
+ * <li>{@code updated} (64-bit signed integer) - The last update to the segment in terms of milliseconds since the epoch.
+ * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have
+ * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a
+ * change to {@code updated}.</li>
+ * <li>{@code locked} (8-bit boolean) - A boolean indicating whether the segment is locked. Segments will be locked once
+ * all entries have been committed to the segment. The lock state of each segment is used to determine log compaction
+ * and recovery behavior.</li>
+ * </ul>
+ * The remainder of the 64 segment header bytes are reserved for future metadata.
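+ * <p>
+ * A minimal sketch of creating a descriptor through the builder (the concrete values are illustrative only):
+ * <pre>{@code
+ * JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+ *     .withId(1)
+ *     .withIndex(1)
+ *     .withMaxSegmentSize(1024 * 1024)
+ *     .build();
+ * }</pre>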
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public final class JournalSegmentDescriptor {
+  public static final int BYTES = 64;
+
+  // Current segment version.
+  @VisibleForTesting
+  static final int VERSION = 1;
+
+  // The lengths of each field in the header.
+  private static final int VERSION_LENGTH = Integer.BYTES;     // 32-bit signed integer
+  private static final int ID_LENGTH = Long.BYTES;             // 64-bit signed integer
+  private static final int INDEX_LENGTH = Long.BYTES;          // 64-bit signed integer
+  private static final int MAX_SIZE_LENGTH = Integer.BYTES;    // 32-bit signed integer
+  private static final int MAX_ENTRIES_LENGTH = Integer.BYTES; // 32-bit signed integer
+  private static final int UPDATED_LENGTH = Long.BYTES;        // 64-bit signed integer
+
+  // The positions of each field in the header.
+  private static final int VERSION_POSITION = 0;                                         // 0
+  private static final int ID_POSITION = VERSION_POSITION + VERSION_LENGTH;              // 4
+  private static final int INDEX_POSITION = ID_POSITION + ID_LENGTH;                     // 12
+  private static final int MAX_SIZE_POSITION = INDEX_POSITION + INDEX_LENGTH;            // 20
+  private static final int MAX_ENTRIES_POSITION = MAX_SIZE_POSITION + MAX_SIZE_LENGTH;   // 24
+  private static final int UPDATED_POSITION = MAX_ENTRIES_POSITION + MAX_ENTRIES_LENGTH; // 28
+
+  /**
+   * Returns a descriptor builder.
+   * <p>
+   * The descriptor builder will write segment metadata to a {@code 64} byte in-memory buffer.
+   *
+   * @return The descriptor builder.
+   */
+  public static Builder builder() {
+    return new Builder(ByteBuffer.allocate(BYTES));
+  }
+
+  /**
+   * Returns a descriptor builder for the given descriptor buffer.
+   *
+   * @param buffer The descriptor buffer.
+   * @return The descriptor builder.
+   * @throws NullPointerException if {@code buffer} is null
+   */
+  public static Builder builder(ByteBuffer buffer) {
+    return new Builder(buffer);
+  }
+
+  private final ByteBuffer buffer;
+  private final int version;
+  private final long id;
+  private final long index;
+  private final int maxSegmentSize;
+  private final int maxEntries;
+  private volatile long updated;
+  private volatile boolean locked;
+
+  /**
+   * @throws NullPointerException if {@code buffer} is null
+   */
+  public JournalSegmentDescriptor(ByteBuffer buffer) {
+    this.buffer = buffer;
+    this.version = buffer.getInt();
+    this.id = buffer.getLong();
+    this.index = buffer.getLong();
+    this.maxSegmentSize = buffer.getInt();
+    this.maxEntries = buffer.getInt();
+    this.updated = buffer.getLong();
+    this.locked = buffer.get() == 1;
+  }
+
+  /**
+   * Returns the segment version.
+   * <p>
+   * Versions are monotonically increasing starting at {@code 1}.
+   *
+   * @return The segment version.
+   */
+  public int version() {
+    return version;
+  }
+
+  /**
+   * Returns the segment identifier.
+   * <p>
+   * The segment ID is a monotonically increasing number within each log. Segments with in-sequence identifiers should
+   * contain in-sequence indexes.
+   *
+   * @return The segment identifier.
+   */
+  public long id() {
+    return id;
+  }
+
+  /**
+   * Returns the segment index.
+   * <p>
+   * The index indicates the index at which the first entry should be written to the segment. Indexes are monotonically
+   * increasing thereafter.
+   *
+   * @return The segment index.
+   */
+  public long index() {
+    return index;
+  }
+
+  /**
+   * Returns the maximum size of the segment in bytes.
+   *
+   * @return The maximum allowed size of the segment in bytes.
+   */
+  public int maxSegmentSize() {
+    return maxSegmentSize;
+  }
+
+  /**
+   * Returns the maximum number of entries allowed in the segment.
+   *
+   * @return The maximum number of entries allowed in the segment.
+   */
+  public int maxEntries() {
+    return maxEntries;
+  }
+
+  /**
+   * Returns last time the segment was updated.
+   * <p>
+   * When the segment is first constructed, the {@code updated} time is {@code 0}. Once all entries in the segment have
+   * been committed, the {@code updated} time should be set to the current time. Log compaction should not result in a
+   * change to {@code updated}.
+   *
+   * @return The last time the segment was updated in terms of milliseconds since the epoch.
+   */
+  public long updated() {
+    return updated;
+  }
+
+  /**
+   * Writes an update to the descriptor.
+   */
+  public void update(long timestamp) {
+    if (!locked) {
+      buffer.putLong(UPDATED_POSITION, timestamp);
+      this.updated = timestamp;
+    }
+  }
+
+  /**
+   * Copies the segment to a new buffer.
+   */
+  JournalSegmentDescriptor copyTo(ByteBuffer buffer) {
+    buffer.putInt(version);
+    buffer.putLong(id);
+    buffer.putLong(index);
+    buffer.putInt(maxSegmentSize);
+    buffer.putInt(maxEntries);
+    buffer.putLong(updated);
+    buffer.put(locked ? (byte) 1 : (byte) 0);
+    return this;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("version", version)
+        .add("id", id)
+        .add("index", index)
+        .add("updated", updated)
+        .toString();
+  }
+
+  /**
+   * Segment descriptor builder.
+   */
+  public static class Builder {
+    private final ByteBuffer buffer;
+
+    private Builder(ByteBuffer buffer) {
+      this.buffer = requireNonNull(buffer, "buffer cannot be null");
+      buffer.putInt(VERSION_POSITION, VERSION);
+    }
+
+    /**
+     * Sets the segment identifier.
+     *
+     * @param id The segment identifier.
+     * @return The segment descriptor builder.
+     */
+    public Builder withId(long id) {
+      buffer.putLong(ID_POSITION, id);
+      return this;
+    }
+
+    /**
+     * Sets the segment index.
+     *
+     * @param index The segment starting index.
+     * @return The segment descriptor builder.
+     */
+    public Builder withIndex(long index) {
+      buffer.putLong(INDEX_POSITION, index);
+      return this;
+    }
+
+    /**
+     * Sets the maximum size of the segment in bytes.
+     *
+     * @param maxSegmentSize The maximum size of the segment in bytes.
+     * @return The segment descriptor builder.
+     */
+    public Builder withMaxSegmentSize(int maxSegmentSize) {
+      buffer.putInt(MAX_SIZE_POSITION, maxSegmentSize);
+      return this;
+    }
+
+    /**
+     * Sets the maximum number of entries in the segment.
+     *
+     * @param maxEntries The maximum number of entries in the segment.
+     * @return The segment descriptor builder.
+     * @deprecated since 3.0.2
+     */
+    @Deprecated
+    public Builder withMaxEntries(int maxEntries) {
+      buffer.putInt(MAX_ENTRIES_POSITION, maxEntries);
+      return this;
+    }
+
+    /**
+     * Builds the segment descriptor.
+     *
+     * @return The built segment descriptor.
+     */
+    public JournalSegmentDescriptor build() {
+      buffer.rewind();
+      return new JournalSegmentDescriptor(buffer);
+    }
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentFile.java
new file mode 100644 (file)
index 0000000..2190dee
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.File;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Segment file utility.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public final class JournalSegmentFile {
+  private static final char PART_SEPARATOR = '-';
+  private static final char EXTENSION_SEPARATOR = '.';
+  private static final String EXTENSION = "log";
+  private final File file;
+
+  /**
+   * Returns a boolean value indicating whether the given file appears to be a parsable segment file.
+   *
+   * @param name the name of the journal
+   * @param file the file to check
+   * @return {@code true} if the file appears to be a segment file of the given journal
+   * @throws NullPointerException if {@code name} or {@code file} is null
+   */
+  public static boolean isSegmentFile(String name, File file) {
+    return isSegmentFile(name, file.getName());
+  }
+
+  /**
+   * Returns a boolean value indicating whether the given file appears to be a parsable segment file.
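+   * For example, given the {@code <name>-<id>.log} naming scheme used by this class,
+   * {@code isSegmentFile("foo", "foo-1.log")} would be expected to return {@code true}, while
+   * {@code isSegmentFile("foo", "bar-1.log")} would not.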
+   *
+   * @param journalName the name of the journal
+   * @param fileName the name of the file to check
+   * @return {@code true} if the file name appears to be that of a segment file of the given journal
+   * @throws NullPointerException if {@code journalName} or {@code fileName} is null
+   */
+  public static boolean isSegmentFile(String journalName, String fileName) {
+    requireNonNull(journalName, "journalName cannot be null");
+    requireNonNull(fileName, "fileName cannot be null");
+
+    int partSeparator = fileName.lastIndexOf(PART_SEPARATOR);
+    int extensionSeparator = fileName.lastIndexOf(EXTENSION_SEPARATOR);
+
+    if (extensionSeparator == -1
+        || partSeparator == -1
+        || extensionSeparator < partSeparator
+        || !fileName.endsWith(EXTENSION)) {
+      return false;
+    }
+
+    for (int i = partSeparator + 1; i < extensionSeparator; i++) {
+      if (!Character.isDigit(fileName.charAt(i))) {
+        return false;
+      }
+    }
+
+    return fileName.startsWith(journalName);
+  }
+
+  /**
+   * Creates a segment file for the given directory, log name and segment ID.
+   */
+  static File createSegmentFile(String name, File directory, long id) {
+    return new File(directory, String.format("%s-%d.log", requireNonNull(name, "name cannot be null"), id));
+  }
+
+  /**
+   * Creates a new segment file wrapper. The file is expected to be a valid segment file, as reported by
+   * {@link #isSegmentFile(String, File)}.
+   *
+   * @param file the underlying segment file
+   */
+  JournalSegmentFile(File file) {
+    this.file = file;
+  }
+
+  /**
+   * Returns the segment file.
+   *
+   * @return The segment file.
+   */
+  public File file() {
+    return file;
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentReader.java
new file mode 100644 (file)
index 0000000..d89c720
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import java.util.zip.CRC32;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class JournalSegmentReader {
+    private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentReader.class);
+
+    private final JournalSegment segment;
+    private final FileReader fileReader;
+    private final int maxSegmentSize;
+    private final int maxEntrySize;
+
+    private int position;
+
+    JournalSegmentReader(final JournalSegment segment, final FileReader fileReader, final int maxEntrySize) {
+        this.segment = requireNonNull(segment);
+        this.fileReader = requireNonNull(fileReader);
+        maxSegmentSize = segment.descriptor().maxSegmentSize();
+        this.maxEntrySize = maxEntrySize;
+    }
+
+    /**
+     * Return the current position.
+     *
+     * @return current position.
+     */
+    int position() {
+        return position;
+    }
+
+    /**
+     * Set the file position.
+     *
+     * @param position new position
+     */
+    void setPosition(final int position) {
+        verify(position >= JournalSegmentDescriptor.BYTES && position < maxSegmentSize,
+            "Invalid position %s", position);
+        this.position = position;
+        fileReader.invalidateCache();
+    }
+
+    /**
+     * Invalidate any cache that is present, so that the next read is coherent with the backing file.
+     */
+    void invalidateCache() {
+        fileReader.invalidateCache();
+    }
+
+    /**
+     * Reads the next binary data block.
+     *
+     * @param index entry index
+     * @return the binary data, or {@code null} if there is no next entry or it fails validation
+     */
+    @Nullable ByteBuf readBytes(final long index) {
+        // Check if there is enough in the buffer remaining
+        final int remaining = maxSegmentSize - position - SegmentEntry.HEADER_BYTES;
+        if (remaining < 0) {
+            // Not enough space in the segment, there can never be another entry
+            return null;
+        }
+
+        // Calculate maximum entry length not exceeding file size nor maxEntrySize
+        final var maxLength = Math.min(remaining, maxEntrySize);
+        final var buffer = fileReader.read(position, maxLength + SegmentEntry.HEADER_BYTES);
+
+        // Read the entry length
+        final var length = buffer.getInt(0);
+        if (length < 1 || length > maxLength) {
+            // Invalid length, make sure next read re-tries
+            invalidateCache();
+            return null;
+        }
+
+        // Read the entry checksum
+        final int checksum = buffer.getInt(Integer.BYTES);
+
+        // Slice off the entry's bytes
+        final var entryBuffer = buffer.slice(SegmentEntry.HEADER_BYTES, length);
+        // Compute the checksum for the entry bytes.
+        final var crc32 = new CRC32();
+        crc32.update(entryBuffer);
+
+        // If the stored checksum does not equal the computed checksum, do not proceed further
+        final var computed = (int) crc32.getValue();
+        if (checksum != computed) {
+            LOG.warn("Expected checksum {}, computed {}", Integer.toHexString(checksum), Integer.toHexString(computed));
+            invalidateCache();
+            return null;
+        }
+
+        // update position
+        position += SegmentEntry.HEADER_BYTES + length;
+
+        // return bytes
+        entryBuffer.rewind();
+        return Unpooled.buffer(length).writeBytes(entryBuffer);
+    }
+
+    /**
+     * Close this reader.
+     */
+    void close() {
+        segment.closeReader(this);
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSegmentWriter.java
new file mode 100644 (file)
index 0000000..c2e0a25
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static io.atomix.storage.journal.SegmentEntry.HEADER_BYTES;
+import static java.util.Objects.requireNonNull;
+
+import io.atomix.storage.journal.index.JournalIndex;
+import io.netty.buffer.ByteBuf;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.zip.CRC32;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+abstract sealed class JournalSegmentWriter permits DiskJournalSegmentWriter, MappedJournalSegmentWriter {
+    private static final Logger LOG = LoggerFactory.getLogger(JournalSegmentWriter.class);
+
+    final @NonNull FileChannel channel;
+    final @NonNull JournalSegment segment;
+    private final @NonNull JournalIndex index;
+    final int maxSegmentSize;
+    final int maxEntrySize;
+
+    private int currentPosition;
+    private Long lastIndex;
+    private ByteBuf lastWritten;
+
+    JournalSegmentWriter(final FileChannel channel, final JournalSegment segment, final int maxEntrySize,
+            final JournalIndex index) {
+        this.channel = requireNonNull(channel);
+        this.segment = requireNonNull(segment);
+        this.index = requireNonNull(index);
+        maxSegmentSize = segment.descriptor().maxSegmentSize();
+        this.maxEntrySize = maxEntrySize;
+    }
+
+    JournalSegmentWriter(final JournalSegmentWriter previous) {
+        channel = previous.channel;
+        segment = previous.segment;
+        index = previous.index;
+        maxSegmentSize = previous.maxSegmentSize;
+        maxEntrySize = previous.maxEntrySize;
+        lastWritten = previous.lastWritten;
+        lastIndex = previous.lastIndex;
+        currentPosition = previous.currentPosition;
+    }
+
+    /**
+     * Returns the last written index.
+     *
+     * @return The last written index.
+     */
+    final long getLastIndex() {
+        return lastIndex != null ? lastIndex : segment.firstIndex() - 1;
+    }
+
+    /**
+     * Returns the last data written.
+     *
+     * @return The last data written.
+     */
+    final ByteBuf getLastWritten() {
+        return lastWritten == null ? null : lastWritten.slice();
+    }
+
+    /**
+     * Returns the next index to be written.
+     *
+     * @return The next index to be written.
+     */
+    final long getNextIndex() {
+        return lastIndex != null ? lastIndex + 1 : segment.firstIndex();
+    }
+
+    /**
+     * Tries to append binary data to the journal.
+     *
+     * @param buf binary data to append
+     * @return The index of appended data, or {@code null} if segment has no space
+     */
+    final Long append(final ByteBuf buf) {
+        final var length = buf.readableBytes();
+        if (length > maxEntrySize) {
+            throw new StorageException.TooLarge("Serialized entry size exceeds maximum allowed bytes ("
+                + maxEntrySize + ")");
+        }
+
+        // Store the entry index.
+        final long index = getNextIndex();
+        final int position = currentPosition;
+
+        // check space available
+        final int nextPosition = position + HEADER_BYTES + length;
+        if (nextPosition >= maxSegmentSize) {
+            LOG.trace("Not enough space for {} at {}", index, position);
+            return null;
+        }
+
+        // allocate buffer and write data
+        final var writeBuffer = startWrite(position, length + HEADER_BYTES).position(HEADER_BYTES);
+        writeBuffer.put(buf.nioBuffer());
+
+        // Compute the checksum for the entry.
+        final var crc32 = new CRC32();
+        crc32.update(writeBuffer.flip().position(HEADER_BYTES));
+
+        // Write the header (length and checksum) and commit the entry to the underlying buffer.
+        writeBuffer.putInt(0, length).putInt(Integer.BYTES, (int) crc32.getValue());
+        commitWrite(position, writeBuffer.rewind());
+
+        // Update the last entry with the correct index/term/length.
+        currentPosition = nextPosition;
+        lastWritten = buf;
+        lastIndex = index;
+        this.index.index(index, position);
+
+        return index;
+    }
+
+    abstract ByteBuffer startWrite(int position, int size);
+
+    abstract void commitWrite(int position, ByteBuffer entry);
+
+    /**
+     * Resets the head of the segment to the given index.
+     *
+     * @param index the index to which to reset the head of the segment
+     */
+    final void reset(final long index) {
+        // acquire ownership of cache and make sure reader does not see anything we've done once we're done
+        final var reader = reader();
+        reader.invalidateCache();
+        try {
+            resetWithBuffer(reader, index);
+        } finally {
+            // Make sure reader does not see anything we've done
+            reader.invalidateCache();
+        }
+    }
+
+    abstract JournalSegmentReader reader();
+
+    private void resetWithBuffer(final JournalSegmentReader reader, final long index) {
+        long nextIndex = segment.firstIndex();
+
+        // Clear the buffer indexes and acquire ownership of the buffer
+        currentPosition = JournalSegmentDescriptor.BYTES;
+        reader.setPosition(JournalSegmentDescriptor.BYTES);
+
+        while (index == 0 || nextIndex <= index) {
+            final var buf = reader.readBytes(nextIndex);
+            if (buf == null) {
+                break;
+            }
+
+            lastWritten = buf;
+            lastIndex = nextIndex;
+            this.index.index(nextIndex, currentPosition);
+            nextIndex++;
+
+            // Update the current position for indexing.
+            currentPosition += HEADER_BYTES + buf.readableBytes();
+        }
+    }
+
+    /**
+     * Truncates the log to the given index.
+     *
+     * @param index The index to which to truncate the log.
+     */
+    final void truncate(final long index) {
+        // If the index is greater than or equal to the last index, skip the truncate.
+        if (index >= getLastIndex()) {
+            return;
+        }
+
+        // Reset the last written
+        lastIndex = null;
+        lastWritten = null;
+
+        // Truncate the index.
+        this.index.truncate(index);
+
+        if (index < segment.firstIndex()) {
+            // Reset the writer to the first entry.
+            currentPosition = JournalSegmentDescriptor.BYTES;
+        } else {
+            // Reset the writer to the given index.
+            reset(index);
+        }
+
+        // Zero the entry header at current channel position.
+        writeEmptyHeader(currentPosition);
+    }
+
+    /**
+     * Write {@link SegmentEntry#HEADER_BYTES} worth of zeroes at specified position.
+     *
+     * @param position position to write to
+     */
+    abstract void writeEmptyHeader(int position);
+
+    /**
+     * Flushes written entries to disk.
+     */
+    abstract void flush();
+
+    /**
+     * Closes this writer.
+     */
+    abstract void close();
+
+    /**
+     * Returns the mapped buffer underlying the segment writer, or {@code null} if the writer does not have such a
+     * buffer.
+     *
+     * @return the mapped buffer underlying the segment writer, or {@code null}.
+     */
+    abstract @Nullable MappedByteBuffer buffer();
+
+    abstract @NonNull MappedJournalSegmentWriter toMapped();
+
+    abstract @NonNull DiskJournalSegmentWriter toFileChannel();
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerdes.java
new file mode 100644 (file)
index 0000000..a970882
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import io.atomix.utils.serializer.KryoJournalSerdesBuilder;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+/**
+ * Support for serialization of {@link Journal} entries.
+ *
+ * @deprecated due to dependency on outdated Kryo library, {@link JournalSerializer} to be used instead.
+ */
+@Deprecated(forRemoval = true, since = "9.0.3")
+public interface JournalSerdes {
+    /**
+     * Serializes given object to byte array.
+     *
+     * @param obj Object to serialize
+     * @return serialized bytes
+     */
+    byte[] serialize(Object obj);
+
+    /**
+     * Serializes given object to byte array.
+     *
+     * @param obj        Object to serialize
+     * @param bufferSize maximum size of serialized bytes
+     * @return serialized bytes
+     */
+    byte[] serialize(Object obj, int bufferSize);
+
+    /**
+     * Serializes given object to byte buffer.
+     *
+     * @param obj    Object to serialize
+     * @param buffer to write to
+     */
+    void serialize(Object obj, ByteBuffer buffer);
+
+    /**
+     * Serializes given object to OutputStream.
+     *
+     * @param obj    Object to serialize
+     * @param stream to write to
+     */
+    void serialize(Object obj, OutputStream stream);
+
+    /**
+     * Serializes given object to OutputStream.
+     *
+     * @param obj        Object to serialize
+     * @param stream     to write to
+     * @param bufferSize size of the buffer in front of the stream
+     */
+    void serialize(Object obj, OutputStream stream, int bufferSize);
+
+    /**
+     * Deserializes given byte array to Object.
+     *
+     * @param bytes serialized bytes
+     * @param <T>   deserialized Object type
+     * @return deserialized Object
+     */
+    <T> T deserialize(byte[] bytes);
+
+    /**
+     * Deserializes given byte buffer to Object.
+     *
+     * @param buffer input with serialized bytes
+     * @param <T>    deserialized Object type
+     * @return deserialized Object
+     */
+    <T> T deserialize(final ByteBuffer buffer);
+
+    /**
+     * Deserializes given InputStream to an Object.
+     *
+     * @param stream input stream
+     * @param <T>    deserialized Object type
+     * @return deserialized Object
+     */
+    <T> T deserialize(InputStream stream);
+
+    /**
+     * Deserializes given InputStream to an Object.
+     *
+     * @param stream     input stream
+     * @param <T>        deserialized Object type
+     * @param bufferSize size of the buffer in front of the stream
+     * @return deserialized Object
+     */
+    <T> T deserialize(final InputStream stream, final int bufferSize);
+
+    /**
+     * Creates a new {@link JournalSerdes} builder.
+     *
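+     * A minimal usage sketch (the {@code MyEntry} class and its {@code MyEntrySerdes} implementation are
+     * assumptions for illustration):
+     * <pre>{@code
+     * JournalSerdes serdes = JournalSerdes.builder()
+     *     .register(new MyEntrySerdes(), MyEntry.class)
+     *     .build("my-journal");
+     * }</pre>
+     *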
+     * @return builder
+     */
+    static Builder builder() {
+        return new KryoJournalSerdesBuilder();
+    }
+
+    /**
+     * Builder for {@link JournalSerdes}.
+     */
+    interface Builder {
+        /**
+         * Builds a {@link JournalSerdes} instance.
+         *
+         * @return A {@link JournalSerdes} implementation.
+         */
+        JournalSerdes build();
+
+        /**
+         * Builds a {@link JournalSerdes} instance.
+         *
+         * @param friendlyName friendly name for the namespace
+         * @return A {@link JournalSerdes} implementation.
+         */
+        JournalSerdes build(String friendlyName);
+
+        /**
+         * Registers serializer for the given set of classes.
+         * <p>
+         * When multiple classes are registered with an explicitly provided serializer, the namespace guarantees
+         * all instances will be serialized with the same type ID.
+         *
+         * @param serdes  serializer to use for the classes
+         * @param classes classes to register
+         * @return this builder
+         */
+        Builder register(EntrySerdes<?> serdes, Class<?>... classes);
+
+        /**
+         * Sets the namespace class loader.
+         *
+         * @param classLoader the namespace class loader
+         * @return this builder
+         */
+        Builder setClassLoader(ClassLoader classLoader);
+    }
+
+    /**
+     * Input data stream exposed to {@link EntrySerdes#read(EntryInput)}.
+     */
+    @Beta
+    interface EntryInput {
+
+        byte[] readBytes(int length) throws IOException;
+
+        long readLong() throws IOException;
+
+        String readString() throws IOException;
+
+        Object readObject() throws IOException;
+
+        @VisibleForTesting
+        int readVarInt() throws IOException;
+    }
+
+    /**
+     * Output data stream exposed to {@link EntrySerdes#write(EntryOutput, Object)}.
+     */
+    @Beta
+    interface EntryOutput {
+
+        void writeBytes(byte[] bytes) throws IOException;
+
+        void writeLong(long value) throws IOException;
+
+        void writeObject(Object value) throws IOException;
+
+        void writeString(String value) throws IOException;
+
+        @VisibleForTesting
+        void writeVarInt(int value) throws IOException;
+    }
+
+    /**
+     * A serializer/deserializer for an entry.
+     *
+     * @param <T> Entry type
+     */
+    interface EntrySerdes<T> {
+
+        T read(EntryInput input) throws IOException;
+
+        void write(EntryOutput output, T entry) throws IOException;
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerializer.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalSerializer.java
new file mode 100644 (file)
index 0000000..eff9af8
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech s.r.o. All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package io.atomix.storage.journal;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufUtil;
+import io.netty.buffer.Unpooled;
+
+/**
+ * Support for serialization of {@link Journal} entries.
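+ * <p>
+ * A minimal usage sketch (assuming an existing {@code serdes} and an entry type {@code MyEntry}):
+ * <pre>{@code
+ * JournalSerializer<MyEntry> serializer = JournalSerializer.wrap(serdes);
+ * ByteBuf buf = serializer.serialize(entry);
+ * MyEntry restored = serializer.deserialize(buf);
+ * }</pre>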
+ */
+public interface JournalSerializer<T> {
+
+    /**
+     * Serializes the given object to a {@link ByteBuf}.
+     *
+     * @param obj Object to serialize
+     * @return serialized bytes as {@link ByteBuf}
+     */
+    ByteBuf serialize(T obj);
+
+    /**
+     * Deserializes the given {@link ByteBuf} to an Object.
+     *
+     * @param buf serialized bytes as {@link ByteBuf}
+     * @return deserialized Object
+     */
+    T deserialize(final ByteBuf buf);
+
+    static <E> JournalSerializer<E> wrap(final JournalSerdes serdes) {
+        return new JournalSerializer<>() {
+            @Override
+            public ByteBuf serialize(final E obj) {
+                return Unpooled.wrappedBuffer(serdes.serialize(obj));
+            }
+
+            @Override
+            public E deserialize(final ByteBuf buf) {
+                return serdes.deserialize(ByteBufUtil.getBytes(buf));
+            }
+        };
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/JournalWriter.java
new file mode 100644 (file)
index 0000000..1462463
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Log writer.
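+ * <p>
+ * A minimal usage sketch (the {@code journal} and {@code entry} variables are assumed to exist, with
+ * {@code MyEntry} as the entry type):
+ * <pre>{@code
+ * JournalWriter<MyEntry> writer = journal.writer();
+ * writer.append(entry);
+ * writer.commit(writer.getLastIndex());
+ * writer.flush();
+ * }</pre>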
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public interface JournalWriter<E> {
+    /**
+     * Returns the last written index.
+     *
+     * @return The last written index.
+     */
+    long getLastIndex();
+
+    /**
+     * Returns the last entry written.
+     *
+     * @return The last entry written.
+     */
+    Indexed<E> getLastEntry();
+
+    /**
+     * Returns the next index to be written.
+     *
+     * @return The next index to be written.
+     */
+    long getNextIndex();
+
+    /**
+     * Appends an entry to the journal.
+     *
+     * @param entry The entry to append.
+     * @return The appended indexed entry.
+     */
+    <T extends E> @NonNull Indexed<T> append(T entry);
+
+    /**
+     * Commits entries up to the given index.
+     *
+     * @param index The index up to which to commit entries.
+     */
+    void commit(long index);
+
+    /**
+     * Resets the head of the journal to the given index.
+     *
+     * @param index the index to which to reset the head of the journal
+     */
+    void reset(long index);
+
+    /**
+     * Truncates the log to the given index.
+     *
+     * @param index The index to which to truncate the log.
+     */
+    void truncate(long index);
+
+    /**
+     * Flushes written entries to disk.
+     */
+    void flush();
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedFileReader.java
new file mode 100644 (file)
index 0000000..204fd72
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+
+/**
+ * A {@link StorageLevel#MAPPED} implementation of {@link FileReader}. Operates on direct mapping of the entire file.
+ */
+final class MappedFileReader extends FileReader {
+    private final ByteBuffer buffer;
+
+    MappedFileReader(final Path path, final ByteBuffer buffer) {
+        super(path);
+        this.buffer = buffer.slice().asReadOnlyBuffer();
+    }
+
+    @Override
+    void invalidateCache() {
+        // No-op: the mapping is guaranteed to be coherent
+    }
+
+    @Override
+    ByteBuffer read(final int position, final int size) {
+        return buffer.slice(position, size);
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/MappedJournalSegmentWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/MappedJournalSegmentWriter.java
new file mode 100644 (file)
index 0000000..48d9e76
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.index.JournalIndex;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Segment writer.
+ * <p>
+ * The format of an entry in the log is as follows:
+ * <ul>
+ * <li>32-bit signed entry length</li>
+ * <li>32-bit unsigned CRC32 checksum of the entry bytes</li>
+ * <li>n-byte entry bytes</li>
+ * </ul>
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+final class MappedJournalSegmentWriter extends JournalSegmentWriter {
+    private final @NonNull MappedByteBuffer mappedBuffer;
+    private final JournalSegmentReader reader;
+    private final ByteBuffer buffer;
+
+    MappedJournalSegmentWriter(final FileChannel channel, final JournalSegment segment, final int maxEntrySize,
+            final JournalIndex index) {
+        super(channel, segment, maxEntrySize, index);
+
+        mappedBuffer = mapBuffer(channel, maxSegmentSize);
+        buffer = mappedBuffer.slice();
+        reader = new JournalSegmentReader(segment, new MappedFileReader(segment.file().file().toPath(), mappedBuffer),
+            maxEntrySize);
+        reset(0);
+    }
+
+    MappedJournalSegmentWriter(final JournalSegmentWriter previous) {
+        super(previous);
+
+        mappedBuffer = mapBuffer(channel, maxSegmentSize);
+        buffer = mappedBuffer.slice();
+        reader = new JournalSegmentReader(segment, new MappedFileReader(segment.file().file().toPath(), mappedBuffer),
+            maxEntrySize);
+    }
+
+    private static @NonNull MappedByteBuffer mapBuffer(final FileChannel channel, final int maxSegmentSize) {
+        try {
+            return channel.map(FileChannel.MapMode.READ_WRITE, 0, maxSegmentSize);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+
+    @Override
+    @NonNull MappedByteBuffer buffer() {
+        return mappedBuffer;
+    }
+
+    @Override
+    MappedJournalSegmentWriter toMapped() {
+        return this;
+    }
+
+    @Override
+    DiskJournalSegmentWriter toFileChannel() {
+        close();
+        return new DiskJournalSegmentWriter(this);
+    }
+
+    @Override
+    JournalSegmentReader reader() {
+        return reader;
+    }
+
+    @Override
+    ByteBuffer startWrite(final int position, final int size) {
+        return buffer.slice(position, size);
+    }
+
+    @Override
+    void commitWrite(final int position, final ByteBuffer entry) {
+        // No-op, buffer is write-through
+    }
+
+    @Override
+    void writeEmptyHeader(final int position) {
+        // Note: we issue a single putLong() instead of two putInt()s.
+        buffer.putLong(position, 0L);
+    }
+
+    @Override
+    void flush() {
+        mappedBuffer.force();
+    }
+
+    @Override
+    void close() {
+        flush();
+        try {
+            BufferCleaner.freeBuffer(mappedBuffer);
+        } catch (IOException e) {
+            throw new StorageException(e);
+        }
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentEntry.java
new file mode 100644 (file)
index 0000000..be6c6ba
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.nio.ByteBuffer;
+
+/**
+ * An {@link Indexed} entry read from {@link JournalSegment}.
+ *
+ * @param checksum The CRC32 checksum of data
+ * @param bytes Entry bytes
+ */
+record SegmentEntry(int checksum, ByteBuffer bytes) {
+    /**
+     * The size of the header, consisting of:
+     * <ul>
+     *   <li>32-bit signed entry length</li>
+     *   <li>32-bit unsigned CRC32 checksum</li>
+     * </ul>
+     */
+    static final int HEADER_BYTES = Integer.BYTES + Integer.BYTES;
+
+    SegmentEntry {
+        if (bytes.remaining() < 1) {
+            throw new IllegalArgumentException("Invalid entry bytes " + bytes);
+        }
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournal.java
new file mode 100644 (file)
index 0000000..7289d3d
--- /dev/null
@@ -0,0 +1,877 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.StandardOpenOption;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Segmented journal.
+ */
+public final class SegmentedJournal<E> implements Journal<E> {
+  /**
+   * Returns a new Raft log builder.
+   *
+   * @return A new Raft log builder.
+   */
+  public static <E> Builder<E> builder() {
+    return new Builder<>();
+  }
+
+  private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournal.class);
+  private static final int SEGMENT_BUFFER_FACTOR = 3;
+
+  private final String name;
+  private final StorageLevel storageLevel;
+  private final File directory;
+  private final JournalSerializer<E> serializer;
+  private final int maxSegmentSize;
+  private final int maxEntrySize;
+  private final int maxEntriesPerSegment;
+  private final double indexDensity;
+  private final boolean flushOnCommit;
+  private final SegmentedJournalWriter<E> writer;
+  private volatile long commitIndex;
+
+  private final ConcurrentNavigableMap<Long, JournalSegment> segments = new ConcurrentSkipListMap<>();
+  private final Collection<SegmentedJournalReader> readers = ConcurrentHashMap.newKeySet();
+  private JournalSegment currentSegment;
+
+  private volatile boolean open = true;
+
+  public SegmentedJournal(
+      String name,
+      StorageLevel storageLevel,
+      File directory,
+      JournalSerdes namespace,
+      int maxSegmentSize,
+      int maxEntrySize,
+      int maxEntriesPerSegment,
+      double indexDensity,
+      boolean flushOnCommit) {
+    this.name = requireNonNull(name, "name cannot be null");
+    this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null");
+    this.directory = requireNonNull(directory, "directory cannot be null");
+    this.serializer = JournalSerializer.wrap(requireNonNull(namespace, "namespace cannot be null"));
+    this.maxSegmentSize = maxSegmentSize;
+    this.maxEntrySize = maxEntrySize;
+    this.maxEntriesPerSegment = maxEntriesPerSegment;
+    this.indexDensity = indexDensity;
+    this.flushOnCommit = flushOnCommit;
+    open();
+    this.writer = new SegmentedJournalWriter<>(this);
+  }
+
+  /**
+   * Returns the segment file name prefix.
+   *
+   * @return The segment file name prefix.
+   */
+  public String name() {
+    return name;
+  }
+
+  /**
+   * Returns the storage directory.
+   * <p>
+   * The storage directory is the directory to which all segments write files. Segment files for multiple logs may be
+   * stored in the storage directory, and files for each log instance will be identified by the {@code prefix} provided
+   * when the log is opened.
+   *
+   * @return The storage directory.
+   */
+  public File directory() {
+    return directory;
+  }
+
+  /**
+   * Returns the storage level.
+   * <p>
+   * The storage level dictates how entries within individual journal segments should be stored.
+   *
+   * @return The storage level.
+   */
+  public StorageLevel storageLevel() {
+    return storageLevel;
+  }
+
+  /**
+   * Returns the maximum journal segment size.
+   * <p>
+   * The maximum segment size dictates the maximum size any segment in the journal may consume in bytes.
+   *
+   * @return The maximum segment size in bytes.
+   */
+  public int maxSegmentSize() {
+    return maxSegmentSize;
+  }
+
+  /**
+   * Returns the maximum journal entry size.
+   * <p>
+   * The maximum entry size dictates the maximum size any entry in the segment may consume in bytes.
+   *
+   * @return the maximum entry size in bytes
+   */
+  public int maxEntrySize() {
+    return maxEntrySize;
+  }
+
+  /**
+   * Returns the maximum number of entries per segment.
+   * <p>
+   * The maximum entries per segment dictates the maximum number of entries that are allowed to be stored in any segment
+   * in a journal.
+   *
+   * @return The maximum number of entries per segment.
+   * @deprecated since 3.0.2
+   */
+  @Deprecated
+  public int maxEntriesPerSegment() {
+    return maxEntriesPerSegment;
+  }
+
+  /**
+   * Returns the collection of journal segments.
+   *
+   * @return the collection of journal segments
+   */
+  public Collection<JournalSegment> segments() {
+    return segments.values();
+  }
+
+  /**
+   * Returns the collection of journal segments with indexes greater than or equal to the given index.
+   *
+   * @param index the starting index
+   * @return the journal segments starting with indexes greater than or equal to the given index
+   */
+  public Collection<JournalSegment> segments(long index) {
+    return segments.tailMap(index).values();
+  }
+
+  /**
+   * Returns serializer instance.
+   *
+   * @return serializer instance
+   */
+  JournalSerializer<E> serializer() {
+    return serializer;
+  }
+
+  /**
+   * Returns the total size of the journal.
+   *
+   * @return the total size of the journal
+   */
+  public long size() {
+    return segments.values().stream()
+        .mapToLong(segment -> segment.size())
+        .sum();
+  }
+
+  @Override
+  public JournalWriter<E> writer() {
+    return writer;
+  }
+
+  @Override
+  public JournalReader<E> openReader(long index) {
+    return openReader(index, JournalReader.Mode.ALL);
+  }
+
+  /**
+   * Opens a new Raft log reader with the given reader mode.
+   *
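+   * For example, to iterate only committed entries (illustrative sketch; {@code journal} is an existing
+   * instance):
+   * <pre>{@code
+   * final var reader = journal.openReader(1, JournalReader.Mode.COMMITS);
+   * try {
+   *     var entry = reader.tryNext();
+   *     while (entry != null) {
+   *         // process the committed entry
+   *         entry = reader.tryNext();
+   *     }
+   * } finally {
+   *     reader.close();
+   * }
+   * }</pre>
+   *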
+   * @param index The index from which to begin reading entries.
+   * @param mode The mode in which to read entries.
+   * @return The Raft log reader.
+   */
+  public JournalReader<E> openReader(long index, JournalReader.Mode mode) {
+    final var segment = getSegment(index);
+    final var reader = switch (mode) {
+      case ALL -> new SegmentedJournalReader<>(this, segment);
+      case COMMITS -> new CommitsSegmentJournalReader<>(this, segment);
+    };
+
+    // Forward reader to specified index
+    long next = reader.getNextIndex();
+    while (index > next && reader.tryNext() != null) {
+      next = reader.getNextIndex();
+    }
+
+    readers.add(reader);
+    return reader;
+  }
+
+  /**
+   * Opens the segments.
+   */
+  private synchronized void open() {
+    // Load existing log segments from disk.
+    for (JournalSegment segment : loadSegments()) {
+      segments.put(segment.descriptor().index(), segment);
+    }
+
+    // If a segment doesn't already exist, create an initial segment starting at index 1.
+    if (!segments.isEmpty()) {
+      currentSegment = segments.lastEntry().getValue();
+    } else {
+      JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+          .withId(1)
+          .withIndex(1)
+          .withMaxSegmentSize(maxSegmentSize)
+          .withMaxEntries(maxEntriesPerSegment)
+          .build();
+
+      currentSegment = createSegment(descriptor);
+      currentSegment.descriptor().update(System.currentTimeMillis());
+
+      segments.put(1L, currentSegment);
+    }
+  }
+
+  /**
+   * Asserts that the manager is open.
+   *
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  private void assertOpen() {
+    checkState(currentSegment != null, "journal not open");
+  }
+
+  /**
+   * Asserts that enough disk space is available to allocate a new segment.
+   */
+  private void assertDiskSpace() {
+    if (directory().getUsableSpace() < maxSegmentSize() * SEGMENT_BUFFER_FACTOR) {
+      throw new StorageException.OutOfDiskSpace("Not enough space to allocate a new journal segment");
+    }
+  }
+
+  /**
+   * Resets the current segment, creating a new segment if necessary.
+   */
+  private synchronized void resetCurrentSegment() {
+    JournalSegment lastSegment = getLastSegment();
+    if (lastSegment != null) {
+      currentSegment = lastSegment;
+    } else {
+      JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+          .withId(1)
+          .withIndex(1)
+          .withMaxSegmentSize(maxSegmentSize)
+          .withMaxEntries(maxEntriesPerSegment)
+          .build();
+
+      currentSegment = createSegment(descriptor);
+
+      segments.put(1L, currentSegment);
+    }
+  }
+
+  /**
+   * Resets and returns the first segment in the journal.
+   *
+   * @param index the starting index of the journal
+   * @return the first segment
+   */
+  JournalSegment resetSegments(long index) {
+    assertOpen();
+
+    // If the index already equals the first segment index, skip the reset.
+    JournalSegment firstSegment = getFirstSegment();
+    if (index == firstSegment.firstIndex()) {
+      return firstSegment;
+    }
+
+    for (JournalSegment segment : segments.values()) {
+      segment.close();
+      segment.delete();
+    }
+    segments.clear();
+
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+        .withId(1)
+        .withIndex(index)
+        .withMaxSegmentSize(maxSegmentSize)
+        .withMaxEntries(maxEntriesPerSegment)
+        .build();
+    currentSegment = createSegment(descriptor);
+    segments.put(index, currentSegment);
+    return currentSegment;
+  }
+
+  /**
+   * Returns the first segment in the log.
+   *
+   * @return the first segment, or {@code null} if the journal contains no segments
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  JournalSegment getFirstSegment() {
+    assertOpen();
+    Map.Entry<Long, JournalSegment> segment = segments.firstEntry();
+    return segment != null ? segment.getValue() : null;
+  }
+
+  /**
+   * Returns the last segment in the log.
+   *
+   * @return the last segment, or {@code null} if the journal contains no segments
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  JournalSegment getLastSegment() {
+    assertOpen();
+    Map.Entry<Long, JournalSegment> segment = segments.lastEntry();
+    return segment != null ? segment.getValue() : null;
+  }
+
+  /**
+   * Creates and returns the next segment.
+   *
+   * @return The next segment.
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  synchronized JournalSegment getNextSegment() {
+    assertOpen();
+    assertDiskSpace();
+
+    JournalSegment lastSegment = getLastSegment();
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+        .withId(lastSegment != null ? lastSegment.descriptor().id() + 1 : 1)
+        .withIndex(currentSegment.lastIndex() + 1)
+        .withMaxSegmentSize(maxSegmentSize)
+        .withMaxEntries(maxEntriesPerSegment)
+        .build();
+
+    currentSegment = createSegment(descriptor);
+
+    segments.put(descriptor.index(), currentSegment);
+    return currentSegment;
+  }
+
+  /**
+   * Returns the segment following the segment with the given index.
+   *
+   * @param index The segment index with which to look up the next segment.
+   * @return The next segment for the given index.
+   */
+  JournalSegment getNextSegment(long index) {
+    Map.Entry<Long, JournalSegment> nextSegment = segments.higherEntry(index);
+    return nextSegment != null ? nextSegment.getValue() : null;
+  }
+
+  /**
+   * Returns the segment for the given index.
+   *
+   * @param index The index for which to return the segment.
+   * @return The segment for the given index.
+   * @throws IllegalStateException if the segment manager is not open
+   */
+  synchronized JournalSegment getSegment(long index) {
+    assertOpen();
+    // Check if the current segment contains the given index first in order to prevent an unnecessary map lookup.
+    if (currentSegment != null && index > currentSegment.firstIndex()) {
+      return currentSegment;
+    }
+
+    // If the index is in another segment, get the entry with the next lowest first index.
+    Map.Entry<Long, JournalSegment> segment = segments.floorEntry(index);
+    if (segment != null) {
+      return segment.getValue();
+    }
+    return getFirstSegment();
+  }
+
+  /**
+   * Removes a segment.
+   *
+   * @param segment The segment to remove.
+   */
+  synchronized void removeSegment(JournalSegment segment) {
+    segments.remove(segment.firstIndex());
+    segment.close();
+    segment.delete();
+    resetCurrentSegment();
+  }
+
+  /**
+   * Creates a new segment.
+   */
+  JournalSegment createSegment(JournalSegmentDescriptor descriptor) {
+    File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, descriptor.id());
+
+    RandomAccessFile raf;
+    FileChannel channel;
+    try {
+      raf = new RandomAccessFile(segmentFile, "rw");
+      raf.setLength(descriptor.maxSegmentSize());
+      channel = raf.getChannel();
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+
+    ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+    descriptor.copyTo(buffer);
+    buffer.flip();
+    try {
+      channel.write(buffer);
+    } catch (IOException e) {
+      throw new StorageException(e);
+    } finally {
+      try {
+        channel.close();
+        raf.close();
+      } catch (IOException e) {
+        // Closing failed: there is nothing sensible we can do here
+      }
+    }
+    JournalSegment segment = newSegment(new JournalSegmentFile(segmentFile), descriptor);
+    LOG.debug("Created segment: {}", segment);
+    return segment;
+  }
+
+  /**
+   * Creates a new segment instance.
+   *
+   * @param segmentFile The segment file.
+   * @param descriptor The segment descriptor.
+   * @return The segment instance.
+   */
+  protected JournalSegment newSegment(JournalSegmentFile segmentFile, JournalSegmentDescriptor descriptor) {
+    return new JournalSegment(segmentFile, descriptor, storageLevel, maxEntrySize, indexDensity);
+  }
+
+  /**
+   * Loads a segment.
+   */
+  private JournalSegment loadSegment(long segmentId) {
+    File segmentFile = JournalSegmentFile.createSegmentFile(name, directory, segmentId);
+    ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+    try (FileChannel channel = openChannel(segmentFile)) {
+      channel.read(buffer);
+      buffer.flip();
+      JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer);
+      JournalSegment segment = newSegment(new JournalSegmentFile(segmentFile), descriptor);
+      LOG.debug("Loaded disk segment: {} ({})", descriptor.id(), segmentFile.getName());
+      return segment;
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  private FileChannel openChannel(File file) {
+    try {
+      return FileChannel.open(file.toPath(), StandardOpenOption.CREATE, StandardOpenOption.READ,
+          StandardOpenOption.WRITE);
+    } catch (IOException e) {
+      throw new StorageException(e);
+    }
+  }
+
+  /**
+   * Loads all segments from disk.
+   *
+   * @return A collection of segments for the log.
+   */
+  protected Collection<JournalSegment> loadSegments() {
+    // Ensure log directories are created.
+    directory.mkdirs();
+
+    TreeMap<Long, JournalSegment> segments = new TreeMap<>();
+
+    // Iterate through all files in the log directory.
+    for (File file : directory.listFiles(File::isFile)) {
+
+      // If the file looks like a segment file, attempt to load the segment.
+      if (JournalSegmentFile.isSegmentFile(name, file)) {
+        JournalSegmentFile segmentFile = new JournalSegmentFile(file);
+        ByteBuffer buffer = ByteBuffer.allocate(JournalSegmentDescriptor.BYTES);
+        try (FileChannel channel = openChannel(file)) {
+          channel.read(buffer);
+          buffer.flip();
+        } catch (IOException e) {
+          throw new StorageException(e);
+        }
+
+        JournalSegmentDescriptor descriptor = new JournalSegmentDescriptor(buffer);
+
+        // Load the segment.
+        JournalSegment segment = loadSegment(descriptor.id());
+
+        // Add the segment to the segments list.
+        LOG.debug("Found segment: {} ({})", segment.descriptor().id(), segmentFile.file().getName());
+        segments.put(segment.firstIndex(), segment);
+      }
+    }
+
+    // Verify that all the segments in the log align with one another.
+    JournalSegment previousSegment = null;
+    boolean corrupted = false;
+    Iterator<Map.Entry<Long, JournalSegment>> iterator = segments.entrySet().iterator();
+    while (iterator.hasNext()) {
+      JournalSegment segment = iterator.next().getValue();
+      if (previousSegment != null && previousSegment.lastIndex() != segment.firstIndex() - 1) {
+        LOG.warn("Journal is inconsistent. {} is not aligned with prior segment {}", segment.file().file(), previousSegment.file().file());
+        corrupted = true;
+      }
+      if (corrupted) {
+        segment.close();
+        segment.delete();
+        iterator.remove();
+      }
+      previousSegment = segment;
+    }
+
+    return segments.values();
+  }
+
+  /**
+   * Resets journal readers to the given head.
+   *
+   * @param index The index at which to reset readers.
+   */
+  void resetHead(long index) {
+    for (SegmentedJournalReader<E> reader : readers) {
+      if (reader.getNextIndex() < index) {
+        reader.reset(index);
+      }
+    }
+  }
+
+  /**
+   * Resets journal readers to the given tail.
+   *
+   * @param index The index at which to reset readers.
+   */
+  void resetTail(long index) {
+    for (SegmentedJournalReader<E> reader : readers) {
+      if (reader.getNextIndex() >= index) {
+        reader.reset(index);
+      }
+    }
+  }
+
+  void closeReader(SegmentedJournalReader<E> reader) {
+    readers.remove(reader);
+  }
+
+  @Override
+  public boolean isOpen() {
+    return open;
+  }
+
+  /**
+   * Returns a boolean indicating whether a segment can be removed from the journal prior to the given index.
+   *
+   * @param index the index from which to remove segments
+   * @return indicates whether a segment can be removed from the journal
+   */
+  public boolean isCompactable(long index) {
+    Map.Entry<Long, JournalSegment> segmentEntry = segments.floorEntry(index);
+    return segmentEntry != null && !segments.headMap(segmentEntry.getValue().firstIndex()).isEmpty();
+  }
+
+  /**
+   * Returns the index up to which the journal can be compacted for the given index.
+   *
+   * @param index the compaction index
+   * @return the first index of the segment containing the given index, or zero if no such segment exists
+   */
+  public long getCompactableIndex(long index) {
+    Map.Entry<Long, JournalSegment> segmentEntry = segments.floorEntry(index);
+    return segmentEntry != null ? segmentEntry.getValue().firstIndex() : 0;
+  }
+
+  /**
+   * Compacts the journal up to the given index.
+   * <p>
+   * Compaction removes all complete segments that precede the segment containing the given index.
+   *
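+   * <p>
+   * An illustrative compaction check; here {@code committedIndex} stands for an index the caller knows to be safe
+   * to discard up to:
+   * <pre>{@code
+   * if (journal.isCompactable(committedIndex)) {
+   *     journal.compact(journal.getCompactableIndex(committedIndex));
+   * }
+   * }</pre>
+   *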
+   * @param index The index up to which to compact the journal.
+   */
+  public void compact(long index) {
+    final var segmentEntry = segments.floorEntry(index);
+    if (segmentEntry != null) {
+      final var compactSegments = segments.headMap(segmentEntry.getValue().firstIndex());
+      if (!compactSegments.isEmpty()) {
+        LOG.debug("{} - Compacting {} segment(s)", name, compactSegments.size());
+        for (JournalSegment segment : compactSegments.values()) {
+          LOG.trace("Deleting segment: {}", segment);
+          segment.close();
+          segment.delete();
+        }
+        compactSegments.clear();
+        resetHead(segmentEntry.getValue().firstIndex());
+      }
+    }
+  }
+
+  @Override
+  public void close() {
+    segments.values().forEach(segment -> {
+      LOG.debug("Closing segment: {}", segment);
+      segment.close();
+    });
+    currentSegment = null;
+    open = false;
+  }
+
+  /**
+   * Returns whether {@code flushOnCommit} is enabled for the log.
+   *
+   * @return Indicates whether {@code flushOnCommit} is enabled for the log.
+   */
+  boolean isFlushOnCommit() {
+    return flushOnCommit;
+  }
+
+  /**
+   * Commits entries up to the given index.
+   *
+   * @param index The index up to which to commit entries.
+   */
+  void setCommitIndex(long index) {
+    this.commitIndex = index;
+  }
+
+  /**
+   * Returns the Raft log commit index.
+   *
+   * @return The Raft log commit index.
+   */
+  long getCommitIndex() {
+    return commitIndex;
+  }
+
+  /**
+   * Raft log builder.
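+   *
+   * <p>
+   * A minimal construction sketch. It assumes a static {@code SegmentedJournal.builder()} factory method, an
+   * application entry type {@code MyEntry} and a pre-built {@link JournalSerdes} named {@code serdes}:
+   * <pre>{@code
+   * SegmentedJournal<MyEntry> journal = SegmentedJournal.<MyEntry>builder()
+   *     .withName("my-journal")
+   *     .withDirectory("/var/lib/my-journal")
+   *     .withNamespace(serdes)
+   *     .withStorageLevel(StorageLevel.DISK)
+   *     .withMaxSegmentSize(64 * 1024 * 1024)
+   *     .withFlushOnCommit()
+   *     .build();
+   * }</pre>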
+   */
+  public static final class Builder<E> {
+    private static final boolean DEFAULT_FLUSH_ON_COMMIT = false;
+    private static final String DEFAULT_NAME = "atomix";
+    private static final String DEFAULT_DIRECTORY = System.getProperty("user.dir");
+    private static final int DEFAULT_MAX_SEGMENT_SIZE = 1024 * 1024 * 32;
+    private static final int DEFAULT_MAX_ENTRY_SIZE = 1024 * 1024;
+    private static final int DEFAULT_MAX_ENTRIES_PER_SEGMENT = 1024 * 1024;
+    private static final double DEFAULT_INDEX_DENSITY = .005;
+
+    private String name = DEFAULT_NAME;
+    private StorageLevel storageLevel = StorageLevel.DISK;
+    private File directory = new File(DEFAULT_DIRECTORY);
+    private JournalSerdes namespace;
+    private int maxSegmentSize = DEFAULT_MAX_SEGMENT_SIZE;
+    private int maxEntrySize = DEFAULT_MAX_ENTRY_SIZE;
+    private int maxEntriesPerSegment = DEFAULT_MAX_ENTRIES_PER_SEGMENT;
+    private double indexDensity = DEFAULT_INDEX_DENSITY;
+    private boolean flushOnCommit = DEFAULT_FLUSH_ON_COMMIT;
+
+    protected Builder() {
+    }
+
+    /**
+     * Sets the storage name.
+     *
+     * @param name The storage name.
+     * @return The storage builder.
+     */
+    public Builder<E> withName(String name) {
+      this.name = requireNonNull(name, "name cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the log storage level, returning the builder for method chaining.
+     * <p>
+     * The storage level indicates how individual entries should be persisted in the journal.
+     *
+     * @param storageLevel The log storage level.
+     * @return The storage builder.
+     */
+    public Builder<E> withStorageLevel(StorageLevel storageLevel) {
+      this.storageLevel = requireNonNull(storageLevel, "storageLevel cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the log directory, returning the builder for method chaining.
+     * <p>
+     * The log will write segment files into the provided directory.
+     *
+     * @param directory The log directory.
+     * @return The storage builder.
+     * @throws NullPointerException If the {@code directory} is {@code null}
+     */
+    public Builder<E> withDirectory(String directory) {
+      return withDirectory(new File(requireNonNull(directory, "directory cannot be null")));
+    }
+
+    /**
+     * Sets the log directory, returning the builder for method chaining.
+     * <p>
+     * The log will write segment files into the provided directory.
+     *
+     * @param directory The log directory.
+     * @return The storage builder.
+     * @throws NullPointerException If the {@code directory} is {@code null}
+     */
+    public Builder<E> withDirectory(File directory) {
+      this.directory = requireNonNull(directory, "directory cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the journal namespace, returning the builder for method chaining.
+     *
+     * @param namespace The journal serializer.
+     * @return The journal builder.
+     */
+    public Builder<E> withNamespace(JournalSerdes namespace) {
+      this.namespace = requireNonNull(namespace, "namespace cannot be null");
+      return this;
+    }
+
+    /**
+     * Sets the maximum segment size in bytes, returning the builder for method chaining.
+     * <p>
+     * The maximum segment size dictates when logs should roll over to new segments. As entries are written to a segment
+     * of the log, once the size of the segment surpasses the configured maximum segment size, the log will create a new
+     * segment and append new entries to that segment.
+     * <p>
+     * By default, the maximum segment size is {@code 1024 * 1024 * 32}.
+     *
+     * @param maxSegmentSize The maximum segment size in bytes.
+     * @return The storage builder.
+     * @throws IllegalArgumentException If the {@code maxSegmentSize} is not greater than
+     *     {@link JournalSegmentDescriptor#BYTES}
+     */
+    public Builder<E> withMaxSegmentSize(int maxSegmentSize) {
+      checkArgument(maxSegmentSize > JournalSegmentDescriptor.BYTES,
+          "maxSegmentSize must be greater than " + JournalSegmentDescriptor.BYTES);
+      this.maxSegmentSize = maxSegmentSize;
+      return this;
+    }
+
+    /**
+     * Sets the maximum entry size in bytes, returning the builder for method chaining.
+     *
+     * @param maxEntrySize the maximum entry size in bytes
+     * @return the storage builder
+     * @throws IllegalArgumentException if the {@code maxEntrySize} is not positive
+     */
+    public Builder<E> withMaxEntrySize(int maxEntrySize) {
+      checkArgument(maxEntrySize > 0, "maxEntrySize must be positive");
+      this.maxEntrySize = maxEntrySize;
+      return this;
+    }
+
+    /**
+     * Sets the maximum number of allowed entries per segment, returning the builder for method chaining.
+     * <p>
+     * The maximum entry count dictates when logs should roll over to new segments. As entries are written to a segment
+     * of the log, if the entry count in that segment meets the configured maximum entry count, the log will create a
+     * new segment and append new entries to that segment.
+     * <p>
+     * By default, the maximum entries per segment is {@code 1024 * 1024}.
+     *
+     * @param maxEntriesPerSegment The maximum number of entries allowed per segment.
+     * @return The storage builder.
+     * @throws IllegalArgumentException If the {@code maxEntriesPerSegment} is not positive or is greater than the
+     *     default max entries per segment
+     * @deprecated since 3.0.2
+     */
+    @Deprecated
+    public Builder<E> withMaxEntriesPerSegment(int maxEntriesPerSegment) {
+      checkArgument(maxEntriesPerSegment > 0, "max entries per segment must be positive");
+      checkArgument(maxEntriesPerSegment <= DEFAULT_MAX_ENTRIES_PER_SEGMENT,
+          "max entries per segment cannot be greater than " + DEFAULT_MAX_ENTRIES_PER_SEGMENT);
+      this.maxEntriesPerSegment = maxEntriesPerSegment;
+      return this;
+    }
+
+    /**
+     * Sets the journal index density.
+     * <p>
+     * The index density is the frequency at which the position of entries written to the journal will be recorded in an
+     * in-memory index for faster seeking.
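+     * <p>
+     * For example, assuming the sparse index implementation used by this journal, the default density of
+     * {@code .005} corresponds to roughly one recorded position for every 200 entries written.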
+     *
+     * @param indexDensity the index density
+     * @return the journal builder
+     * @throws IllegalArgumentException if the density is not between 0 and 1
+     */
+    public Builder<E> withIndexDensity(double indexDensity) {
+      checkArgument(indexDensity > 0 && indexDensity < 1, "index density must be between 0 and 1");
+      this.indexDensity = indexDensity;
+      return this;
+    }
+
+    /**
+     * Enables flushing buffers to disk when entries are committed to a segment, returning the builder for method
+     * chaining.
+     * <p>
+     * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is
+     * committed in a given segment.
+     *
+     * @return The storage builder.
+     */
+    public Builder<E> withFlushOnCommit() {
+      return withFlushOnCommit(true);
+    }
+
+    /**
+     * Sets whether to flush buffers to disk when entries are committed to a segment, returning the builder for method
+     * chaining.
+     * <p>
+     * When flush-on-commit is enabled, log entry buffers will be automatically flushed to disk each time an entry is
+     * committed in a given segment.
+     *
+     * @param flushOnCommit Whether to flush buffers to disk when entries are committed to a segment.
+     * @return The storage builder.
+     */
+    public Builder<E> withFlushOnCommit(boolean flushOnCommit) {
+      this.flushOnCommit = flushOnCommit;
+      return this;
+    }
+
+    /**
+     * Build the {@link SegmentedJournal}.
+     *
+     * @return A new {@link SegmentedJournal}.
+     */
+    public SegmentedJournal<E> build() {
+      return new SegmentedJournal<>(
+          name,
+          storageLevel,
+          directory,
+          namespace,
+          maxSegmentSize,
+          maxEntrySize,
+          maxEntriesPerSegment,
+          indexDensity,
+          flushOnCommit);
+    }
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalReader.java
new file mode 100644 (file)
index 0000000..42f40e0
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * A {@link JournalReader} traversing all entries.
+ */
+sealed class SegmentedJournalReader<E> implements JournalReader<E> permits CommitsSegmentJournalReader {
+    final SegmentedJournal<E> journal;
+
+    private JournalSegment currentSegment;
+    private JournalSegmentReader currentReader;
+    private Indexed<E> currentEntry;
+    private long nextIndex;
+
+    SegmentedJournalReader(final SegmentedJournal<E> journal, final JournalSegment segment) {
+        this.journal = requireNonNull(journal);
+        currentSegment = requireNonNull(segment);
+        currentReader = segment.createReader();
+        nextIndex = currentSegment.firstIndex();
+        currentEntry = null;
+    }
+
+    @Override
+    public final long getFirstIndex() {
+        return journal.getFirstSegment().firstIndex();
+    }
+
+    @Override
+    public final Indexed<E> getCurrentEntry() {
+        return currentEntry;
+    }
+
+    @Override
+    public final long getNextIndex() {
+        return nextIndex;
+    }
+
+    @Override
+    public final void reset() {
+        currentReader.close();
+
+        currentSegment = journal.getFirstSegment();
+        currentReader = currentSegment.createReader();
+        nextIndex = currentSegment.firstIndex();
+        currentEntry = null;
+    }
+
+    @Override
+    public final void reset(final long index) {
+        // If the current segment is not open, it has been replaced. Reset the segments.
+        if (!currentSegment.isOpen()) {
+            reset();
+        }
+
+        if (index < nextIndex) {
+            rewind(index);
+        } else if (index > nextIndex) {
+            while (index > nextIndex && tryNext() != null) {
+                // Nothing else
+            }
+        } else {
+            resetCurrentReader(index);
+        }
+    }
+
+    private void resetCurrentReader(final long index) {
+        final var position = currentSegment.lookup(index - 1);
+        if (position != null) {
+            nextIndex = position.index();
+            currentReader.setPosition(position.position());
+        } else {
+            nextIndex = currentSegment.firstIndex();
+            currentReader.setPosition(JournalSegmentDescriptor.BYTES);
+        }
+        while (nextIndex < index && tryNext() != null) {
+            // Nothing else
+        }
+    }
+
+    /**
+     * Rewinds the journal to the given index.
+     */
+    private void rewind(final long index) {
+        if (currentSegment.firstIndex() >= index) {
+            JournalSegment segment = journal.getSegment(index - 1);
+            if (segment != null) {
+                currentReader.close();
+
+                currentSegment = segment;
+                currentReader = currentSegment.createReader();
+            }
+        }
+
+        resetCurrentReader(index);
+    }
+
+    @Override
+    public Indexed<E> tryNext() {
+        var buf = currentReader.readBytes(nextIndex);
+        if (buf == null) {
+            final var nextSegment = journal.getNextSegment(currentSegment.firstIndex());
+            if (nextSegment == null || nextSegment.firstIndex() != nextIndex) {
+                return null;
+            }
+
+            currentReader.close();
+
+            currentSegment = nextSegment;
+            currentReader = currentSegment.createReader();
+            buf = currentReader.readBytes(nextIndex);
+            if (buf == null) {
+                return null;
+            }
+        }
+
+        final var entry = journal.serializer().deserialize(buf);
+        currentEntry = new Indexed<>(nextIndex++, entry, buf.readableBytes());
+        return currentEntry;
+    }
+
+    @Override
+    public final void close() {
+        currentReader.close();
+        journal.closeReader(this);
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java b/atomix-storage/src/main/java/io/atomix/storage/journal/SegmentedJournalWriter.java
new file mode 100644 (file)
index 0000000..9ff5352
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static com.google.common.base.Verify.verifyNotNull;
+
+/**
+ * Raft log writer.
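+ *
+ * <p>
+ * Writers are obtained via {@link SegmentedJournal#writer()}. An illustrative append-and-commit sequence, assuming
+ * a {@code SegmentedJournal<E>} named {@code journal} and a pre-built entry {@code entry}:
+ * <pre>{@code
+ * JournalWriter<E> writer = journal.writer();
+ * writer.append(entry);
+ * writer.commit(writer.getLastIndex());
+ * }</pre>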
+ */
+final class SegmentedJournalWriter<E> implements JournalWriter<E> {
+  private final SegmentedJournal<E> journal;
+  private JournalSegment currentSegment;
+  private JournalSegmentWriter currentWriter;
+
+  SegmentedJournalWriter(SegmentedJournal<E> journal) {
+    this.journal = journal;
+    this.currentSegment = journal.getLastSegment();
+    this.currentWriter = currentSegment.acquireWriter();
+  }
+
+  @Override
+  public long getLastIndex() {
+    return currentWriter.getLastIndex();
+  }
+
+  @Override
+  public Indexed<E> getLastEntry() {
+    final var lastWritten = currentWriter.getLastWritten();
+    if (lastWritten == null) {
+      return null;
+    }
+    final E deserialized = journal.serializer().deserialize(lastWritten);
+    return new Indexed<>(currentWriter.getLastIndex(), deserialized, lastWritten.readableBytes());
+  }
+
+  @Override
+  public long getNextIndex() {
+    return currentWriter.getNextIndex();
+  }
+
+  @Override
+  public void reset(long index) {
+    if (index > currentSegment.firstIndex()) {
+      currentSegment.releaseWriter();
+      currentSegment = journal.resetSegments(index);
+      currentWriter = currentSegment.acquireWriter();
+    } else {
+      truncate(index - 1);
+    }
+    journal.resetHead(index);
+  }
+
+  @Override
+  public void commit(long index) {
+    if (index > journal.getCommitIndex()) {
+      journal.setCommitIndex(index);
+      if (journal.isFlushOnCommit()) {
+        flush();
+      }
+    }
+  }
+
+  @Override
+  public <T extends E> Indexed<T> append(T entry) {
+    final var bytes = journal.serializer().serialize(entry);
+    var index = currentWriter.append(bytes);
+    if (index != null) {
+      return new Indexed<>(index, entry, bytes.readableBytes());
+    }
+
+    //  Slow path: we do not have enough capacity
+    currentWriter.flush();
+    currentSegment.releaseWriter();
+    currentSegment = journal.getNextSegment();
+    currentWriter = currentSegment.acquireWriter();
+    final var newIndex = verifyNotNull(currentWriter.append(bytes));
+    return new Indexed<>(newIndex, entry, bytes.readableBytes());
+  }
+
+  @Override
+  public void truncate(long index) {
+    if (index < journal.getCommitIndex()) {
+      throw new IndexOutOfBoundsException("Cannot truncate committed index: " + index);
+    }
+
+    // Delete all segments with first indexes greater than the given index.
+    while (index < currentSegment.firstIndex() && currentSegment != journal.getFirstSegment()) {
+      currentSegment.releaseWriter();
+      journal.removeSegment(currentSegment);
+      currentSegment = journal.getLastSegment();
+      currentWriter = currentSegment.acquireWriter();
+    }
+
+    // Truncate the current index.
+    currentWriter.truncate(index);
+
+    // Reset segment readers.
+    journal.resetTail(index + 1);
+  }
+
+  @Override
+  public void flush() {
+    currentWriter.flush();
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageException.java
new file mode 100644 (file)
index 0000000..0a220ec
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Log exception.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class StorageException extends RuntimeException {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    public StorageException() {
+    }
+
+    public StorageException(final String message) {
+        super(message);
+    }
+
+    public StorageException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+
+    public StorageException(final Throwable cause) {
+        super(cause);
+    }
+
+    /**
+     * Exception thrown when an entry being stored is too large.
+     */
+    public static class TooLarge extends StorageException {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        public TooLarge(final String message) {
+            super(message);
+        }
+
+        public TooLarge(final String message, final Throwable cause) {
+            super(message, cause);
+        }
+    }
+
+    /**
+     * Exception thrown when storage runs out of disk space.
+     */
+    public static class OutOfDiskSpace extends StorageException {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        public OutOfDiskSpace(final String message) {
+            super(message);
+        }
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java b/atomix-storage/src/main/java/io/atomix/storage/journal/StorageLevel.java
new file mode 100644 (file)
index 0000000..e76a989
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2015-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Storage level configuration values which control how logs are stored on disk or in memory.
+ */
+public enum StorageLevel {
+    /**
+     * Stores data in a memory-mapped file.
+     */
+    MAPPED,
+    /**
+     * Stores data on disk.
+     */
+    DISK
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/JournalIndex.java
new file mode 100644 (file)
index 0000000..8608e00
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Index of a particular JournalSegment.
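+ *
+ * <p>
+ * An illustrative sequence against an implementation of this interface; the positions used are arbitrary byte
+ * offsets within a segment:
+ * <pre>{@code
+ * index.index(1, 64);
+ * index.index(2, 128);
+ * Position position = index.lookup(2);  // nearest recorded position at or before index 2, or null
+ * Position last = index.truncate(1);    // forget positions of indexes greater than 1
+ * }</pre>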
+ */
+public interface JournalIndex {
+    /**
+     * Adds an entry for the given index at the given position.
+     *
+     * @param index the index for which to add the entry
+     * @param position the position of the given index
+     */
+    void index(long index, int position);
+
+    /**
+     * Looks up the position of the given index.
+     *
+     * @param index the index to lookup
+     * @return the position of the given index or a lesser index, or {@code null}
+     */
+    @Nullable Position lookup(long index);
+
+    /**
+     * Truncates the index to the given index and returns its position, if available.
+     *
+     * @param index the index to which to truncate the index
+     * @return the position of the given index or a lesser index, or {@code null}
+     */
+    @Nullable Position truncate(long index);
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/Position.java
new file mode 100644 (file)
index 0000000..640a8e8
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import java.util.Map.Entry;
+import org.eclipse.jdt.annotation.Nullable;
+
+/**
+ * Journal index position.
+ */
+public record Position(long index, int position) {
+    public Position(final Entry<Long, Integer> entry) {
+        this(entry.getKey(), entry.getValue());
+    }
+
+    public static @Nullable Position ofNullable(final Entry<Long, Integer> entry) {
+        return entry == null ? null : new Position(entry);
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/SparseJournalIndex.java
new file mode 100644 (file)
index 0000000..2b31736
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import java.util.TreeMap;
+
+/**
+ * A {@link JournalIndex} maintaining target density.
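+ *
+ * <p>
+ * A sketch of the sampling behaviour: a density of {@code 0.2} retains at most one position in every five, so only
+ * indexes that are multiples of five are recorded.
+ * <pre>{@code
+ * SparseJournalIndex index = new SparseJournalIndex(0.2);
+ * index.index(4, 400);  // not recorded
+ * index.index(5, 500);  // recorded
+ * index.lookup(9);      // returns the Position recorded for index 5
+ * }</pre>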
+ */
+public final class SparseJournalIndex implements JournalIndex {
+    private static final int MIN_DENSITY = 1000;
+
+    private final int density;
+    private final TreeMap<Long, Integer> positions = new TreeMap<>();
+
+    public SparseJournalIndex() {
+        density = MIN_DENSITY;
+    }
+
+    public SparseJournalIndex(final double density) {
+        this.density = (int) Math.ceil(MIN_DENSITY / (density * MIN_DENSITY));
+    }
+
+    @Override
+    public void index(final long index, final int position) {
+        if (index % density == 0) {
+            positions.put(index, position);
+        }
+    }
+
+    @Override
+    public Position lookup(final long index) {
+        return Position.ofNullable(positions.floorEntry(index));
+    }
+
+    @Override
+    public Position truncate(final long index) {
+        positions.tailMap(index, false).clear();
+        return Position.ofNullable(positions.lastEntry());
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java b/atomix-storage/src/main/java/io/atomix/storage/journal/index/package-info.java
new file mode 100644 (file)
index 0000000..c17cabe
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides classes and interfaces for efficiently managing journal indexes.
+ */
+package io.atomix.storage.journal.index;
diff --git a/atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java b/atomix-storage/src/main/java/io/atomix/storage/journal/package-info.java
new file mode 100644 (file)
index 0000000..7cabd15
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides a low-level journal abstraction for appending to logs and managing segmented logs.
+ */
+package io.atomix.storage.journal;
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStream.java
new file mode 100644 (file)
index 0000000..94fc322
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import java.io.ByteArrayOutputStream;
+
+/**
+ * Exposes protected byte array length in {@link ByteArrayOutputStream}.
+ */
+final class BufferAwareByteArrayOutputStream extends ByteArrayOutputStream {
+
+  BufferAwareByteArrayOutputStream(int size) {
+    super(size);
+  }
+
+  int getBufferSize() {
+    return buf.length;
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/ByteArrayOutput.java
new file mode 100644 (file)
index 0000000..6df25b5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Output;
+
+/**
+ * Convenience class to avoid extra object allocation and casting.
+ */
+final class ByteArrayOutput extends Output {
+
+  private final BufferAwareByteArrayOutputStream stream;
+
+  ByteArrayOutput(final int bufferSize, final int maxBufferSize, final BufferAwareByteArrayOutputStream stream) {
+    super(bufferSize, maxBufferSize);
+    super.setOutputStream(stream);
+    this.stream = stream;
+  }
+
+  BufferAwareByteArrayOutputStream getByteArrayOutputStream() {
+    return stream;
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/EntrySerializer.java
new file mode 100644 (file)
index 0000000..0508f1e
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class EntrySerializer<T> extends Serializer<T> {
+    // Note: uses identity to create things in Kryo, hence we want an instance for every serdes we wrap
+    private final JavaSerializer javaSerializer = new JavaSerializer();
+    private final EntrySerdes<T> serdes;
+
+    EntrySerializer(final EntrySerdes<T> serdes) {
+        this.serdes = requireNonNull(serdes);
+    }
+
+    @Override
+    public T read(final Kryo kryo, final Input input, final Class<T> type) {
+        try {
+            return serdes.read(new KryoEntryInput(kryo, input, javaSerializer));
+        } catch (IOException e) {
+            throw new KryoException(e);
+        }
+    }
+
+    @Override
+    public void write(final Kryo kryo, final Output output, final T object) {
+        try {
+            serdes.write(new KryoEntryOutput(kryo, output, javaSerializer), object);
+        } catch (IOException e) {
+            throw new KryoException(e);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).addValue(serdes).toString();
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/Kryo505ByteBufferInput.java
new file mode 100644 (file)
index 0000000..ed66011
--- /dev/null
@@ -0,0 +1,243 @@
+/* Copyright (c) 2008, Nathan Sweet
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following
+ * conditions are met:
+ * 
+ * - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided with the distribution.
+ * - Neither the name of Esoteric Software nor the names of its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.ByteBufferInput;
+import java.nio.ByteBuffer;
+
+/**
+ * A Kryo-4.0.3 ByteBufferInput adapted to deal with
+ * <a href="https://github.com/EsotericSoftware/kryo/issues/505">issue 505</a>.
+ *
+ * @author Roman Levenstein &lt;romixlev@gmail.com&gt;
+ * @author Robert Varga
+ */
+public final class Kryo505ByteBufferInput extends ByteBufferInput {
+       Kryo505ByteBufferInput (ByteBuffer buffer) {
+               super(buffer);
+       }
+
+       @Override
+       public String readString () {
+               niobuffer.position(position);
+               int available = require(1);
+               position++;
+               int b = niobuffer.get();
+               if ((b & 0x80) == 0) return readAscii(); // ASCII.
+               // Null, empty, or UTF8.
+               int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b);
+               switch (charCount) {
+               case 0:
+                       return null;
+               case 1:
+                       return "";
+               }
+               charCount--;
+               if (chars.length < charCount) chars = new char[charCount];
+               readUtf8(charCount);
+               return new String(chars, 0, charCount);
+       }
+
+       private int readUtf8Length (int b) {
+               int result = b & 0x3F; // Mask all but first 6 bits.
+               if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8.
+                       position++;
+                       b = niobuffer.get();
+                       result |= (b & 0x7F) << 6;
+                       if ((b & 0x80) != 0) {
+                               position++;
+                               b = niobuffer.get();
+                               result |= (b & 0x7F) << 13;
+                               if ((b & 0x80) != 0) {
+                                       position++;
+                                       b = niobuffer.get();
+                                       result |= (b & 0x7F) << 20;
+                                       if ((b & 0x80) != 0) {
+                                               position++;
+                                               b = niobuffer.get();
+                                               result |= (b & 0x7F) << 27;
+                                       }
+                               }
+                       }
+               }
+               return result;
+       }
+
+       private int readUtf8Length_slow (int b) {
+               int result = b & 0x3F; // Mask all but first 6 bits.
+               if ((b & 0x40) != 0) { // Bit 7 means another byte, bit 8 means UTF8.
+                       require(1);
+                       position++;
+                       b = niobuffer.get();
+                       result |= (b & 0x7F) << 6;
+                       if ((b & 0x80) != 0) {
+                               require(1);
+                               position++;
+                               b = niobuffer.get();
+                               result |= (b & 0x7F) << 13;
+                               if ((b & 0x80) != 0) {
+                                       require(1);
+                                       position++;
+                                       b = niobuffer.get();
+                                       result |= (b & 0x7F) << 20;
+                                       if ((b & 0x80) != 0) {
+                                               require(1);
+                                               position++;
+                                               b = niobuffer.get();
+                                               result |= (b & 0x7F) << 27;
+                                       }
+                               }
+                       }
+               }
+               return result;
+       }
+
+       private void readUtf8 (int charCount) {
+               char[] chars = this.chars;
+               // Try to read 7 bit ASCII chars.
+               int charIndex = 0;
+               int count = Math.min(require(1), charCount);
+               int position = this.position;
+               int b;
+               while (charIndex < count) {
+                       position++;
+                       b = niobuffer.get();
+                       if (b < 0) {
+                               position--;
+                               break;
+                       }
+                       chars[charIndex++] = (char)b;
+               }
+               this.position = position;
+               // If buffer didn't hold all chars or any were not ASCII, use slow path for remainder.
+               if (charIndex < charCount) {
+                       niobuffer.position(position);
+                       readUtf8_slow(charCount, charIndex);
+               }
+       }
+
+       private void readUtf8_slow (int charCount, int charIndex) {
+               char[] chars = this.chars;
+               while (charIndex < charCount) {
+                       if (position == limit) require(1);
+                       position++;
+                       int b = niobuffer.get() & 0xFF;
+                       switch (b >> 4) {
+                       case 0:
+                       case 1:
+                       case 2:
+                       case 3:
+                       case 4:
+                       case 5:
+                       case 6:
+                       case 7:
+                               chars[charIndex] = (char)b;
+                               break;
+                       case 12:
+                       case 13:
+                               if (position == limit) require(1);
+                               position++;
+                               chars[charIndex] = (char)((b & 0x1F) << 6 | niobuffer.get() & 0x3F);
+                               break;
+                       case 14:
+                               require(2);
+                               position += 2;
+                               int b2 = niobuffer.get();
+                               int b3 = niobuffer.get();
+                               chars[charIndex] = (char)((b & 0x0F) << 12 | (b2 & 0x3F) << 6 | b3 & 0x3F);
+                               break;
+                       }
+                       charIndex++;
+               }
+       }
+
+       private String readAscii () {
+               int end = position;
+               int start = end - 1;
+               int limit = this.limit;
+               int b;
+               do {
+                       if (end == limit) return readAscii_slow();
+                       end++;
+                       b = niobuffer.get();
+               } while ((b & 0x80) == 0);
+               int count = end - start;
+               byte[] tmp = new byte[count];
+               niobuffer.position(start);
+               niobuffer.get(tmp);
+               tmp[count - 1] &= 0x7F;  // Mask end of ascii bit.
+               String value = new String(tmp, 0, 0, count);
+               position = end;
+               niobuffer.position(position);
+               return value;
+       }
+
+       private String readAscii_slow () {
+               position--; // Re-read the first byte.
+               // Copy chars currently in buffer.
+               int charCount = limit - position;
+               if (charCount > chars.length) chars = new char[charCount * 2];
+               char[] chars = this.chars;
+               for (int i = position, ii = 0, n = limit; i < n; i++, ii++)
+                       chars[ii] = (char)niobuffer.get(i);
+               position = limit;
+               // Copy additional chars one by one.
+               while (true) {
+                       require(1);
+                       position++;
+                       int b = niobuffer.get();
+                       if (charCount == chars.length) {
+                               char[] newChars = new char[charCount * 2];
+                               System.arraycopy(chars, 0, newChars, 0, charCount);
+                               chars = newChars;
+                               this.chars = newChars;
+                       }
+                       if ((b & 0x80) == 0x80) {
+                               chars[charCount++] = (char)(b & 0x7F);
+                               break;
+                       }
+                       chars[charCount++] = (char)b;
+               }
+               return new String(chars, 0, charCount);
+       }
+
+       @Override
+       public StringBuilder readStringBuilder () {
+               niobuffer.position(position);
+               int available = require(1);
+               position++;
+               int b = niobuffer.get();
+               if ((b & 0x80) == 0) return new StringBuilder(readAscii()); // ASCII.
+               // Null, empty, or UTF8.
+               int charCount = available >= 5 ? readUtf8Length(b) : readUtf8Length_slow(b);
+               switch (charCount) {
+               case 0:
+                       return null;
+               case 1:
+                       return new StringBuilder("");
+               }
+               charCount--;
+               if (chars.length < charCount) chars = new char[charCount];
+               readUtf8(charCount);
+               StringBuilder builder = new StringBuilder(charCount);
+               builder.append(chars, 0, charCount);
+               return builder;
+       }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryInput.java
new file mode 100644 (file)
index 0000000..2a98f16
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import java.io.IOException;
+
+final class KryoEntryInput implements EntryInput {
+    private final Kryo kryo;
+    private final Input input;
+    private final JavaSerializer javaSerializer;
+
+    KryoEntryInput(final Kryo kryo, final Input input, final JavaSerializer javaSerializer) {
+        this.kryo = requireNonNull(kryo);
+        this.input = requireNonNull(input);
+        this.javaSerializer = requireNonNull(javaSerializer);
+    }
+
+    @Override
+    public byte[] readBytes(final int length) throws IOException {
+        try {
+            return input.readBytes(length);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public long readLong() throws IOException {
+        try {
+            return input.readLong(false);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public Object readObject() throws IOException {
+        try {
+            return javaSerializer.read(kryo, input, null);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public String readString() throws IOException {
+        try {
+            return input.readString();
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public int readVarInt() throws IOException {
+        try {
+            return input.readVarInt(true);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoEntryOutput.java
new file mode 100644 (file)
index 0000000..90886dd
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.KryoException;
+import com.esotericsoftware.kryo.io.Output;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import java.io.IOException;
+
+final class KryoEntryOutput implements EntryOutput {
+    private final Kryo kryo;
+    private final Output output;
+    private final JavaSerializer javaSerializer;
+
+    KryoEntryOutput(final Kryo kryo, final Output output, final JavaSerializer javaSerializer) {
+        this.kryo = requireNonNull(kryo);
+        this.output = requireNonNull(output);
+        this.javaSerializer = requireNonNull(javaSerializer);
+    }
+
+    @Override
+    public void writeBytes(final byte[] bytes) throws IOException {
+        try {
+            output.writeBytes(bytes);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeLong(final long value) throws IOException {
+        try {
+            output.writeLong(value, false);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeObject(final Object value) throws IOException {
+        try {
+            javaSerializer.write(kryo, output, value);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeString(final String value) throws IOException {
+        try {
+            output.writeString(value);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+
+    @Override
+    public void writeVarInt(final int value) throws IOException {
+        try {
+            output.writeVarInt(value, true);
+        } catch (KryoException e) {
+            throw new IOException(e);
+        }
+    }
+}
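Note: KryoEntryInput and KryoEntryOutput above adapt Kryo's Input/Output streams to the JournalSerdes.EntryInput/EntryOutput contracts, translating KryoException into IOException so entry serializers never see Kryo types. A minimal sketch of an EntrySerdes written against those interfaces follows; the SampleEntry record and its serdes are purely illustrative and not part of this change.

    // Assumes: import io.atomix.storage.journal.JournalSerdes; import java.io.IOException;
    // Illustrative only: a hypothetical entry type and its EntrySerdes.
    record SampleEntry(String id, long stamp) {
    }

    final class SampleEntrySerdes implements JournalSerdes.EntrySerdes<SampleEntry> {
        @Override
        public SampleEntry read(final JournalSerdes.EntryInput input) throws IOException {
            // Fields must be read back in the order they were written.
            return new SampleEntry(input.readString(), input.readLong());
        }

        @Override
        public void write(final JournalSerdes.EntryOutput output, final SampleEntry entry) throws IOException {
            output.writeString(entry.id());
            output.writeLong(entry.stamp());
        }
    }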
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoIOPool.java
new file mode 100644 (file)
index 0000000..6324631
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import java.lang.ref.SoftReference;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.function.Function;
+
+abstract class KryoIOPool<T> {
+
+  private final ConcurrentLinkedQueue<SoftReference<T>> queue = new ConcurrentLinkedQueue<>();
+
+  private T borrow(final int bufferSize) {
+    T element;
+    SoftReference<T> reference;
+    while ((reference = queue.poll()) != null) {
+      if ((element = reference.get()) != null) {
+        return element;
+      }
+    }
+    return create(bufferSize);
+  }
+
+  protected abstract T create(final int bufferSize);
+
+  protected abstract boolean recycle(final T element);
+
+  <R> R run(final Function<T, R> function, final int bufferSize) {
+    final T element = borrow(bufferSize);
+    try {
+      return function.apply(element);
+    } finally {
+      if (recycle(element)) {
+        queue.offer(new SoftReference<>(element));
+      }
+    }
+  }
+}
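Note: KryoIOPool keeps soft-referenced instances on a concurrent queue; borrow() returns a live instance or creates a new one, and run() re-queues the instance only when recycle() accepts it. A minimal sketch of that borrow/run/recycle cycle, using a throw-away StringBuilder pool purely for illustration:

    // Illustrative only: an ad-hoc subclass exercising the run() contract.
    final KryoIOPool<StringBuilder> pool = new KryoIOPool<>() {
        @Override
        protected StringBuilder create(final int bufferSize) {
            return new StringBuilder(bufferSize);
        }

        @Override
        protected boolean recycle(final StringBuilder element) {
            element.setLength(0); // reset state before returning the instance to the queue
            return true;
        }
    };

    // The element is borrowed, used, and re-pooled in a single call.
    final String greeting = pool.run(sb -> sb.append("hello").toString(), 16);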
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoInputPool.java
new file mode 100644 (file)
index 0000000..0eeb8df
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Input;
+
+class KryoInputPool extends KryoIOPool<Input> {
+
+  static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;
+
+  @Override
+  protected Input create(int bufferSize) {
+    return new Input(bufferSize);
+  }
+
+  @Override
+  protected boolean recycle(Input input) {
+    if (input.getBuffer().length < MAX_POOLED_BUFFER_SIZE) {
+      input.setInputStream(null);
+      return true;
+    }
+    return false; // discard
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdes.java
new file mode 100644 (file)
index 0000000..64f3538
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Registration;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.ByteBufferInput;
+import com.esotericsoftware.kryo.io.ByteBufferOutput;
+import com.esotericsoftware.kryo.pool.KryoCallback;
+import com.esotericsoftware.kryo.pool.KryoFactory;
+import com.esotericsoftware.kryo.pool.KryoPool;
+import com.google.common.base.MoreObjects;
+import io.atomix.storage.journal.JournalSerdes;
+import java.io.ByteArrayInputStream;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.List;
+import org.objenesis.strategy.StdInstantiatorStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Pool of Kryo instances, with classes pre-registered.
+ */
+final class KryoJournalSerdes implements JournalSerdes, KryoFactory, KryoPool {
+    /**
+     * Default buffer size used for serialization.
+     *
+     * @see #serialize(Object)
+     */
+    private static final int DEFAULT_BUFFER_SIZE = 4096;
+
+    /**
+     * Smallest ID free to use for user-defined registrations.
+     */
+    private static final int INITIAL_ID = 16;
+
+    static final String NO_NAME = "(no name)";
+
+    private static final Logger LOGGER = LoggerFactory.getLogger(KryoJournalSerdes.class);
+
+    private final KryoPool kryoPool = new KryoPool.Builder(this).softReferences().build();
+
+    private final KryoOutputPool kryoOutputPool = new KryoOutputPool();
+    private final KryoInputPool kryoInputPool = new KryoInputPool();
+
+    private final List<RegisteredType> registeredTypes;
+    private final ClassLoader classLoader;
+    private final String friendlyName;
+
+    /**
+     * Creates a Kryo instance pool.
+     *
+     * @param registeredTypes      types to register
+     * @param classLoader          class loader to use for resolving registered types
+     * @param friendlyName         friendly name for the namespace
+     */
+    KryoJournalSerdes(
+            final List<RegisteredType> registeredTypes,
+            final ClassLoader classLoader,
+            final String friendlyName) {
+        this.registeredTypes = List.copyOf(registeredTypes);
+        this.classLoader = requireNonNull(classLoader);
+        this.friendlyName = requireNonNull(friendlyName);
+
+        // Pre-populate with a single instance
+        release(create());
+    }
+
+    @Override
+    public byte[] serialize(final Object obj) {
+        return serialize(obj, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public byte[] serialize(final Object obj, final int bufferSize) {
+        return kryoOutputPool.run(output -> kryoPool.run(kryo -> {
+            kryo.writeClassAndObject(output, obj);
+            output.flush();
+            return output.getByteArrayOutputStream().toByteArray();
+        }), bufferSize);
+    }
+
+    @Override
+    public void serialize(final Object obj, final ByteBuffer buffer) {
+        ByteBufferOutput out = new ByteBufferOutput(buffer);
+        Kryo kryo = borrow();
+        try {
+            kryo.writeClassAndObject(out, obj);
+            out.flush();
+        } finally {
+            release(kryo);
+        }
+    }
+
+    @Override
+    public void serialize(final Object obj, final OutputStream stream) {
+        serialize(obj, stream, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public void serialize(final Object obj, final OutputStream stream, final int bufferSize) {
+        ByteBufferOutput out = new ByteBufferOutput(stream, bufferSize);
+        Kryo kryo = borrow();
+        try {
+            kryo.writeClassAndObject(out, obj);
+            out.flush();
+        } finally {
+            release(kryo);
+        }
+    }
+
+    @Override
+    public <T> T deserialize(final byte[] bytes) {
+        return kryoInputPool.run(input -> {
+            input.setInputStream(new ByteArrayInputStream(bytes));
+            return kryoPool.run(kryo -> {
+                @SuppressWarnings("unchecked")
+                T obj = (T) kryo.readClassAndObject(input);
+                return obj;
+            });
+        }, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public <T> T deserialize(final ByteBuffer buffer) {
+        Kryo kryo = borrow();
+        try {
+            @SuppressWarnings("unchecked")
+            T obj = (T) kryo.readClassAndObject(new Kryo505ByteBufferInput(buffer));
+            return obj;
+        } finally {
+            release(kryo);
+        }
+    }
+
+    @Override
+    public <T> T deserialize(final InputStream stream) {
+        return deserialize(stream, DEFAULT_BUFFER_SIZE);
+    }
+
+    @Override
+    public <T> T deserialize(final InputStream stream, final int bufferSize) {
+        Kryo kryo = borrow();
+        try {
+            @SuppressWarnings("unchecked")
+            T obj = (T) kryo.readClassAndObject(new ByteBufferInput(stream, bufferSize));
+            return obj;
+        } finally {
+            release(kryo);
+        }
+    }
+
+    /**
+     * Creates a Kryo instance.
+     *
+     * @return Kryo instance
+     */
+    @Override
+    public Kryo create() {
+        LOGGER.trace("Creating Kryo instance for {}", this);
+        Kryo kryo = new Kryo();
+        kryo.setClassLoader(classLoader);
+        kryo.setRegistrationRequired(true);
+
+        // TODO rethink whether we want to use StdInstantiatorStrategy
+        kryo.setInstantiatorStrategy(
+            new Kryo.DefaultInstantiatorStrategy(new StdInstantiatorStrategy()));
+
+        int id = INITIAL_ID;
+        for (RegisteredType registeredType : registeredTypes) {
+            register(kryo, registeredType.types(), registeredType.serializer(), id++);
+        }
+        return kryo;
+    }
+
+    /**
+     * Register {@code type} and {@code serializer} to {@code kryo} instance.
+     *
+     * @param kryo       Kryo instance
+     * @param types      types to register
+     * @param serializer serializer to register, or null to use the default
+     * @param id         type registration id to use
+     */
+    private void register(final Kryo kryo, final Class<?>[] types, final Serializer<?> serializer, final int id) {
+        Registration existing = kryo.getRegistration(id);
+        if (existing != null) {
+            boolean matches = false;
+            for (Class<?> type : types) {
+                if (existing.getType() == type) {
+                    matches = true;
+                    break;
+                }
+            }
+
+            if (!matches) {
+                LOGGER.error("{}: Failed to register {} as {}, {} was already registered.",
+                    friendlyName, types, id, existing.getType());
+
+                throw new IllegalStateException(String.format(
+                    "Failed to register %s as %s, %s was already registered.",
+                    Arrays.toString(types), id, existing.getType()));
+            }
+            // Fall through to the register call for now. Consider skipping it
+            // if there is a reasonable way to compare serializer equivalence.
+        }
+
+        for (Class<?> type : types) {
+            Registration r = null;
+            if (serializer == null) {
+                r = kryo.register(type, id);
+            } else if (type.isInterface()) {
+                kryo.addDefaultSerializer(type, serializer);
+            } else {
+                r = kryo.register(type, serializer, id);
+            }
+            if (r != null) {
+                if (r.getId() != id) {
+                    LOGGER.debug("{}: {} already registered as {}. Skipping {}.",
+                        friendlyName, r.getType(), r.getId(), id);
+                }
+                LOGGER.trace("{} registered as {}", r.getType(), r.getId());
+            }
+        }
+    }
+
+    @Override
+    public Kryo borrow() {
+        return kryoPool.borrow();
+    }
+
+    @Override
+    public void release(final Kryo kryo) {
+        kryoPool.release(kryo);
+    }
+
+    @Override
+    public <T> T run(final KryoCallback<T> callback) {
+        return kryoPool.run(callback);
+    }
+
+    @Override
+    public String toString() {
+        if (!NO_NAME.equals(friendlyName)) {
+            return MoreObjects.toStringHelper(getClass())
+                .omitNullValues()
+                .add("friendlyName", friendlyName)
+                // omit lengthy detail, when there's a name
+                .toString();
+        }
+        return MoreObjects.toStringHelper(getClass()).add("registeredTypes", registeredTypes).toString();
+    }
+}
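Note: KryoJournalSerdes is reached through JournalSerdes.builder(); registered types receive Kryo registration ids sequentially starting at INITIAL_ID, and every (de)serialization borrows a pooled Kryo instance. A hedged round-trip sketch using the illustrative SampleEntry/SampleEntrySerdes from the earlier note (the friendly name is arbitrary):

    // Sketch: build a serdes, then round-trip an entry through a byte[].
    JournalSerdes serdes = JournalSerdes.builder()
        .register(new SampleEntrySerdes(), SampleEntry.class)
        .build("sample-journal");

    byte[] bytes = serdes.serialize(new SampleEntry("id-1", 42L));
    SampleEntry restored = serdes.deserialize(bytes);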
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoJournalSerdesBuilder.java
new file mode 100644 (file)
index 0000000..a62d8b3
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2014-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import io.atomix.storage.journal.JournalSerdes;
+import io.atomix.storage.journal.JournalSerdes.Builder;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.util.ArrayList;
+import java.util.List;
+
+public final class KryoJournalSerdesBuilder implements Builder {
+    private final List<RegisteredType> types = new ArrayList<>();
+    private ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+
+    @Override
+    public KryoJournalSerdesBuilder register(final EntrySerdes<?> serdes, final Class<?>... classes) {
+        types.add(new RegisteredType(new EntrySerializer<>(serdes), classes));
+        return this;
+    }
+
+    @Override
+    public KryoJournalSerdesBuilder setClassLoader(final ClassLoader classLoader) {
+        this.classLoader = requireNonNull(classLoader);
+        return this;
+    }
+
+    @Override
+    public JournalSerdes build() {
+        return build(KryoJournalSerdes.NO_NAME);
+    }
+
+    @Override
+    public JournalSerdes build(final String friendlyName) {
+        checkState(!types.isEmpty(), "No serializers registered");
+        return new KryoJournalSerdes(types, classLoader, friendlyName);
+    }
+}
\ No newline at end of file
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/KryoOutputPool.java
new file mode 100644 (file)
index 0000000..6b1737f
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2014-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+class KryoOutputPool extends KryoIOPool<ByteArrayOutput> {
+
+  private static final int MAX_BUFFER_SIZE = 768 * 1024;
+  static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;
+
+  @Override
+  protected ByteArrayOutput create(int bufferSize) {
+    return new ByteArrayOutput(bufferSize, MAX_BUFFER_SIZE, new BufferAwareByteArrayOutputStream(bufferSize));
+  }
+
+  @Override
+  protected boolean recycle(ByteArrayOutput output) {
+    if (output.getByteArrayOutputStream().getBufferSize() < MAX_POOLED_BUFFER_SIZE) {
+      output.getByteArrayOutputStream().reset();
+      output.clear();
+      return true;
+    }
+    return false; // discard
+  }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/RegisteredType.java
new file mode 100644 (file)
index 0000000..0a17c09
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import static java.util.Objects.requireNonNull;
+
+record RegisteredType(EntrySerializer<?> serializer, Class<?>[] types) {
+    RegisteredType {
+        requireNonNull(serializer);
+        requireNonNull(types);
+    }
+}
diff --git a/atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java b/atomix-storage/src/main/java/io/atomix/utils/serializer/package-info.java
new file mode 100644 (file)
index 0000000..afc8022
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides classes and interfaces for binary serialization.
+ */
+package io.atomix.utils.serializer;
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/AbstractJournalTest.java
new file mode 100644 (file)
index 0000000..14e59e5
--- /dev/null
@@ -0,0 +1,429 @@
+/*
+ * Copyright 2017-2021 Open Networking Foundation
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Base journal test.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+@RunWith(Parameterized.class)
+public abstract class AbstractJournalTest {
+    private static final JournalSerdes NAMESPACE = JournalSerdes.builder()
+        .register(new TestEntrySerdes(), TestEntry.class)
+        .register(new ByteArraySerdes(), byte[].class)
+        .build();
+
+    protected static final TestEntry ENTRY = new TestEntry(32);
+    private static final Path PATH = Paths.get("target/test-logs/");
+
+    private final StorageLevel storageLevel;
+    private final int maxSegmentSize;
+    protected final int entriesPerSegment;
+
+    protected AbstractJournalTest(final StorageLevel storageLevel, final int maxSegmentSize) {
+        this.storageLevel = storageLevel;
+        this.maxSegmentSize = maxSegmentSize;
+        int entryLength = NAMESPACE.serialize(ENTRY).length + 8;
+        entriesPerSegment = (maxSegmentSize - 64) / entryLength;
+    }
+
+    @Parameterized.Parameters
+    public static List<Object[]> primeNumbers() {
+        var runs = new ArrayList<Object[]>();
+        for (int i = 1; i <= 10; i++) {
+            for (int j = 1; j <= 10; j++) {
+                runs.add(new Object[] { 64 + i * (NAMESPACE.serialize(ENTRY).length + 8) + j });
+            }
+        }
+        return runs;
+    }
+
+    protected SegmentedJournal<TestEntry> createJournal() {
+        return SegmentedJournal.<TestEntry>builder()
+            .withName("test")
+            .withDirectory(PATH.toFile())
+            .withNamespace(NAMESPACE)
+            .withStorageLevel(storageLevel)
+            .withMaxSegmentSize(maxSegmentSize)
+            .withIndexDensity(.2)
+            .build();
+    }
+
+    @Test
+    public void testCloseMultipleTimes() {
+        // given
+        final Journal<TestEntry> journal = createJournal();
+
+        // when
+        journal.close();
+
+        // then
+        journal.close();
+    }
+
+    @Test
+    public void testWriteRead() throws Exception {
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            // Append a couple entries.
+            assertEquals(1, writer.getNextIndex());
+            var indexed = writer.append(ENTRY);
+            assertEquals(1, indexed.index());
+
+            assertEquals(2, writer.getNextIndex());
+            writer.append(ENTRY);
+            reader.reset(2);
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(2, indexed.index());
+            assertNull(reader.tryNext());
+
+            // Test reading an entry
+            reader.reset();
+            var entry1 = reader.tryNext();
+            assertNotNull(entry1);
+            assertEquals(1, entry1.index());
+            assertEquals(entry1, reader.getCurrentEntry());
+
+            // Test reading a second entry
+            assertEquals(2, reader.getNextIndex());
+            var entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertEquals(3, reader.getNextIndex());
+            assertNull(reader.tryNext());
+
+            // Test opening a new reader and reading from the journal.
+            reader = journal.openReader(1);
+            entry1 = reader.tryNext();
+            assertNotNull(entry1);
+            assertEquals(1, entry1.index());
+            assertEquals(entry1, reader.getCurrentEntry());
+
+            assertEquals(2, reader.getNextIndex());
+            entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertNull(reader.tryNext());
+
+            // Reset the reader.
+            reader.reset();
+
+            // Test opening a new reader and reading from the journal.
+            reader = journal.openReader(1);
+            entry1 = reader.tryNext();
+            assertNotNull(entry1);
+            assertEquals(1, entry1.index());
+            assertEquals(entry1, reader.getCurrentEntry());
+
+            assertEquals(2, reader.getNextIndex());
+            entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertNull(reader.tryNext());
+
+            // Truncate the journal and write a different entry.
+            writer.truncate(1);
+            assertEquals(2, writer.getNextIndex());
+            writer.append(ENTRY);
+            reader.reset(2);
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(2, indexed.index());
+
+            // Reset the reader to a specific index and read the last entry again.
+            reader.reset(2);
+
+            final var current = reader.getCurrentEntry();
+            assertNotNull(current);
+            assertEquals(1, current.index());
+            assertEquals(2, reader.getNextIndex());
+            entry2 = reader.tryNext();
+            assertNotNull(entry2);
+            assertEquals(2, entry2.index());
+            assertEquals(entry2, reader.getCurrentEntry());
+            assertNull(reader.tryNext());
+        }
+    }
+
+    @Test
+    public void testResetTruncateZero() throws Exception {
+        try (SegmentedJournal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            assertEquals(0, writer.getLastIndex());
+            writer.append(ENTRY);
+            writer.append(ENTRY);
+            writer.reset(1);
+            assertEquals(0, writer.getLastIndex());
+            writer.append(ENTRY);
+
+            var indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(1, indexed.index());
+            writer.reset(1);
+            assertEquals(0, writer.getLastIndex());
+            writer.append(ENTRY);
+            assertEquals(1, writer.getLastIndex());
+            assertEquals(1, writer.getLastEntry().index());
+
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(1, indexed.index());
+
+            writer.truncate(0);
+            assertEquals(0, writer.getLastIndex());
+            assertNull(writer.getLastEntry());
+            writer.append(ENTRY);
+            assertEquals(1, writer.getLastIndex());
+            assertEquals(1, writer.getLastEntry().index());
+
+            indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(1, indexed.index());
+        }
+    }
+
+    @Test
+    public void testTruncateRead() throws Exception {
+        int i = 10;
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            for (int j = 1; j <= i; j++) {
+                assertEquals(j, writer.append(new TestEntry(32)).index());
+            }
+
+            for (int j = 1; j <= i - 2; j++) {
+                final var indexed = reader.tryNext();
+                assertNotNull(indexed);
+                assertEquals(j, indexed.index());
+            }
+
+            writer.truncate(i - 2);
+
+            assertNull(reader.tryNext());
+            assertEquals(i - 1, writer.append(new TestEntry(32)).index());
+            assertEquals(i, writer.append(new TestEntry(32)).index());
+
+            Indexed<TestEntry> entry = reader.tryNext();
+            assertNotNull(entry);
+            assertEquals(i - 1, entry.index());
+            entry = reader.tryNext();
+            assertNotNull(entry);
+            assertEquals(i, entry.index());
+        }
+    }
+
+    @Test
+    public void testWriteReadEntries() throws Exception {
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1);
+
+            for (int i = 1; i <= entriesPerSegment * 5; i++) {
+                writer.append(ENTRY);
+                var entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+                reader.reset(i);
+                entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+
+                if (i > 6) {
+                    reader.reset(i - 5);
+                    final var current = reader.getCurrentEntry();
+                    assertNotNull(current);
+                    assertEquals(i - 6, current.index());
+                    assertEquals(i - 5, reader.getNextIndex());
+                    reader.reset(i + 1);
+                }
+
+                writer.truncate(i - 1);
+                writer.append(ENTRY);
+
+                assertNotNull(reader.tryNext());
+                reader.reset(i);
+                entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+            }
+        }
+    }
+
+    @Test
+    public void testWriteReadCommittedEntries() throws Exception {
+        try (Journal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> reader = journal.openReader(1, JournalReader.Mode.COMMITS);
+
+            for (int i = 1; i <= entriesPerSegment * 5; i++) {
+                writer.append(ENTRY);
+                assertNull(reader.tryNext());
+                writer.commit(i);
+                var entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+                reader.reset(i);
+                entry = reader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+                assertEquals(32, entry.entry().bytes().length);
+            }
+        }
+    }
+
+    @Test
+    public void testReadAfterCompact() throws Exception {
+        try (SegmentedJournal<TestEntry> journal = createJournal()) {
+            JournalWriter<TestEntry> writer = journal.writer();
+            JournalReader<TestEntry> uncommittedReader = journal.openReader(1, JournalReader.Mode.ALL);
+            JournalReader<TestEntry> committedReader = journal.openReader(1, JournalReader.Mode.COMMITS);
+
+            for (int i = 1; i <= entriesPerSegment * 10; i++) {
+                assertEquals(i, writer.append(ENTRY).index());
+            }
+
+            assertEquals(1, uncommittedReader.getNextIndex());
+            assertEquals(1, committedReader.getNextIndex());
+
+            // This creates an asymmetry, as the uncommitted reader moves one step ahead...
+            assertNotNull(uncommittedReader.tryNext());
+            assertEquals(2, uncommittedReader.getNextIndex());
+            assertNull(committedReader.tryNext());
+            assertEquals(1, committedReader.getNextIndex());
+
+            writer.commit(entriesPerSegment * 9);
+
+            // ... so here we catch up ...
+            assertNotNull(committedReader.tryNext());
+            assertEquals(2, committedReader.getNextIndex());
+
+            // ... and continue from the second entry
+            for (int i = 2; i <= entriesPerSegment * 2.5; i++) {
+                var entry = uncommittedReader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+
+                entry = committedReader.tryNext();
+                assertNotNull(entry);
+                assertEquals(i, entry.index());
+            }
+
+            journal.compact(entriesPerSegment * 5 + 1);
+
+            assertNull(uncommittedReader.getCurrentEntry());
+            assertEquals(entriesPerSegment * 5 + 1, uncommittedReader.getNextIndex());
+            var entry = uncommittedReader.tryNext();
+            assertNotNull(entry);
+            assertEquals(entriesPerSegment * 5 + 1, entry.index());
+
+            assertNull(committedReader.getCurrentEntry());
+            assertEquals(entriesPerSegment * 5 + 1, committedReader.getNextIndex());
+            entry = committedReader.tryNext();
+            assertNotNull(entry);
+            assertEquals(entriesPerSegment * 5 + 1, entry.index());
+        }
+    }
+
+    /**
+     * Tests compacting the journal, then recovering it and reading from the first remaining index.
+     */
+    @Test
+    public void testCompactAndRecover() throws Exception {
+        try (var journal = createJournal()) {
+            // Write three segments to the journal.
+            final var writer = journal.writer();
+            for (int i = 0; i < entriesPerSegment * 3; i++) {
+                writer.append(ENTRY);
+            }
+
+            // Commit the entries and compact the first segment.
+            writer.commit(entriesPerSegment * 3);
+            journal.compact(entriesPerSegment + 1);
+        }
+
+        // Reopen the journal and create a reader.
+        try (var journal = createJournal()) {
+            final var writer = journal.writer();
+            final var reader = journal.openReader(1, JournalReader.Mode.COMMITS);
+            writer.append(ENTRY);
+            writer.append(ENTRY);
+            writer.commit(entriesPerSegment * 3);
+
+            // Ensure the reader starts at the first physical index in the journal.
+            assertEquals(entriesPerSegment + 1, reader.getNextIndex());
+            assertEquals(reader.getFirstIndex(), reader.getNextIndex());
+            final var indexed = reader.tryNext();
+            assertNotNull(indexed);
+            assertEquals(entriesPerSegment + 1, indexed.index());
+            assertEquals(entriesPerSegment + 2, reader.getNextIndex());
+        }
+    }
+
+    @Before
+    @After
+    public void cleanupStorage() throws IOException {
+        if (Files.exists(PATH)) {
+            Files.walkFileTree(PATH, new SimpleFileVisitor<Path>() {
+                @Override
+                public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
+                    Files.delete(file);
+                    return FileVisitResult.CONTINUE;
+                }
+
+                @Override
+                public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException {
+                    Files.delete(dir);
+                    return FileVisitResult.CONTINUE;
+                }
+            });
+        }
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java b/atomix-storage/src/test/java/io/atomix/storage/journal/ByteArraySerdes.java
new file mode 100644 (file)
index 0000000..79ce909
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2023 PANTHEON.tech, s.r.o.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class ByteArraySerdes implements EntrySerdes<byte[]> {
+    @Override
+    public byte[] read(final EntryInput input) throws IOException {
+        int length = input.readVarInt();
+        return length == 0 ? null : input.readBytes(length - 1);
+    }
+
+    @Override
+    public void write(final EntryOutput output, final byte[] entry) throws IOException {
+        if (entry != null) {
+            output.writeVarInt(entry.length + 1);
+            output.writeBytes(entry);
+        } else {
+            output.writeVarInt(0);
+        }
+    }
+}
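Note: ByteArraySerdes writes the array length plus one as a varint so that zero can stand for a null array, keeping null and new byte[0] distinguishable on the wire. A quick illustration of the encodings produced under that convention:

    // Wire shapes produced by write() above (payload bytes follow the varint):
    //   null           -> writeVarInt(0)
    //   new byte[0]    -> writeVarInt(1), no payload
    //   new byte[] {7} -> writeVarInt(2), then the single payload byte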
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/DiskJournalTest.java
new file mode 100644 (file)
index 0000000..11cbd6c
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Disk journal test.
+ */
+public class DiskJournalTest extends AbstractJournalTest {
+    public DiskJournalTest(final int maxSegmentSize) {
+        super(StorageLevel.DISK, maxSegmentSize);
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentDescriptorTest.java
new file mode 100644 (file)
index 0000000..6db959d
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Segment descriptor test.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class JournalSegmentDescriptorTest {
+
+  /**
+   * Tests the segment descriptor builder.
+   */
+  @Test
+  public void testDescriptorBuilder() {
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES))
+        .withId(2)
+        .withIndex(1025)
+        .withMaxSegmentSize(1024 * 1024)
+        .withMaxEntries(2048)
+        .build();
+
+    assertEquals(2, descriptor.id());
+    assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version());
+    assertEquals(1025, descriptor.index());
+    assertEquals(1024 * 1024, descriptor.maxSegmentSize());
+    assertEquals(2048, descriptor.maxEntries());
+
+    assertEquals(0, descriptor.updated());
+    long time = System.currentTimeMillis();
+    descriptor.update(time);
+    assertEquals(time, descriptor.updated());
+  }
+
+  /**
+   * Tests copying the segment descriptor.
+   */
+  @Test
+  public void testDescriptorCopy() {
+    JournalSegmentDescriptor descriptor = JournalSegmentDescriptor.builder()
+        .withId(2)
+        .withIndex(1025)
+        .withMaxSegmentSize(1024 * 1024)
+        .withMaxEntries(2048)
+        .build();
+
+    long time = System.currentTimeMillis();
+    descriptor.update(time);
+
+    descriptor = descriptor.copyTo(ByteBuffer.allocate(JournalSegmentDescriptor.BYTES));
+
+    assertEquals(2, descriptor.id());
+    assertEquals(JournalSegmentDescriptor.VERSION, descriptor.version());
+    assertEquals(1025, descriptor.index());
+    assertEquals(1024 * 1024, descriptor.maxSegmentSize());
+    assertEquals(2048, descriptor.maxEntries());
+    assertEquals(time, descriptor.updated());
+  }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/JournalSegmentFileTest.java
new file mode 100644 (file)
index 0000000..114ae09
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.io.File;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Journal segment file test.
+ */
+public class JournalSegmentFileTest {
+
+  @Test
+  public void testIsSegmentFile() throws Exception {
+    assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1.log"));
+    assertFalse(JournalSegmentFile.isSegmentFile("foo", "bar-1.log"));
+    assertTrue(JournalSegmentFile.isSegmentFile("foo", "foo-1-1.log"));
+  }
+
+  @Test
+  public void testCreateSegmentFile() throws Exception {
+    File file = JournalSegmentFile.createSegmentFile("foo", new File(System.getProperty("user.dir")), 1);
+    assertTrue(JournalSegmentFile.isSegmentFile("foo", file));
+  }
+
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/MappedJournalTest.java
new file mode 100644 (file)
index 0000000..286c6df
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+/**
+ * Memory mapped journal test.
+ */
+public class MappedJournalTest extends AbstractJournalTest {
+    public MappedJournalTest(final int maxSegmentSize) {
+        super(StorageLevel.MAPPED, maxSegmentSize);
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntry.java
new file mode 100644 (file)
index 0000000..b549362
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import java.util.Arrays;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+
+/**
+ * Test entry.
+ *
+ * @author <a href="http://github.com/kuujo">Jordan Halterman</a>
+ */
+public class TestEntry {
+  private final byte[] bytes;
+
+  public TestEntry(int size) {
+    this(new byte[size]);
+  }
+
+  public TestEntry(byte[] bytes) {
+    this.bytes = bytes;
+  }
+
+  public byte[] bytes() {
+    return bytes;
+  }
+
+  @Override
+  public String toString() {
+    return toStringHelper(this)
+        .add("length", bytes.length)
+        .add("hash", Arrays.hashCode(bytes))
+        .toString();
+  }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java b/atomix-storage/src/test/java/io/atomix/storage/journal/TestEntrySerdes.java
new file mode 100644 (file)
index 0000000..8b04539
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+final class TestEntrySerdes implements EntrySerdes<TestEntry> {
+    private static final ByteArraySerdes BA_SERIALIZER = new ByteArraySerdes();
+
+    @Override
+    public TestEntry read(final EntryInput input) throws IOException {
+        return new TestEntry(BA_SERIALIZER.read(input));
+    }
+
+    @Override
+    public void write(final EntryOutput output, final TestEntry entry) throws IOException {
+        BA_SERIALIZER.write(output, entry.bytes());
+    }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java b/atomix-storage/src/test/java/io/atomix/storage/journal/index/SparseJournalIndexTest.java
new file mode 100644 (file)
index 0000000..b7cd38a
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2018-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.storage.journal.index;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Sparse journal index test.
+ */
+public class SparseJournalIndexTest {
+  @Test
+  public void testSparseJournalIndex() throws Exception {
+    JournalIndex index = new SparseJournalIndex(.2);
+    assertNull(index.lookup(1));
+    index.index(1, 2);
+    assertNull(index.lookup(1));
+    index.index(2, 4);
+    index.index(3, 6);
+    index.index(4, 8);
+    index.index(5, 10);
+    assertEquals(new Position(5, 10), index.lookup(5));
+    index.index(6, 12);
+    index.index(7, 14);
+    index.index(8, 16);
+    assertEquals(new Position(5, 10), index.lookup(8));
+    index.index(9, 18);
+    index.index(10, 20);
+    assertEquals(new Position(10, 20), index.lookup(10));
+    index.truncate(8);
+    assertEquals(new Position(5, 10), index.lookup(8));
+    assertEquals(new Position(5, 10), index.lookup(10));
+    index.truncate(4);
+    assertNull(index.lookup(4));
+    assertNull(index.lookup(8));
+
+    index = new SparseJournalIndex(.2);
+    assertNull(index.lookup(100));
+    index.index(101, 2);
+    assertNull(index.lookup(1));
+    index.index(102, 4);
+    index.index(103, 6);
+    index.index(104, 8);
+    index.index(105, 10);
+    assertEquals(new Position(105, 10), index.lookup(105));
+    index.index(106, 12);
+    index.index(107, 14);
+    index.index(108, 16);
+    assertEquals(new Position(105, 10), index.lookup(108));
+    index.index(109, 18);
+    index.index(110, 20);
+    assertEquals(new Position(110, 20), index.lookup(110));
+    index.truncate(108);
+    assertEquals(new Position(105, 10), index.lookup(108));
+    assertEquals(new Position(105, 10), index.lookup(110));
+    index.truncate(104);
+    assertNull(index.lookup(104));
+    assertNull(index.lookup(108));
+  }
+}
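
Note (not part of the patch): the test above passes a density of 0.2 to SparseJournalIndex, so roughly one in every five entries gets recorded, which is why lookup(8) still resolves to Position(5, 10). A minimal sketch of that density-based skip idea, assuming a TreeMap-backed index; the class and method names below are illustrative, not the project's actual implementation:

import java.util.Map;
import java.util.TreeMap;

// Illustrative only: record every Nth position, where N is derived from the density.
final class DensitySketchIndex {
    private final TreeMap<Long, Integer> recorded = new TreeMap<>();
    private final int skip;

    DensitySketchIndex(final double density) {
        // density 0.2 -> record every 5th entry
        skip = (int) Math.ceil(1.0 / density);
    }

    void index(final long entry, final int position) {
        if (entry % skip == 0) {
            recorded.put(entry, position);
        }
    }

    // Closest recorded position at or below the requested entry, or null if none.
    Map.Entry<Long, Integer> lookup(final long entry) {
        return recorded.floorEntry(entry);
    }
}

With skip = 5, entries 1 through 4 are never recorded (lookup(1) is null), entry 5 is, and lookup(8) falls back to the entry recorded at 5 — matching the assertions in the test.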
diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/BufferAwareByteArrayOutputStreamTest.java
new file mode 100644 (file)
index 0000000..a47d378
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class BufferAwareByteArrayOutputStreamTest {
+
+  @Test
+  public void testBufferSize() throws Exception {
+    BufferAwareByteArrayOutputStream outputStream = new BufferAwareByteArrayOutputStream(8);
+    assertEquals(8, outputStream.getBufferSize());
+    outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
+    assertEquals(8, outputStream.getBufferSize());
+    outputStream.write(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
+    assertEquals(16, outputStream.getBufferSize());
+    outputStream.reset();
+    assertEquals(16, outputStream.getBufferSize());
+  }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoInputPoolTest.java
new file mode 100644 (file)
index 0000000..82a9629
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Input;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class KryoInputPoolTest {
+
+  private KryoInputPool kryoInputPool;
+
+  @Before
+  public void setUp() throws Exception {
+    kryoInputPool = new KryoInputPool();
+  }
+
+  @Test
+  public void discardOutput() {
+    final Input[] result = new Input[2];
+    kryoInputPool.run(input -> {
+      result[0] = input;
+      return null;
+    }, KryoInputPool.MAX_POOLED_BUFFER_SIZE + 1);
+    kryoInputPool.run(input -> {
+      result[1] = input;
+      return null;
+    }, 0);
+    assertTrue(result[0] != result[1]);
+  }
+
+  @Test
+  public void recycleOutput() {
+    final Input[] result = new Input[2];
+    kryoInputPool.run(input -> {
+      assertEquals(0, input.position());
+      byte[] payload = new byte[]{1, 2, 3, 4};
+      input.setBuffer(payload);
+      assertArrayEquals(payload, input.readBytes(4));
+      result[0] = input;
+      return null;
+    }, 0);
+    assertNull(result[0].getInputStream());
+    assertEquals(0, result[0].position());
+    kryoInputPool.run(input -> {
+      result[1] = input;
+      return null;
+    }, 0);
+    assertTrue(result[0] == result[1]);
+  }
+}
diff --git a/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java b/atomix-storage/src/test/java/io/atomix/utils/serializer/KryoOutputPoolTest.java
new file mode 100644 (file)
index 0000000..04d55d6
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017-2022 Open Networking Foundation and others.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.atomix.utils.serializer;
+
+import com.esotericsoftware.kryo.io.Output;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class KryoOutputPoolTest {
+
+  private KryoOutputPool kryoOutputPool;
+
+  @Before
+  public void setUp() throws Exception {
+    kryoOutputPool = new KryoOutputPool();
+  }
+
+  @Test
+  public void discardOutput() {
+    final Output[] result = new Output[2];
+    kryoOutputPool.run(output -> {
+      result[0] = output;
+      return null;
+    }, KryoOutputPool.MAX_POOLED_BUFFER_SIZE + 1);
+    kryoOutputPool.run(output -> {
+      result[1] = output;
+      return null;
+    }, 0);
+    assertTrue(result[0] != result[1]);
+  }
+
+  @Test
+  public void recycleOutput() {
+    final ByteArrayOutput[] result = new ByteArrayOutput[2];
+    kryoOutputPool.run(output -> {
+      output.writeInt(1);
+      assertEquals(Integer.BYTES, output.position());
+      result[0] = output;
+      return null;
+    }, 0);
+    assertEquals(0, result[0].position());
+    assertEquals(0, result[0].getByteArrayOutputStream().size());
+    kryoOutputPool.run(output -> {
+      assertEquals(0, output.position());
+      result[1] = output;
+      return null;
+    }, 0);
+    assertTrue(result[0] == result[1]);
+  }
+}
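
Note (not part of the patch): both pool tests above assert the same contract — a run whose requested size exceeds MAX_POOLED_BUFFER_SIZE hands back a buffer that is not recycled, while small requests receive the same pooled instance again. A generic one-slot sketch of that contract, with illustrative names and threshold rather than the project's implementation:

import java.util.function.Function;
import java.util.function.Supplier;

final class OneSlotPoolSketch<T> {
    // Illustrative threshold; the real pools define their own MAX_POOLED_BUFFER_SIZE.
    static final int MAX_POOLED_BUFFER_SIZE = 512 * 1024;

    private final Supplier<T> factory;
    private T cached;

    OneSlotPoolSketch(final Supplier<T> factory) {
        this.factory = factory;
    }

    <R> R run(final Function<T, R> task, final int requestedSize) {
        final T buffer = cached != null ? cached : factory.get();
        cached = null;
        try {
            return task.apply(buffer);
        } finally {
            // Recycle only buffers sized for pooling; oversized ones are dropped
            // so the pool never pins large arrays in memory.
            if (requestedSize <= MAX_POOLED_BUFFER_SIZE) {
                cached = buffer;
            }
        }
    }
}

Under this shape, discardOutput would observe two distinct instances and recycleOutput the same instance twice, as the tests expect.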
diff --git a/atomix-storage/src/test/resources/logback.xml b/atomix-storage/src/test/resources/logback.xml
new file mode 100644 (file)
index 0000000..41f8f99
--- /dev/null
@@ -0,0 +1,29 @@
+<!--
+  ~ Copyright 2017-present Open Networking Laboratory
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~ http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n
+            </pattern>
+        </encoder>
+    </appender>
+
+    <logger name="io.atomix.storage" level="INFO" />
+
+    <root level="${root.logging.level:-INFO}">
+        <appender-ref ref="STDOUT" />
+    </root>
+</configuration>
\ No newline at end of file
index 929eebc638641d97f1d12645f124f11e41f4bafa..52fde3264014c0a8ffc7c9a737aa02ebd969f1d3 100644 (file)
@@ -7,17 +7,15 @@ terms of the Eclipse Public License v1.0 which accompanies this distribution,
 and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>org.opendaylight.mdsal</groupId>
-    <artifactId>binding-parent</artifactId>
-    <version>3.0.6</version>
-    <relativePath/>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>mdsal-parent</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
+    <relativePath>../../opendaylight/md-sal/parent</relativePath>
   </parent>
 
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>benchmark-api</artifactId>
-  <version>1.8.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
@@ -26,5 +24,4 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       <artifactId>yang-ext</artifactId>
     </dependency>
   </dependencies>
-
 </project>
index f8394e8ae9ca4c4ccbe3b22b0679947372864f34..635ea805bb7e2d4fb45ee4d36ac4182b261f8f6b 100644 (file)
@@ -3,6 +3,13 @@ module dsbenchmark {
     namespace "urn:opendaylight:params:xml:ns:yang:dsbenchmark";
     prefix "dsbenchmark";
 
+    description
+        "Copyright © 2015, 2018 Cisco Systems, Inc. and others.
+
+        This program and the accompanying materials are made available under the
+        terms of the Eclipse Public License v1.0 which accompanies this distribution,
+        and is available at http://www.eclipse.org/legal/epl-v10.html";
+
     revision "2015-01-05" {
         description "Initial revision of dsbenchmark model";
     }
index 307c0634126ab40360a213b5a984989b827a0d99..92e1a10a23882990ce742b77064de7843822a8f1 100644 (file)
@@ -3,6 +3,13 @@ module ntfbench-payload {
     namespace "urn:opendaylight:params:xml:ns:yang:ntfbench-payload";
     prefix "ntfbenchmark";
 
+    description
+        "Copyright © 2015, 2018 Cisco Systems, Inc. and others.
+
+        This program and the accompanying materials are made available under the
+        terms of the Eclipse Public License v1.0 which accompanies this distribution,
+        and is available at http://www.eclipse.org/legal/epl-v10.html";
+
     revision "2015-07-09" {
         description "Initial revision of ntfbenchmark model";
     }
index c1619b6ec51fdf6ac81365f0952d04fdad0b89a7..b8a481197d58b8c9957207ca1dfeb09a54e5660d 100644 (file)
@@ -3,6 +3,13 @@ module ntfbenchmark {
     namespace "urn:opendaylight:params:xml:ns:yang:ntfbenchmark";
     prefix "ntfbenchmark";
 
+    description
+        "Copyright © 2015, 2018 Cisco Systems, Inc. and others.
+
+        This program and the accompanying materials are made available under the
+        terms of the Eclipse Public License v1.0 which accompanies this distribution,
+        and is available at http://www.eclipse.org/legal/epl-v10.html";
+
     revision "2015-01-05" {
         description "Initial revision of ntfbenchmark model";
     }
index a054908da0bf59d16603ba75db739e2f0f47efe9..db84f95940758ca535a65a8028db56f6df34e3ff 100644 (file)
@@ -5,6 +5,13 @@ module rpcbench-payload {
 
     import yang-ext { prefix ext; revision-date "2013-07-09"; }
 
+    description
+        "Copyright © 2015, 2018 Cisco Systems, Inc. and others.
+
+        This program and the accompanying materials are made available under the
+        terms of the Eclipse Public License v1.0 which accompanies this distribution,
+        and is available at http://www.eclipse.org/legal/epl-v10.html";
+
     revision "2015-07-02" {
         description "Initial revision of rpcbenchmark model";
     }
index 7ec13578bfaeb1e16190550ce4386da2fbfe326b..8a110b27976b551800e4cddb0b66afd0650dbc8a 100644 (file)
@@ -3,6 +3,13 @@ module rpcbenchmark {
     namespace "urn:opendaylight:params:xml:ns:yang:rpcbenchmark";
     prefix "rpcbenchmark";
 
+    description
+        "Copyright © 2015, 2018 Cisco Systems, Inc. and others.
+
+        This program and the accompanying materials are made available under the
+        terms of the Eclipse Public License v1.0 which accompanies this distribution,
+        and is available at http://www.eclipse.org/legal/epl-v10.html";
+
     revision "2015-07-02" {
         description "Initial revision of rpcbenchmark model";
     }
diff --git a/benchmark/artifacts/pom.xml b/benchmark/artifacts/pom.xml
deleted file mode 100644 (file)
index 565adb9..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems and others.  All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>benchmark-artifacts</artifactId>
-  <version>1.8.0-SNAPSHOT</version>
-  <packaging>pom</packaging>
-
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>${project.groupId}</groupId>
-        <artifactId>benchmark-api</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>${project.groupId}</groupId>
-        <artifactId>dsbenchmark</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>${project.groupId}</groupId>
-        <artifactId>benchmark-features</artifactId>
-        <version>${project.version}</version>
-        <classifier>features</classifier>
-        <type>xml</type>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-</project>
index c8e5ce6136e0fd2de73ca265cd0dc18bb629bbec..1595fb8111407437a739247dbb7cb264f12ab0d9 100644 (file)
@@ -8,37 +8,26 @@ terms of the Eclipse Public License v1.0 which accompanies this distribution,
 and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
+  <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>org.opendaylight.mdsal</groupId>
-    <artifactId>binding-parent</artifactId>
-    <version>3.0.6</version>
-    <relativePath/>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>mdsal-parent</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
+    <relativePath>../../opendaylight/md-sal/parent</relativePath>
   </parent>
 
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>dsbenchmark</artifactId>
-  <version>1.8.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
   <dependencies>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>benchmark-api</artifactId>
-      <version>${project.version}</version>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
@@ -52,47 +41,19 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-impl</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
+      <optional>true</optional>
+    </dependency>
   </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <propertyExpansion>checkstyle.violationSeverity=error</propertyExpansion>
-        </configuration>
-      </plugin>
-    </plugins>
-
-    <pluginManagement>
-      <plugins>
-        <!--This plugin's configuration is used to store Eclipse m2e settings only. It has no influence on the Maven build itself.-->
-        <plugin>
-          <groupId>org.eclipse.m2e</groupId>
-          <artifactId>lifecycle-mapping</artifactId>
-          <version>1.0.0</version>
-          <configuration>
-            <lifecycleMappingMetadata>
-              <pluginExecutions>
-                <pluginExecution>
-                  <pluginExecutionFilter>
-                    <groupId>org.jacoco</groupId>
-                    <artifactId>jacoco-maven-plugin</artifactId>
-                    <versionRange>[0.7.2.201409121644,)</versionRange>
-                    <goals>
-                      <goal>prepare-agent</goal>
-                    </goals>
-                  </pluginExecutionFilter>
-                  <action>
-                    <ignore/>
-                  </action>
-                </pluginExecution>
-              </pluginExecutions>
-            </lifecycleMappingMetadata>
-          </configuration>
-        </plugin>
-        </plugins>
-    </pluginManagement>
-  </build>
 </project>
index 8378e8b3c8b8c8e7bbc1c20938149c440c0626c0..444ec2fc752040c357190b688ea71463a438e14e 100644 (file)
@@ -7,8 +7,11 @@
  */
 package org.opendaylight.dsbenchmark;
 
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterListBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterListKey;
@@ -25,25 +28,26 @@ public final class BaListBuilder {
         List<OuterList> outerList = new ArrayList<>(outerElements);
         for (int j = 0; j < outerElements; j++) {
             outerList.add(new OuterListBuilder()
-                                .setId(j)
-                                .setInnerList(buildInnerList(j, innerElements))
-                                .withKey(new OuterListKey(j))
-                                .build());
+                .setId(j)
+                .setInnerList(buildInnerList(j, innerElements))
+                .withKey(new OuterListKey(j))
+                .build());
         }
         return outerList;
     }
 
-    private static List<InnerList> buildInnerList(final int index, final int elements) {
-        List<InnerList> innerList = new ArrayList<>(elements);
+    private static Map<InnerListKey, InnerList> buildInnerList(final int index, final int elements) {
+        Builder<InnerListKey, InnerList> innerList = ImmutableMap.builderWithExpectedSize(elements);
 
-        final String itemStr = "Item-" + String.valueOf(index) + "-";
+        final String itemStr = "Item-" + index + "-";
         for (int i = 0; i < elements; i++) {
-            innerList.add(new InnerListBuilder()
-                                .withKey(new InnerListKey(i))
-                                .setName(i)
-                                .setValue(itemStr + String.valueOf(i))
-                                .build());
+            final InnerListKey key = new InnerListKey(i);
+            innerList.put(key, new InnerListBuilder()
+                .withKey(key)
+                .setName(i)
+                .setValue(itemStr + i)
+                .build());
         }
-        return innerList;
+        return innerList.build();
     }
 }
index 50eb0d4e8bbafab7c1723a6ba5a6234a862ef395..bddf80e8803e008ac028644dfd17418629f09c87 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.dsbenchmark;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Random;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
@@ -27,6 +28,7 @@ public abstract class DatastoreAbstractWriter {
     protected int txOk = 0;
     protected int txError = 0;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "'this' passed to logging")
     public DatastoreAbstractWriter(final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         this.outerListElem = outerListElem;
@@ -50,18 +52,7 @@ public abstract class DatastoreAbstractWriter {
     }
 
     protected LogicalDatastoreType getDataStoreType() {
-        final LogicalDatastoreType dsType;
-        if (dataStore == DataStore.CONFIG) {
-            dsType = LogicalDatastoreType.CONFIGURATION;
-        } else if (dataStore == DataStore.OPERATIONAL) {
-            dsType = LogicalDatastoreType.OPERATIONAL;
-        } else {
-            if (rn.nextBoolean() == true) {
-                dsType = LogicalDatastoreType.OPERATIONAL;
-            } else {
-                dsType = LogicalDatastoreType.CONFIGURATION;
-            }
-        }
-        return dsType;
+        return dataStore == DataStore.CONFIG || dataStore != DataStore.OPERATIONAL && !rn.nextBoolean()
+            ? LogicalDatastoreType.CONFIGURATION : LogicalDatastoreType.OPERATIONAL;
     }
 }
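
Note (not part of the patch): the condensed ternary above relies on && binding tighter than ||, so CONFIGURATION is chosen when the store is CONFIG, or when it is neither CONFIG nor OPERATIONAL and the coin flip is false; every other case yields OPERATIONAL. A small standalone equivalence check, with illustrative enums standing in for DataStore and LogicalDatastoreType:

// Illustrative only: Store.BOTH stands in for any value that is neither CONFIG nor OPERATIONAL.
final class TernaryEquivalenceSketch {
    enum Store { CONFIG, OPERATIONAL, BOTH }
    enum Target { CONFIGURATION, OPERATIONAL }

    static Target original(final Store store, final boolean coin) {
        if (store == Store.CONFIG) {
            return Target.CONFIGURATION;
        }
        if (store == Store.OPERATIONAL) {
            return Target.OPERATIONAL;
        }
        return coin ? Target.OPERATIONAL : Target.CONFIGURATION;
    }

    static Target condensed(final Store store, final boolean coin) {
        return store == Store.CONFIG || store != Store.OPERATIONAL && !coin
            ? Target.CONFIGURATION : Target.OPERATIONAL;
    }

    public static void main(final String[] args) {
        for (Store store : Store.values()) {
            for (boolean coin : new boolean[] { true, false }) {
                if (original(store, coin) != condensed(store, coin)) {
                    throw new AssertionError(store + "/" + coin);
                }
            }
        }
        System.out.println("Condensed ternary matches the original branching");
    }
}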
index 2641fead359e8873646e7184b98c50342298fc72..7bdc4d7768b9710d3a4a4d89922a93f598abd500 100644 (file)
@@ -13,11 +13,11 @@ import java.util.List;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.outer.list.InnerList;
 import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public final class DomListBuilder {
     // Inner List Qname identifiers for yang model's 'name' and 'value'
@@ -32,27 +32,28 @@ public final class DomListBuilder {
     }
 
     public static List<MapEntryNode> buildOuterList(final int outerElements, final int innerElements) {
-        List<MapEntryNode> outerList = new ArrayList<>(outerElements);
+        final var outerList = new ArrayList<MapEntryNode>(outerElements);
         for (int j = 0; j < outerElements; j++) {
-            outerList.add(ImmutableNodes.mapEntryBuilder()
-                                .withNodeIdentifier(new NodeIdentifierWithPredicates(OuterList.QNAME, OL_ID, j))
-                                .withChild(ImmutableNodes.leafNode(OL_ID, j))
-                                .withChild(buildInnerList(j, innerElements))
-                                .build());
+            outerList.add(ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(NodeIdentifierWithPredicates.of(OuterList.QNAME, OL_ID, j))
+                .withChild(ImmutableNodes.leafNode(OL_ID, j))
+                .withChild(buildInnerList(j, innerElements))
+                .build());
         }
         return outerList;
     }
 
     private static MapNode buildInnerList(final int index, final int elements) {
-        CollectionNodeBuilder<MapEntryNode, MapNode> innerList = ImmutableNodes.mapNodeBuilder(InnerList.QNAME);
+        final var innerList = ImmutableNodes.newSystemMapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(InnerList.QNAME));
 
-        final String itemStr = "Item-" + String.valueOf(index) + "-";
+        final String itemStr = "Item-" + index + "-";
         for (int i = 0; i < elements; i++) {
-            innerList.addChild(ImmutableNodes.mapEntryBuilder()
-                                .withNodeIdentifier(new NodeIdentifierWithPredicates(InnerList.QNAME, IL_NAME, i))
-                                .withChild(ImmutableNodes.leafNode(IL_NAME, i))
-                                .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i)))
-                                .build());
+            innerList.addChild(ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(NodeIdentifierWithPredicates.of(InnerList.QNAME, IL_NAME, i))
+                .withChild(ImmutableNodes.leafNode(IL_NAME, i))
+                .withChild(ImmutableNodes.leafNode(IL_VALUE, itemStr + String.valueOf(i)))
+                .build());
         }
         return innerList.build();
     }
index 0cf0195ca41acccd8f4fc79bbcc9d36a48b4edc3..32f849596e16ca52f84959b89a92fdc704ec981b 100644 (file)
@@ -7,11 +7,16 @@
  */
 package org.opendaylight.dsbenchmark;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Collections;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicReference;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.dsbenchmark.listener.DsbenchmarkListenerProvider;
 import org.opendaylight.dsbenchmark.simpletx.SimpletxBaDelete;
 import org.opendaylight.dsbenchmark.simpletx.SimpletxBaRead;
@@ -26,13 +31,15 @@ import org.opendaylight.dsbenchmark.txchain.TxchainDomDelete;
 import org.opendaylight.dsbenchmark.txchain.TxchainDomRead;
 import org.opendaylight.dsbenchmark.txchain.TxchainDomWrite;
 import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.CleanupStoreOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.DsbenchmarkService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestOutputBuilder;
@@ -41,76 +48,82 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchm
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestStatus;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestStatus.ExecStatus;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestStatusBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
-
+@Singleton
+@Component(service = { })
+@RequireServiceComponentRuntime
+public final class DsbenchmarkProvider implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkProvider.class);
-    private static final InstanceIdentifier<TestExec> TEST_EXEC_IID =
-            InstanceIdentifier.builder(TestExec.class).build();
-    private static final InstanceIdentifier<TestStatus> TEST_STATUS_IID =
-            InstanceIdentifier.builder(TestStatus.class).build();
+    private static final InstanceIdentifier<TestExec> TEST_EXEC_IID = InstanceIdentifier.create(TestExec.class);
+    private static final InstanceIdentifier<TestStatus> TEST_STATUS_IID = InstanceIdentifier.create(TestStatus.class);
 
     private final AtomicReference<ExecStatus> execStatus = new AtomicReference<>(ExecStatus.Idle);
-    private final DsbenchmarkListenerProvider listenerProvider = new DsbenchmarkListenerProvider();
-    private final DOMDataBroker domDataBroker;  // Async DOM Broker for use with all DOM operations
-    private final DataBroker txChainDataBroker; // Async Binding-Aware Broker for use in tx chains; initialized to
-                                                // ping-pong broker in default config (see default-config.xml and
-                                                // dsbenchmark-impl.yang)
-    private final DataBroker simpleTxDataBroker;      // "Legacy" OSGI Data Broker for use in simple transactions
+    private final DsbenchmarkListenerProvider listenerProvider;
+    // Async DOM Broker for use with all DOM operations
+    private final DOMDataBroker domDataBroker;
+    // Async Binding-Aware Broker for use in tx chains;
+    private final DataBroker dataBroker;
+    private final Registration rpcReg;
 
     private long testsCompleted = 0;
 
-    public DsbenchmarkProvider(final DOMDataBroker domDataBroker, final DataBroker txChainDataBroker,
-            final DataBroker simpleTxDataBroker) {
-        this.domDataBroker = domDataBroker;
-        this.txChainDataBroker = txChainDataBroker;
-        this.simpleTxDataBroker = simpleTxDataBroker;
-    }
-
+    @Inject
+    @Activate
     @SuppressWarnings("checkstyle:illegalCatch")
-    public void init() {
-        listenerProvider.setDataBroker(simpleTxDataBroker);
+    public DsbenchmarkProvider(@Reference final DOMDataBroker domDataBroker, @Reference final DataBroker dataBroker,
+            @Reference final RpcProviderService rpcService) {
+        this.domDataBroker = requireNonNull(domDataBroker);
+        this.dataBroker = requireNonNull(dataBroker);
+        listenerProvider = new DsbenchmarkListenerProvider(dataBroker);
 
         try {
             // We want to set the initial operation status so users can detect we are ready to start test.
-            setTestOperData(this.execStatus.get(), testsCompleted);
+            setTestOperData(execStatus.get(), testsCompleted);
         } catch (final Exception e) {
             // TODO: Use a singleton service to make sure the initial write is performed only once.
-            LOG.warn("Working around Bugs 8829 and 6793 by ignoring exception from setTestOperData: {}", e);
+            LOG.warn("Working around Bugs 8829 and 6793 by ignoring exception from setTestOperData", e);
         }
 
+        rpcReg = rpcService.registerRpcImplementations((StartTest) this::startTest, (CleanupStore) this::cleanupStore);
         LOG.info("DsbenchmarkProvider initiated");
     }
 
     @Override
+    @PreDestroy
+    @Deactivate
     public void close() {
+        rpcReg.close();
         LOG.info("DsbenchmarkProvider closed");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<CleanupStoreOutput>> cleanupStore(final CleanupStoreInput input) {
+    private ListenableFuture<RpcResult<CleanupStoreOutput>> cleanupStore(final CleanupStoreInput input) {
         cleanupTestStore();
         LOG.debug("Data Store cleaned up");
         return Futures.immediateFuture(RpcResultBuilder.success(new CleanupStoreOutputBuilder().build()).build());
     }
 
-    @Override
     @SuppressWarnings("checkstyle:illegalCatch")
-    public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+    private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
         LOG.info("Starting the data store benchmark test, input: {}", input);
 
         // Check if there is a test in progress
-        if (execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing) == false) {
+        if (!execStatus.compareAndSet(ExecStatus.Idle, ExecStatus.Executing)) {
             LOG.info("Test in progress");
             return RpcResultBuilder.success(new StartTestOutputBuilder()
-                    .setStatus(StartTestOutput.Status.TESTINPROGRESS)
-                    .build()).buildFuture();
+                .setStatus(StartTestOutput.Status.TESTINPROGRESS)
+                .build()).buildFuture();
         }
 
         // Cleanup data that may be left over from a previous test run
@@ -136,14 +149,14 @@ public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
             endTime = System.nanoTime();
             execTime = (endTime - startTime) / 1000;
 
-            this.testsCompleted++;
+            testsCompleted++;
 
         } catch (final Exception e) {
-            LOG.error("Test error: {}", e.toString());
+            LOG.error("Test error", e);
             execStatus.set(ExecStatus.Idle);
             return RpcResultBuilder.success(new StartTestOutputBuilder()
-                    .setStatus(StartTestOutput.Status.FAILED)
-                    .build()).buildFuture();
+                .setStatus(StartTestOutput.Status.FAILED)
+                .build()).buildFuture();
         }
 
         LOG.info("Test finished");
@@ -158,10 +171,10 @@ public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
                 .setStatus(StartTestOutput.Status.OK)
                 .setListBuildTime(listCreateTime)
                 .setExecTime(execTime)
-                .setTxOk((long)dsWriter.getTxOk())
-                .setNtfOk(numEvents)
-                .setDataChangeEventsOk(numDataChanges)
-                .setTxError((long)dsWriter.getTxError())
+                .setTxOk(Uint32.valueOf(dsWriter.getTxOk()))
+                .setNtfOk(Uint32.valueOf(numEvents))
+                .setDataChangeEventsOk(Uint32.valueOf(numDataChanges))
+                .setTxError(Uint32.valueOf(dsWriter.getTxError()))
                 .build();
 
         return RpcResultBuilder.success(output).buildFuture();
@@ -170,10 +183,10 @@ public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
     private void setTestOperData(final ExecStatus sts, final long tstCompl) {
         TestStatus status = new TestStatusBuilder()
                 .setExecStatus(sts)
-                .setTestsCompleted(tstCompl)
+                .setTestsCompleted(Uint32.valueOf(tstCompl))
                 .build();
 
-        WriteTransaction tx = simpleTxDataBroker.newWriteOnlyTransaction();
+        WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
         tx.put(LogicalDatastoreType.OPERATIONAL, TEST_STATUS_IID, status);
 
         try {
@@ -186,11 +199,9 @@ public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
     }
 
     private void cleanupTestStore() {
-        TestExec data = new TestExecBuilder()
-                .setOuterList(Collections.<OuterList>emptyList())
-                .build();
+        TestExec data = new TestExecBuilder().setOuterList(Collections.emptyMap()).build();
 
-        WriteTransaction tx = simpleTxDataBroker.newWriteOnlyTransaction();
+        WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
         tx.put(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID, data);
         try {
             tx.commit().get();
@@ -200,7 +211,7 @@ public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
             throw new IllegalStateException(e);
         }
 
-        tx = simpleTxDataBroker.newWriteOnlyTransaction();
+        tx = dataBroker.newWriteOnlyTransaction();
         tx.put(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID, data);
         try {
             tx.commit().get();
@@ -228,52 +239,46 @@ public class DsbenchmarkProvider implements DsbenchmarkService, AutoCloseable {
             if (txType == StartTestInput.TransactionType.SIMPLETX) {
                 if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
                     if (StartTestInput.Operation.DELETE == oper) {
-                        retVal = new SimpletxBaDelete(this.simpleTxDataBroker, outerListElem,
+                        retVal = new SimpletxBaDelete(dataBroker, outerListElem,
                                 innerListElem,writesPerTx, dataStore);
                     } else if (StartTestInput.Operation.READ == oper) {
-                        retVal = new SimpletxBaRead(this.simpleTxDataBroker, outerListElem,
+                        retVal = new SimpletxBaRead(dataBroker, outerListElem,
                                 innerListElem, writesPerTx, dataStore);
                     } else {
-                        retVal = new SimpletxBaWrite(this.simpleTxDataBroker, oper, outerListElem,
+                        retVal = new SimpletxBaWrite(dataBroker, oper, outerListElem,
                                 innerListElem, writesPerTx, dataStore);
                     }
+                } else if (StartTestInput.Operation.DELETE == oper) {
+                    retVal = new SimpletxDomDelete(domDataBroker, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
+                } else if (StartTestInput.Operation.READ == oper) {
+                    retVal = new SimpletxDomRead(domDataBroker, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
                 } else {
-                    if (StartTestInput.Operation.DELETE == oper) {
-                        retVal = new SimpletxDomDelete(this.domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else if (StartTestInput.Operation.READ == oper) {
-                        retVal = new SimpletxDomRead(this.domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else {
-                        retVal = new SimpletxDomWrite(this.domDataBroker, oper, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    }
+                    retVal = new SimpletxDomWrite(domDataBroker, oper, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
                 }
-            } else {
-                if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
-                    if (StartTestInput.Operation.DELETE == oper) {
-                        retVal = new TxchainBaDelete(this.txChainDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else if (StartTestInput.Operation.READ == oper) {
-                        retVal = new TxchainBaRead(this.txChainDataBroker,outerListElem,
-                                innerListElem,writesPerTx, dataStore);
-                    } else {
-                        retVal = new TxchainBaWrite(this.txChainDataBroker, oper, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    }
+            } else if (dataFormat == StartTestInput.DataFormat.BINDINGAWARE) {
+                if (StartTestInput.Operation.DELETE == oper) {
+                    retVal = new TxchainBaDelete(dataBroker, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
+                } else if (StartTestInput.Operation.READ == oper) {
+                    retVal = new TxchainBaRead(dataBroker, outerListElem,
+                            innerListElem,writesPerTx, dataStore);
                 } else {
-                    if (StartTestInput.Operation.DELETE == oper) {
-                        retVal = new TxchainDomDelete(this.domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-                    } else if (StartTestInput.Operation.READ == oper) {
-                        retVal = new TxchainDomRead(this.domDataBroker, outerListElem,
-                                innerListElem, writesPerTx, dataStore);
-
-                    } else {
-                        retVal = new TxchainDomWrite(this.domDataBroker, oper, outerListElem,
-                                innerListElem,writesPerTx, dataStore);
-                    }
+                    retVal = new TxchainBaWrite(dataBroker, oper, outerListElem,
+                            innerListElem, writesPerTx, dataStore);
                 }
+            } else if (StartTestInput.Operation.DELETE == oper) {
+                retVal = new TxchainDomDelete(domDataBroker, outerListElem,
+                        innerListElem, writesPerTx, dataStore);
+            } else if (StartTestInput.Operation.READ == oper) {
+                retVal = new TxchainDomRead(domDataBroker, outerListElem,
+                        innerListElem, writesPerTx, dataStore);
+
+            } else {
+                retVal = new TxchainDomWrite(domDataBroker, oper, outerListElem,
+                        innerListElem,writesPerTx, dataStore);
             }
         } finally {
             execStatus.set(ExecStatus.Idle);
index 75523eff7a7f61dc0bb093bde2563596ccd3ec5f..34c2bfdb68c3984dde2acc600946b594dc8b0f20 100644 (file)
@@ -7,15 +7,11 @@
  */
 package org.opendaylight.dsbenchmark.listener;
 
-import java.util.Collection;
+import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
 import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
 import org.opendaylight.mdsal.binding.api.DataTreeModification;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -25,8 +21,7 @@ public class DsbenchmarkListener implements DataTreeChangeListener<TestExec> {
     private final AtomicInteger numDataChanges = new AtomicInteger(0);
 
     @Override
-    public void onDataTreeChanged(
-            final Collection<DataTreeModification<TestExec>> changes) {
+    public void onDataTreeChanged(final List<DataTreeModification<TestExec>> changes) {
         // Since we're registering the same DsbenchmarkListener object for both
         // OPERATIONAL and CONFIG, the onDataTreeChanged() method can be called
         // from different threads, and we need to use atomic counters.
@@ -40,20 +35,19 @@ public class DsbenchmarkListener implements DataTreeChangeListener<TestExec> {
     }
 
     private static synchronized void logDataTreeChangeEvent(final int eventNum,
-            final Collection<DataTreeModification<TestExec>> changes) {
+            final List<DataTreeModification<TestExec>> changes) {
         LOG.debug("DsbenchmarkListener-onDataTreeChanged: Event {}", eventNum);
 
-        for (DataTreeModification<TestExec> change : changes) {
-            final DataObjectModification<TestExec> rootNode = change.getRootNode();
-            final ModificationType modType = rootNode.getModificationType();
-            final PathArgument changeId = rootNode.getIdentifier();
-            final Collection<? extends DataObjectModification<? extends DataObject>> modifications =
-                    rootNode.getModifiedChildren();
+        for (var change : changes) {
+            final var rootNode = change.getRootNode();
+            final var modType = rootNode.modificationType();
+            final var changeId = rootNode.step();
+            final var modifications = rootNode.modifiedChildren();
 
             LOG.debug("    changeId {}, modType {}, mods: {}", changeId, modType, modifications.size());
 
-            for (DataObjectModification<? extends DataObject> mod : modifications) {
-                LOG.debug("      mod-getDataAfter: {}", mod.getDataAfter());
+            for (var mod : modifications) {
+                LOG.debug("      mod-getDataAfter: {}", mod.dataAfter());
             }
         }
     }
index 6d52d60644cc0434ea2312d029b6b08d52a50b4e..20e271ad9ac58779b0c4031692d87dece85f5a42 100644 (file)
@@ -7,13 +7,15 @@
  */
 package org.opendaylight.dsbenchmark.listener;
 
+import static java.util.Objects.requireNonNull;
+
 import java.util.ArrayList;
 import java.util.List;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -22,22 +24,23 @@ public class DsbenchmarkListenerProvider {
     private static final Logger LOG = LoggerFactory.getLogger(DsbenchmarkListenerProvider.class);
     private static final InstanceIdentifier<TestExec> TEST_EXEC_IID =
             InstanceIdentifier.builder(TestExec.class).build();
-    private final List<ListenerRegistration<DsbenchmarkListener>> listeners =
-            new ArrayList<>();
-    private DataBroker dataBroker;
+    private final List<DsbenchmarkListener> listeners = new ArrayList<>();
+    private final List<Registration> registrations = new ArrayList<>();
+    private final DataBroker dataBroker;
 
-    public void setDataBroker(final DataBroker dataBroker) {
-        this.dataBroker = dataBroker;
+    public DsbenchmarkListenerProvider(final DataBroker dataBroker) {
+        this.dataBroker = requireNonNull(dataBroker);
         LOG.debug("DsbenchmarkListenerProvider created");
     }
 
     public void createAndRegisterListeners(final int numListeners) {
         for (int i = 0; i < numListeners; i++) {
-            DsbenchmarkListener listener = new DsbenchmarkListener();
-            listeners.add(dataBroker.registerDataTreeChangeListener(
-                    DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener));
-            listeners.add(dataBroker.registerDataTreeChangeListener(
-                    DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID), listener));
+            var listener = new DsbenchmarkListener();
+            listeners.add(listener);
+            registrations.add(dataBroker.registerTreeChangeListener(
+                    DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, TEST_EXEC_IID), listener));
+            registrations.add(dataBroker.registerTreeChangeListener(
+                    DataTreeIdentifier.of(LogicalDatastoreType.OPERATIONAL, TEST_EXEC_IID), listener));
 
         }
         LOG.debug("DsbenchmarkListenerProvider created {} listeneres", numListeners);
@@ -46,8 +49,8 @@ public class DsbenchmarkListenerProvider {
     public long getDataChangeCount() {
         long dataChanges = 0;
 
-        for (ListenerRegistration<DsbenchmarkListener> listenerRegistration : listeners) {
-            dataChanges += listenerRegistration.getInstance().getNumDataChanges();
+        for (var listener : listeners) {
+            dataChanges += listener.getNumDataChanges();
         }
         LOG.debug("DsbenchmarkListenerProvider , total data changes {}", dataChanges);
         return dataChanges;
@@ -56,11 +59,14 @@ public class DsbenchmarkListenerProvider {
     public long getEventCountAndDestroyListeners() {
         long totalEvents = 0;
 
-        for (ListenerRegistration<DsbenchmarkListener> listenerRegistration : listeners) {
-            totalEvents += listenerRegistration.getInstance().getNumEvents();
-            listenerRegistration.close();
+        registrations.forEach(Registration::close);
+        registrations.clear();
+
+        for (var listener : listeners) {
+            totalEvents += listener.getNumEvents();
         }
         listeners.clear();
+
         LOG.debug("DsbenchmarkListenerProvider destroyed listeneres, total events {}", totalEvents);
         return totalEvents;
     }
index f41cb1f50e6d5c494ef1d5a9a7d296a9cf7fb2f1..be52c401fd3f37f400830d166f282a2424cda2f5 100644 (file)
@@ -64,7 +64,7 @@ public class SimpletxBaDelete extends DatastoreAbstractWriter {
                     tx.commit().get();
                     txOk++;
                 } catch (final InterruptedException | ExecutionException e) {
-                    LOG.error("Transaction failed: {}", e);
+                    LOG.error("Transaction failed", e);
                     txError++;
                 }
                 tx = dataBroker.newWriteOnlyTransaction();
@@ -75,7 +75,7 @@ public class SimpletxBaDelete extends DatastoreAbstractWriter {
             try {
                 tx.commit().get();
             } catch (final InterruptedException | ExecutionException e) {
-                LOG.error("Transaction failed: {}", e);
+                LOG.error("Transaction failed", e);
             }
         }
     }
index 078fe03d6804209a412a69335b661c5adb28b9af..1f6b7f988421214299805af03b40f51322771a0c 100644 (file)
@@ -64,11 +64,11 @@ public class SimpletxBaRead extends DatastoreAbstractWriter {
                 try {
                     optionalDataObject = submitFuture.get();
                     if (optionalDataObject != null && optionalDataObject.isPresent()) {
-                        OuterList outerList = optionalDataObject.get();
+                        OuterList outerList = optionalDataObject.orElseThrow();
 
                         String[] objectsArray = new String[outerList.getInnerList().size()];
 
-                        for (InnerList innerList : outerList.getInnerList()) {
+                        for (InnerList innerList : outerList.getInnerList().values()) {
                             if (objectsArray[innerList.getName()] != null) {
                                 LOG.error("innerList: DUPLICATE name: {}, value: {}", innerList.getName(),
                                     innerList.getValue());
@@ -77,7 +77,7 @@ public class SimpletxBaRead extends DatastoreAbstractWriter {
                         }
                         for (int i = 0; i < outerList.getInnerList().size(); i++) {
                             String itemStr = objectsArray[i];
-                            if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) {
+                            if (!itemStr.contentEquals("Item-" + l + "-" + i)) {
                                 LOG.error("innerList: name: {}, value: {}", i, itemStr);
                                 break;
                             }
index 80e42d0efedb4e1fe5092712a87b8e7cb4a28b67..5395868aeca487f7e0ba6fbc89cc72b1837d0676 100644 (file)
@@ -5,16 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.dsbenchmark.simpletx;
 
+import static java.util.Objects.requireNonNull;
+
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.BaListBuilder;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
@@ -25,31 +24,31 @@ import org.slf4j.LoggerFactory;
 
 public class SimpletxBaWrite extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(SimpletxBaWrite.class);
+
     private final DataBroker dataBroker;
-    private List<OuterList> list;
+    private List<OuterList> list = null;
 
     public SimpletxBaWrite(final DataBroker dataBroker, final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
-        this.dataBroker = dataBroker;
+        this.dataBroker = requireNonNull(dataBroker);
         LOG.debug("Created SimpletxBaWrite");
     }
 
     @Override
     public void createList() {
-        list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = BaListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final LogicalDatastoreType dsType = getDataStoreType();
+        final var dsType = getDataStoreType();
 
-        WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
+        var tx = dataBroker.newWriteOnlyTransaction();
         long writeCnt = 0;
 
-        for (OuterList element : this.list) {
-            InstanceIdentifier<OuterList> iid = InstanceIdentifier.create(TestExec.class)
-                                                    .child(OuterList.class, element.key());
+        for (var element : list) {
+            final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key());
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, iid, element);
             } else {
@@ -63,7 +62,7 @@ public class SimpletxBaWrite extends DatastoreAbstractWriter {
                     tx.commit().get();
                     txOk++;
                 } catch (final InterruptedException | ExecutionException e) {
-                    LOG.error("Transaction failed: {}", e);
+                    LOG.error("Transaction failed", e);
                     txError++;
                 }
                 tx = dataBroker.newWriteOnlyTransaction();
@@ -76,9 +75,8 @@ public class SimpletxBaWrite extends DatastoreAbstractWriter {
             try {
                 tx.commit().get();
             } catch (final InterruptedException | ExecutionException e) {
-                LOG.error("Transaction failed: {}", e);
+                LOG.error("Transaction failed", e);
             }
         }
     }
-
 }
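The two logging changes in this hunk follow the SLF4J convention that a Throwable belongs in the trailing argument position, not in a '{}' placeholder: when the placeholder consumes the exception, typically only its toString() is logged and the stack trace is lost. The constructor also now rejects a null DataBroker eagerly via requireNonNull. A minimal sketch of both idioms (the class name and field type are illustrative only, not part of the patch):

    import static java.util.Objects.requireNonNull;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class TxLoggingSketch {
        private static final Logger LOG = LoggerFactory.getLogger(TxLoggingSketch.class);
        private final Object dataBroker;

        TxLoggingSketch(final Object dataBroker) {
            // Fail fast at construction time instead of with an NPE on first use
            this.dataBroker = requireNonNull(dataBroker);
        }

        Object dataBroker() {
            return dataBroker;
        }

        void report(final Exception cause) {
            // Discouraged: the placeholder swallows the exception, so the stack trace is not printed
            LOG.error("Transaction failed: {}", cause);
            // Preferred: a trailing Throwable argument makes SLF4J log the full stack trace
            LOG.error("Transaction failed", cause);
        }
    }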
index 2cda64b410467bbe7aef4e6b670dd954a310fbd0..4f29cdceab0b342aadefe9e153f0bdee9939e7db 100644 (file)
@@ -60,7 +60,7 @@ public class SimpletxDomDelete extends DatastoreAbstractWriter {
         long writeCnt = 0;
 
         for (int l = 0; l < outerListElem; l++) {
-            YangInstanceIdentifier yid = pid.node(new NodeIdentifierWithPredicates(OuterList.QNAME, olId, l));
+            YangInstanceIdentifier yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, olId, l));
 
             tx.delete(dsType, yid);
             writeCnt++;
@@ -69,7 +69,7 @@ public class SimpletxDomDelete extends DatastoreAbstractWriter {
                     tx.commit().get();
                     txOk++;
                 } catch (final  InterruptedException | ExecutionException e) {
-                    LOG.error("Transaction failed: {}", e);
+                    LOG.error("Transaction failed", e);
                     txError++;
                 }
                 tx = domDataBroker.newWriteOnlyTransaction();
@@ -80,7 +80,7 @@ public class SimpletxDomDelete extends DatastoreAbstractWriter {
             try {
                 tx.commit().get();
             } catch (final InterruptedException | ExecutionException e) {
-                LOG.error("Transaction failed: {}", e);
+                LOG.error("Transaction failed", e);
             }
         }
     }
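Here and in the other DOM classes, new NodeIdentifierWithPredicates(...) is replaced by the static factory NodeIdentifierWithPredicates.of(...), which newer yangtools releases provide in place of the deprecated public constructors. A hedged sketch of building a keyed list-entry path this way; the QNames are placeholders standing in for the generated OuterList.QNAME and its "id" key leaf:

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;

    final class ListEntryPathSketch {
        // Placeholder QNames; the benchmark code uses the generated model constants instead
        private static final QName OUTER_LIST = QName.create("urn:example:bench", "2015-01-05", "outer-list");
        private static final QName ID = QName.create(OUTER_LIST, "id");

        static YangInstanceIdentifier entry(final YangInstanceIdentifier listPath, final int key) {
            // of(listQName, keyLeafQName, keyValue) builds the same predicate the old constructor did
            return listPath.node(NodeIdentifierWithPredicates.of(OUTER_LIST, ID, key));
        }
    }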
index 30812898c5ea72e343c74fb9afd45c1248011f81..d46a769b09a1da7435085b50d83b796fbab58fb0 100644 (file)
@@ -61,12 +61,12 @@ public class SimpletxDomRead extends DatastoreAbstractWriter {
 
         try (DOMDataTreeReadTransaction tx = domDataBroker.newReadOnlyTransaction()) {
             for (int l = 0; l < outerListElem; l++) {
-                YangInstanceIdentifier yid = pid.node(new NodeIdentifierWithPredicates(OuterList.QNAME, olId, l));
-                FluentFuture<Optional<NormalizedNode<?, ?>>> submitFuture = tx.read(dsType, yid);
+                YangInstanceIdentifier yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, olId, l));
+                FluentFuture<Optional<NormalizedNode>> submitFuture = tx.read(dsType, yid);
                 try {
-                    Optional<NormalizedNode<?,?>> optionalDataObject = submitFuture.get();
+                    Optional<NormalizedNode> optionalDataObject = submitFuture.get();
                     if (optionalDataObject != null && optionalDataObject.isPresent()) {
-                        NormalizedNode<?, ?> ret = optionalDataObject.get();
+                        NormalizedNode ret = optionalDataObject.orElseThrow();
                         LOG.trace("optionalDataObject is {}", ret);
                         txOk++;
                     } else {
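Two independent updates meet in this read path: NormalizedNode is no longer a generic type in newer yangtools, and Optional.get() gives way to orElseThrow(), which is behaviourally identical but states explicitly that absence at this point would be a programming error. A small plain-JDK illustration of the Optional idiom, with no ODL types involved:

    import java.util.Optional;

    final class OptionalSketch {
        static String valueOrDefault(final Optional<String> read) {
            // isPresent() followed by orElseThrow() mirrors the patched read loop above
            return read.isPresent() ? read.orElseThrow() : "<absent>";
        }
    }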
index 181ff23f0b0804e8f71764134ea2e10fa90007aa..a57773125f9e707dca4ec3b30394d9896b490e04 100644 (file)
@@ -5,16 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.dsbenchmark.simpletx;
 
+import static java.util.Objects.requireNonNull;
+
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.dsbenchmark.DomListBuilder;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
@@ -27,33 +26,32 @@ import org.slf4j.LoggerFactory;
 
 public class SimpletxDomWrite extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(SimpletxDomWrite.class);
-    private final DOMDataBroker domDataBroker;
-    private List<MapEntryNode> list;
 
-    public SimpletxDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper,
+    private final DOMDataBroker dataBroker;
+    private List<MapEntryNode> list = null;
+
+    public SimpletxDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long putsPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, putsPerTx, dataStore);
-        this.domDataBroker = domDataBroker;
+        this.dataBroker = requireNonNull(dataBroker);
         LOG.debug("Created SimpletxDomWrite");
     }
 
     @Override
     public void createList() {
-        list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = DomListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final LogicalDatastoreType dsType = getDataStoreType();
-        final YangInstanceIdentifier pid =
-                YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
+        final var dsType = getDataStoreType();
+        final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME);
 
-        DOMDataTreeWriteTransaction tx = domDataBroker.newWriteOnlyTransaction();
+        var tx = dataBroker.newWriteOnlyTransaction();
         long writeCnt = 0;
 
-        for (MapEntryNode element : this.list) {
-            YangInstanceIdentifier yid =
-                    pid.node(new NodeIdentifierWithPredicates(OuterList.QNAME, element.getIdentifier().getKeyValues()));
+        for (var element : list) {
+            final var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap()));
 
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, yid, element);
@@ -71,7 +69,7 @@ public class SimpletxDomWrite extends DatastoreAbstractWriter {
                     LOG.error("Transaction failed", e);
                     txError++;
                 }
-                tx = domDataBroker.newWriteOnlyTransaction();
+                tx = dataBroker.newWriteOnlyTransaction();
                 writeCnt = 0;
             }
         }
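The DOM write path now builds the parent path with YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME) instead of the builder, and derives each entry's key predicates from MapEntryNode.name().asMap() rather than the removed getIdentifier().getKeyValues(). A hedged sketch of the same construction, taking the container and list QNames as parameters instead of the generated constants:

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
    import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;

    final class DomWritePathSketch {
        static YangInstanceIdentifier entryPath(final QName container, final QName list, final MapEntryNode entry) {
            // of(QName...) replaces YangInstanceIdentifier.builder().node(...).node(...).build()
            final YangInstanceIdentifier listPath = YangInstanceIdentifier.of(container, list);
            // name() is the entry's NodeIdentifierWithPredicates; asMap() exposes its key leaf values
            return listPath.node(NodeIdentifierWithPredicates.of(list, entry.name().asMap()));
        }
    }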
index 14561a58d4a9b602e308b4a9b48cfa044de8846e..41b233774a4c8fbf921bcdd5fed29e9bf1b73494 100644 (file)
@@ -12,9 +12,7 @@ import com.google.common.util.concurrent.MoreExecutors;
 import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.Transaction;
 import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
@@ -24,10 +22,11 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchm
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterListKey;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainBaDelete extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaDelete extends DatastoreAbstractWriter implements FutureCallback<Empty> {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainBaDelete.class);
     private final DataBroker bindingDataBroker;
 
@@ -53,7 +52,8 @@ public class TxchainBaDelete extends DatastoreAbstractWriter implements Transact
     @Override
     public void executeList() {
         final LogicalDatastoreType dsType = getDataStoreType();
-        final TransactionChain chain = bindingDataBroker.createTransactionChain(this);
+        final TransactionChain chain = bindingDataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
 
         WriteTransaction tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
@@ -98,20 +98,18 @@ public class TxchainBaDelete extends DatastoreAbstractWriter implements Transact
         try {
             chain.close();
         } catch (final IllegalStateException e) {
-            LOG.error("Transaction close failed,", e);
+            LOG.error("Transaction close failed", e);
         }
         LOG.debug("Transactions: submitted {}, completed {}", txSubmitted, txOk + txError);
     }
 
     @Override
-    public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainBaDelete, transaction {}, cause {}", chain, transaction.getIdentifier(),
-            cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in TxchainBaDelete", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final TransactionChain chain) {
-        LOG.debug("TxchainBaDelete closed successfully, chain {}", chain);
+    public void onSuccess(final Empty chain) {
+        LOG.debug("TxchainBaDelete closed successfully");
     }
 }
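TxchainBaDelete no longer implements TransactionChainListener: the chain is created with createMergingTransactionChain() and its outcome is observed through a Guava FutureCallback<Empty> registered via addCallback(). The same migration repeats in the Txchain* writers below. A minimal sketch of the pattern, assuming the mdsal binding API used in this patch:

    import com.google.common.util.concurrent.FutureCallback;
    import org.opendaylight.mdsal.binding.api.DataBroker;
    import org.opendaylight.mdsal.binding.api.TransactionChain;
    import org.opendaylight.yangtools.yang.common.Empty;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class ChainCallbackSketch implements FutureCallback<Empty> {
        private static final Logger LOG = LoggerFactory.getLogger(ChainCallbackSketch.class);

        void execute(final DataBroker dataBroker) {
            // The listener argument is gone from chain creation; completion is reported via addCallback()
            final TransactionChain chain = dataBroker.createMergingTransactionChain();
            chain.addCallback(this);
            // ... submit write transactions from chain.newWriteOnlyTransaction() as in the hunks above ...
            chain.close();
        }

        @Override
        public void onSuccess(final Empty result) {
            LOG.debug("Chain closed successfully");
        }

        @Override
        public void onFailure(final Throwable cause) {
            LOG.error("Chain failed", cause);
        }
    }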
index 4394daacb6370929a246dd89a0ce220aa32ab007..a45599566643f8ba05a5ca9f9416338c75c70d6c 100644 (file)
@@ -13,9 +13,6 @@ import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.binding.api.Transaction;
-import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
@@ -27,7 +24,7 @@ import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainBaRead extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaRead extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainBaRead.class);
     private final DataBroker bindingDataBroker;
 
@@ -64,19 +61,19 @@ public class TxchainBaRead extends DatastoreAbstractWriter implements Transactio
                 try {
                     Optional<OuterList> optionalDataObject = submitFuture.get();
                     if (optionalDataObject != null && optionalDataObject.isPresent()) {
-                        OuterList outerList = optionalDataObject.get();
+                        OuterList outerList = optionalDataObject.orElseThrow();
 
-                        String[] objectsArray = new String[outerList.getInnerList().size()];
-                        for (InnerList innerList : outerList.getInnerList()) {
+                        String[] objectsArray = new String[outerList.nonnullInnerList().size()];
+                        for (InnerList innerList : outerList.nonnullInnerList().values()) {
                             if (objectsArray[innerList.getName()] != null) {
                                 LOG.error("innerList: DUPLICATE name: {}, value: {}", innerList.getName(),
                                     innerList.getValue());
                             }
                             objectsArray[innerList.getName()] = innerList.getValue();
                         }
-                        for (int i = 0; i < outerList.getInnerList().size(); i++) {
+                        for (int i = 0; i < outerList.nonnullInnerList().size(); i++) {
                             String itemStr = objectsArray[i];
-                            if (!itemStr.contentEquals("Item-" + String.valueOf(l) + "-" + String.valueOf(i))) {
+                            if (!itemStr.contentEquals("Item-" + l + "-" + i)) {
                                 LOG.error("innerList: name: {}, value: {}", i, itemStr);
                                 break;
                             }
@@ -92,16 +89,4 @@ public class TxchainBaRead extends DatastoreAbstractWriter implements Transactio
             }
         }
     }
-
-    @Override
-    public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainBaDelete, transaction {}, cause {}", chain, transaction.getIdentifier(),
-            cause);
-    }
-
-    @Override
-    public void onTransactionChainSuccessful(final TransactionChain chain) {
-        LOG.debug("TxchainBaDelete closed successfully, chain {}", chain);
-    }
 }
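TxchainBaRead drops the now-unused chain-listener callbacks and switches to nonnullInnerList(), the generated accessor that returns an empty map instead of null; keyed binding lists are generated as Map<Key, Value>, hence the iteration over values(). A plain-Java illustration of the nonnull-accessor idea, without reproducing the generated types:

    import java.util.Map;

    final class NonnullAccessorSketch {
        private final Map<Integer, String> innerList; // may be null when the list is absent from the data

        NonnullAccessorSketch(final Map<Integer, String> innerList) {
            this.innerList = innerList;
        }

        Map<Integer, String> nonnullInnerList() {
            // Mirrors the generated nonnullXyz() accessors: callers never have to null-check
            return innerList != null ? innerList : Map.of();
        }
    }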
index e10f947a058f87d1d098ce73e1488cb0300530d1..3640f607dbf9145997963b253f624298c11c8609 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.dsbenchmark.txchain;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.List;
@@ -14,50 +16,47 @@ import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.BaListBuilder;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.Transaction;
-import org.opendaylight.mdsal.binding.api.TransactionChain;
-import org.opendaylight.mdsal.binding.api.TransactionChainListener;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.Operation;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainBaWrite extends DatastoreAbstractWriter implements TransactionChainListener {
+public class TxchainBaWrite extends DatastoreAbstractWriter implements FutureCallback<Empty> {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainBaWrite.class);
-    private final DataBroker bindingDataBroker;
-    private List<OuterList> list;
 
-    public TxchainBaWrite(final DataBroker bindingDataBroker, final Operation oper,
-            final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
+    private final DataBroker dataBroker;
+    private List<OuterList> list = null;
+
+    public TxchainBaWrite(final DataBroker dataBroker, final Operation oper, final int outerListElem,
+            final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
-        this.bindingDataBroker = bindingDataBroker;
+        this.dataBroker = requireNonNull(dataBroker);
         LOG.debug("Created TxchainBaWrite");
     }
 
     @Override
     public void createList() {
-        list = BaListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = BaListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final TransactionChain chain = bindingDataBroker.createTransactionChain(this);
-        final LogicalDatastoreType dsType = getDataStoreType();
+        final var chain = dataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
+        final var dsType = getDataStoreType();
 
-        WriteTransaction tx = chain.newWriteOnlyTransaction();
+        var tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
         int writeCnt = 0;
 
-        for (OuterList element : this.list) {
-            InstanceIdentifier<OuterList> iid = InstanceIdentifier.create(TestExec.class)
-                                                    .child(OuterList.class, element.key());
+        for (var element : list) {
+            final var iid = InstanceIdentifier.create(TestExec.class).child(OuterList.class, element.key());
 
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, iid, element);
@@ -106,14 +105,12 @@ public class TxchainBaWrite extends DatastoreAbstractWriter implements Transacti
     }
 
     @Override
-    public void onTransactionChainFailed(final TransactionChain chain, final Transaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in DatastoreBaAbstractWrite, transaction {}, cause {}", chain,
-            transaction.getIdentifier(), cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in DatastoreBaAbstractWrite", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final TransactionChain chain) {
-        LOG.debug("DatastoreBaAbstractWrite closed successfully, chain {}", chain);
+    public void onSuccess(final Empty result) {
+        LOG.debug("DatastoreBaAbstractWrite closed successfully");
     }
 }
index 733f51d02ae30499ebb089b104cb91aaea928911..c0280c6b02b3642b548a65142e66de9b0b224352 100644 (file)
@@ -14,22 +14,21 @@ import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTransactionChainListener {
-    private static final Logger LOG = LoggerFactory.getLogger(TxchainBaWrite.class);
+public class TxchainDomDelete extends DatastoreAbstractWriter implements FutureCallback<Empty> {
+    private static final Logger LOG = LoggerFactory.getLogger(TxchainDomDelete.class);
     private final DOMDataBroker domDataBroker;
 
     public TxchainDomDelete(final DOMDataBroker domDataBroker, final int outerListElem, final int innerListElem,
@@ -57,14 +56,15 @@ public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTran
         final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id");
         final YangInstanceIdentifier pid =
                 YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
-        final DOMTransactionChain chain = domDataBroker.createTransactionChain(this);
+        final DOMTransactionChain chain = domDataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
 
         DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
         int writeCnt = 0;
 
         for (int l = 0; l < outerListElem; l++) {
-            YangInstanceIdentifier yid = pid.node(new NodeIdentifierWithPredicates(OuterList.QNAME, olId, l));
+            YangInstanceIdentifier yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, olId, l));
             tx.delete(dsType, yid);
 
             writeCnt++;
@@ -108,14 +108,12 @@ public class TxchainDomDelete extends DatastoreAbstractWriter implements DOMTran
     }
 
     @Override
-    public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainDomDelete, transaction {}, cause {}", chain, transaction.getIdentifier(),
-            cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in TxchainDomDelete", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
-        LOG.debug("TxchainDomDelete closed successfully, chain {}", chain);
+    public void onSuccess(final Empty result) {
+        LOG.debug("TxchainDomDelete closed successfully");
     }
 }
index e54a98a983a6e229b619587fbdb002ade6fe8083..cccdb4ecdb66a5bcaac31ac5c708877ac130db39 100644 (file)
@@ -14,9 +14,6 @@ import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
@@ -28,7 +25,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomRead extends DatastoreAbstractWriter {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainDomRead.class);
     private final DOMDataBroker domDataBroker;
 
@@ -54,15 +51,15 @@ public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransa
     @Override
     public void executeList() {
         final LogicalDatastoreType dsType = getDataStoreType();
-        final org.opendaylight.yangtools.yang.common.QName olId = QName.create(OuterList.QNAME, "id");
+        final QName olId = QName.create(OuterList.QNAME, "id");
         final YangInstanceIdentifier pid =
                 YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
 
         try (DOMDataTreeReadTransaction tx = domDataBroker.newReadOnlyTransaction()) {
             for (int l = 0; l < outerListElem; l++) {
-                YangInstanceIdentifier yid = pid.node(new NodeIdentifierWithPredicates(OuterList.QNAME, olId, l));
-                Optional<NormalizedNode<?,?>> optionalDataObject;
-                FluentFuture<Optional<NormalizedNode<?, ?>>> submitFuture = tx.read(dsType, yid);
+                YangInstanceIdentifier yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, olId, l));
+                Optional<NormalizedNode> optionalDataObject;
+                FluentFuture<Optional<NormalizedNode>> submitFuture = tx.read(dsType, yid);
                 try {
                     optionalDataObject = submitFuture.get();
                     if (optionalDataObject != null && optionalDataObject.isPresent()) {
@@ -75,16 +72,4 @@ public class TxchainDomRead extends DatastoreAbstractWriter implements DOMTransa
             }
         }
     }
-
-    @Override
-    public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainDomDelete, transaction {}, cause {}", chain, transaction.getIdentifier(),
-            cause);
-    }
-
-    @Override
-    public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
-        LOG.debug("TxchainDomDelete closed successfully, chain {}", chain);
-    }
 }
index 2b7820e5f1e68bd4cd701f6aadab6d78858185e2..b50a7e0ad13c609e220b443ea381299ebeeff73d 100644 (file)
@@ -14,53 +14,49 @@ import java.util.concurrent.ExecutionException;
 import org.opendaylight.dsbenchmark.DatastoreAbstractWriter;
 import org.opendaylight.dsbenchmark.DomListBuilder;
 import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.StartTestInput.DataStore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.TestExec;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.dsbenchmark.rev150105.test.exec.OuterList;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class TxchainDomWrite extends DatastoreAbstractWriter implements DOMTransactionChainListener {
+public class TxchainDomWrite extends DatastoreAbstractWriter implements FutureCallback<Empty> {
     private static final Logger LOG = LoggerFactory.getLogger(TxchainDomWrite.class);
-    private final DOMDataBroker domDataBroker;
-    private List<MapEntryNode> list;
 
-    public TxchainDomWrite(final DOMDataBroker domDataBroker, final StartTestInput.Operation oper,
+    private final DOMDataBroker dataBroker;
+    private List<MapEntryNode> list = null;
+
+    public TxchainDomWrite(final DOMDataBroker dataBroker, final StartTestInput.Operation oper,
             final int outerListElem, final int innerListElem, final long writesPerTx, final DataStore dataStore) {
         super(oper, outerListElem, innerListElem, writesPerTx, dataStore);
-        this.domDataBroker = domDataBroker;
+        this.dataBroker = dataBroker;
         LOG.debug("Created TxchainDomWrite");
     }
 
     @Override
     public void createList() {
-        list = DomListBuilder.buildOuterList(this.outerListElem, this.innerListElem);
+        list = DomListBuilder.buildOuterList(outerListElem, innerListElem);
     }
 
     @Override
     public void executeList() {
-        final LogicalDatastoreType dsType = getDataStoreType();
-        final YangInstanceIdentifier pid =
-                YangInstanceIdentifier.builder().node(TestExec.QNAME).node(OuterList.QNAME).build();
-        final DOMTransactionChain chain = domDataBroker.createTransactionChain(this);
+        final var dsType = getDataStoreType();
+        final var pid = YangInstanceIdentifier.of(TestExec.QNAME, OuterList.QNAME);
+        final var chain = dataBroker.createMergingTransactionChain();
+        chain.addCallback(this);
 
-        DOMDataTreeWriteTransaction tx = chain.newWriteOnlyTransaction();
+        var tx = chain.newWriteOnlyTransaction();
         int txSubmitted = 0;
         int writeCnt = 0;
 
-        for (MapEntryNode element : this.list) {
-            YangInstanceIdentifier yid =
-                    pid.node(new NodeIdentifierWithPredicates(OuterList.QNAME, element.getIdentifier().getKeyValues()));
+        for (var element : list) {
+            var yid = pid.node(NodeIdentifierWithPredicates.of(OuterList.QNAME, element.name().asMap()));
 
             if (oper == StartTestInput.Operation.PUT) {
                 tx.put(dsType, yid, element);
@@ -112,14 +108,12 @@ public class TxchainDomWrite extends DatastoreAbstractWriter implements DOMTrans
     }
 
     @Override
-    public void onTransactionChainFailed(final DOMTransactionChain chain, final DOMDataTreeTransaction transaction,
-            final Throwable cause) {
-        LOG.error("Broken chain {} in TxchainDomWrite, transaction {}, cause {}", chain, transaction.getIdentifier(),
-            cause);
+    public void onFailure(final Throwable cause) {
+        LOG.error("Broken chain in TxchainDomWrite", cause);
     }
 
     @Override
-    public void onTransactionChainSuccessful(final DOMTransactionChain chain) {
-        LOG.debug("Chain {} closed successfully", chain);
+    public void onSuccess(final Empty result) {
+        LOG.debug("Chain closed successfully");
     }
 }
diff --git a/benchmark/dsbenchmark/src/main/resources/OSGI-INF/blueprint/dsbenchmark.xml b/benchmark/dsbenchmark/src/main/resources/OSGI-INF/blueprint/dsbenchmark.xml
deleted file mode 100644 (file)
index 7829d2a..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           odl:use-default-for-reference-types="true">
-
-  <reference id="domDataBroker" interface="org.opendaylight.mdsal.dom.api.DOMDataBroker" odl:type="pingpong"/>
-  <reference id="txChainDataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker" odl:type="pingpong"/>
-  <reference id="simpleTxDataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker"/>
-
-  <bean id="provider" class="org.opendaylight.dsbenchmark.DsbenchmarkProvider"
-          init-method="init" destroy-method="close">
-    <argument ref="domDataBroker"/>
-    <argument ref="txChainDataBroker"/>
-    <argument ref="simpleTxDataBroker"/>
-  </bean>
-
-  <odl:rpc-implementation ref="provider"/>
-</blueprint>
index 683dd2e58c6e8b50acc8f2f2bea02667c4ff53e0..82b3c3aabf3ead4c528807bab530e0b04f8feb6a 100644 (file)
@@ -8,61 +8,39 @@ terms of the Eclipse Public License v1.0 which accompanies this distribution,
 and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
+  <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>org.opendaylight.mdsal</groupId>
-    <artifactId>binding-parent</artifactId>
-    <version>3.0.6</version>
-    <relativePath/>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>mdsal-parent</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
+    <relativePath>../../opendaylight/md-sal/parent</relativePath>
   </parent>
 
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>ntfbenchmark</artifactId>
-  <version>1.8.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
   <dependencies>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>benchmark-api</artifactId>
-      <version>${project.version}</version>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <artifactId>mdsal-binding-api</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-api</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <optional>true</optional>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
+      <optional>true</optional>
     </dependency>
   </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <propertyExpansion>checkstyle.violationseverity=error</propertyExpansion>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
 </project>
index e9371cb6654c8a62e61575cc5eaa0da9db16ad54..59843e7618a369835a80e05be126ba73a73506c4 100644 (file)
@@ -7,13 +7,14 @@
  */
 package ntfbenchmark.impl;
 
-import java.util.ArrayList;
-import java.util.List;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.NtfbenchBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.payload.Payload;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.payload.PayloadBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.payload.PayloadKey;
 
 public abstract class AbstractNtfbenchProducer implements Runnable {
     protected final NotificationPublishService publishService;
@@ -46,11 +47,12 @@ public abstract class AbstractNtfbenchProducer implements Runnable {
         this.publishService = publishService;
         this.iterations = iterations;
 
-        final List<Payload> listVals = new ArrayList<>();
+        final Builder<PayloadKey, Payload> listVals = ImmutableMap.builderWithExpectedSize(payloadSize);
         for (int i = 0; i < payloadSize; i++) {
-            listVals.add(new PayloadBuilder().setId(i).build());
+            final PayloadKey key = new PayloadKey(i);
+            listVals.put(key, new PayloadBuilder().withKey(key).build());
         }
 
-        ntf = new NtfbenchBuilder().setPayload(listVals).build();
+        ntf = new NtfbenchBuilder().setPayload(listVals.build()).build();
     }
 }
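setPayload() now takes a Map keyed by PayloadKey rather than a List, so the producer pre-sizes an ImmutableMap builder and sets each entry's key explicitly with withKey(). A hedged sketch of the same shape, using stand-in record types instead of the generated Payload and PayloadKey classes:

    import com.google.common.collect.ImmutableMap;

    final class KeyedPayloadSketch {
        record Key(int id) { }
        record Item(Key key) { }

        static ImmutableMap<Key, Item> build(final int size) {
            // builderWithExpectedSize avoids rehashing for a known element count
            final ImmutableMap.Builder<Key, Item> builder = ImmutableMap.builderWithExpectedSize(size);
            for (int i = 0; i < size; i++) {
                final Key key = new Key(i);
                // Binding builders express this step as new PayloadBuilder().withKey(key).build()
                builder.put(key, new Item(key));
            }
            return builder.build();
        }
    }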
index 021369acb0fe0178ee87853f569f313021217759..ee77a6c342ed841258ce086d3481255590aa06d1 100644 (file)
@@ -8,8 +8,11 @@
 package ntfbenchmark.impl;
 
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class NtfbenchBlockingProducer extends AbstractNtfbenchProducer {
+    private static final Logger LOG = LoggerFactory.getLogger(NtfbenchBlockingProducer.class);
 
     public NtfbenchBlockingProducer(final NotificationPublishService publishService, final int iterations,
             final int payloadSize) {
@@ -22,12 +25,13 @@ public class NtfbenchBlockingProducer extends AbstractNtfbenchProducer {
         int ntfOk = 0;
         int ntfError = 0;
 
-        for (int i = 0; i < this.iterations; i++) {
+        for (int i = 0; i < iterations; i++) {
             try {
-                this.publishService.putNotification(this.ntf);
+                publishService.putNotification(ntf);
                 ntfOk++;
             } catch (final Exception e) {
                 ntfError++;
+                LOG.debug("Failed to push notification", e);
             }
         }
 
index c6e2e8affb2af60bb4f88820498a0546a876439e..4b44e1d73dbf2f25bffbd3b4de47f83c03e38f10 100644 (file)
@@ -8,14 +8,13 @@
 package ntfbenchmark.impl;
 
 import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
 import java.util.concurrent.ExecutionException;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer {
-
-    private final SettableFuture<?> lastDeliveryFuture = SettableFuture.create();
-
+    private static final Logger LOG = LoggerFactory.getLogger(NtfbenchNonblockingProducer.class);
 
     public NtfbenchNonblockingProducer(final NotificationPublishService publishService, final int iterations,
             final int payloadSize) {
@@ -29,9 +28,9 @@ public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer {
         int ntfOk = 0;
         int ntfError = 0;
         ListenableFuture<?> lastOkFuture = null;
-        for (int i = 0; i < this.iterations; i++) {
+        for (int i = 0; i < iterations; i++) {
             try {
-                final ListenableFuture<?> result = this.publishService.offerNotification(this.ntf);
+                final ListenableFuture<?> result = publishService.offerNotification(ntf);
                 if (NotificationPublishService.REJECTED == result) {
                     ntfError++;
                 } else {
@@ -39,6 +38,7 @@ public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer {
                     lastOkFuture = result;
                 }
             } catch (final Exception e) {
+                LOG.debug("Failed to publish notification", e);
                 ntfError++;
             }
         }
@@ -50,9 +50,8 @@ public class NtfbenchNonblockingProducer extends AbstractNtfbenchProducer {
             try {
                 lastOkFuture.get();
             } catch (InterruptedException | ExecutionException e) {
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
         }
     }
-
 }
index d126b2774ae35bdb240a7ce850f3ff51f707b3c6..b57d37b99b0254395c54ef6ea648a8a0aef31273 100644 (file)
@@ -9,11 +9,10 @@ package ntfbenchmark.impl;
 
 import com.google.common.util.concurrent.Futures;
 import java.util.concurrent.Future;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.NtfbenchPayloadListener;
-
-public class NtfbenchTestListener implements NtfbenchPayloadListener {
 
+public class NtfbenchTestListener implements Listener<Ntfbench> {
     private final int expectedSize;
     private int received = 0;
 
@@ -22,7 +21,7 @@ public class NtfbenchTestListener implements NtfbenchPayloadListener {
     }
 
     @Override
-    public void onNtfbench(final Ntfbench notification) {
+    public void onNotification(final Ntfbench notification) {
         if (expectedSize == notification.getPayload().size()) {
             received++;
         }
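The generated per-model NtfbenchPayloadListener interface is gone; listeners now implement the generic NotificationService.Listener<N> with a single onNotification() method and are registered per notification type. A minimal sketch of registration under that API, using the types imported in this hunk:

    import org.opendaylight.mdsal.binding.api.NotificationService;
    import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
    import org.opendaylight.yangtools.concepts.Registration;

    final class NtfbenchListenerSketch implements Listener<Ntfbench> {
        @Override
        public void onNotification(final Ntfbench notification) {
            // One callback per registered notification type replaces the generated onNtfbench() method
        }

        static Registration register(final NotificationService service) {
            // Closing the returned Registration stops delivery, as NtfbenchmarkProvider does below
            return service.registerListener(Ntfbench.class, new NtfbenchListenerSketch());
        }
    }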
index 1b16329d524b7210bec5a948599c0d8f95b1389b..8cce7448bcd0d7272cfcca06cafb5c052ac82c7f 100644 (file)
@@ -20,9 +20,8 @@ public class NtfbenchWTCListener extends NtfbenchTestListener {
     }
 
     @Override
-    public void onNtfbench(final Ntfbench notification) {
-        // TODO Auto-generated method stub
-        super.onNtfbench(notification);
+    public void onNotification(final Ntfbench notification) {
+        super.onNotification(notification);
         if (expectedCount == getReceived()) {
             allDone.set(null);
         }
index 5ea3b0fffb2ddb618a30cd2049d53e7a85823e22..7c8e51f6431afaee79079c65833d180a710093a2 100644 (file)
@@ -7,65 +7,86 @@
  */
 package ntfbenchmark.impl;
 
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.NtfbenchmarkService;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbench.payload.rev150709.Ntfbench;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput.ProducerType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatus;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.TestStatusOutput;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService {
+@Singleton
+@Component(service = {})
+@RequireServiceComponentRuntime
+public final class NtfbenchmarkProvider implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(NtfbenchmarkProvider.class);
     private static final int TEST_TIMEOUT = 5;
 
     private final NotificationService listenService;
     private final NotificationPublishService publishService;
-
-    public NtfbenchmarkProvider(final NotificationService listenServiceDependency,
-            final NotificationPublishService publishServiceDependency) {
-        LOG.debug("NtfbenchmarkProvider Constructor");
-        listenService = listenServiceDependency;
-        publishService = publishServiceDependency;
-    }
-
-    public void init() {
-        LOG.info("NtfbenchmarkProvider initiated");
+    private final Registration reg;
+
+    @Inject
+    @Activate
+    public NtfbenchmarkProvider(@Reference final NotificationService listenService,
+            @Reference final NotificationPublishService publishService,
+            @Reference final RpcProviderService rpcService) {
+        this.listenService = requireNonNull(listenService);
+        this.publishService = requireNonNull(publishService);
+        reg = rpcService.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest);
+        LOG.debug("NtfbenchmarkProvider initiated");
     }
 
     @Override
+    @PreDestroy
+    @Deactivate
     public void close() {
+        reg.close();
         LOG.info("NtfbenchmarkProvider closed");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+    private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
         final int producerCount = input.getProducers().intValue();
         final int listenerCount = input.getListeners().intValue();
         final int iterations = input.getIterations().intValue();
         final int payloadSize = input.getIterations().intValue();
 
-        final List<AbstractNtfbenchProducer> producers = new ArrayList<>(producerCount);
-        final List<ListenerRegistration<NtfbenchTestListener>> listeners = new ArrayList<>(listenerCount);
+        final var producers = new ArrayList<AbstractNtfbenchProducer>(producerCount);
         for (int i = 0; i < producerCount; i++) {
             producers.add(new NtfbenchBlockingProducer(publishService, iterations, payloadSize));
         }
         int expectedCntPerListener = producerCount * iterations;
 
+        final var listeners = new ArrayList<NtfbenchTestListener>(listenerCount);
+        final var registrations = new ArrayList<Registration>(listenerCount);
         for (int i = 0; i < listenerCount; i++) {
             final NtfbenchTestListener listener;
             if (input.getProducerType() == ProducerType.BLOCKING) {
@@ -73,7 +94,8 @@ public class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService
             } else {
                 listener = new NtfbenchTestListener(payloadSize);
             }
-            listeners.add(listenService.registerNotificationListener(listener));
+            listeners.add(listener);
+            registrations.add(listenService.registerListener(Ntfbench.class, listener));
         }
 
         try {
@@ -83,16 +105,17 @@ public class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService
             final long startTime = System.nanoTime();
 
             for (int i = 0; i < input.getProducers().intValue(); i++) {
-                executor.submit(producers.get(i));
+                // FIXME: fools RV_RETURN_VALUE_IGNORED_BAD_PRACTICE for now, but we should check some more
+                verifyNotNull(executor.submit(producers.get(i)));
             }
             executor.shutdown();
             try {
                 executor.awaitTermination(TEST_TIMEOUT, TimeUnit.MINUTES);
-                for (ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
-                    listenerRegistration.getInstance().getAllDone().get();
+                for (var listener : listeners) {
+                    listener.getAllDone().get();
                 }
             } catch (final InterruptedException | ExecutionException e) {
-                LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT);
+                LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT, e);
             }
 
             final long producerEndTime = System.nanoTime();
@@ -102,11 +125,10 @@ public class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService
             long allProducersOk = 0;
             long allProducersError = 0;
 
-            for (final ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
-                allListeners += listenerRegistration.getInstance().getReceived();
+            for (var listener : listeners) {
+                allListeners += listener.getReceived();
             }
 
-            final long listenerEndTime = System.nanoTime();
             final long listenerElapsedTime = producerEndTime - startTime;
 
             LOG.info("Test Done");
@@ -116,28 +138,23 @@ public class NtfbenchmarkProvider implements AutoCloseable, NtfbenchmarkService
                 allProducersError += abstractNtfbenchProducer.getNtfError();
             }
 
-            final StartTestOutput output =
-                    new StartTestOutputBuilder()
-                            .setProducerElapsedTime(producerElapsedTime / 1000000)
-                            .setListenerElapsedTime(listenerElapsedTime / 1000000)
-                            .setListenerOk(allListeners)
-                            .setProducerOk(allProducersOk)
-                            .setProducerError(allProducersError)
-                            .setProducerRate((allProducersOk + allProducersError) * 1000000000 / producerElapsedTime)
-                            .setListenerRate(allListeners * 1000000000 / listenerElapsedTime)
-                           .build();
+            final StartTestOutput output = new StartTestOutputBuilder()
+                .setProducerElapsedTime(Uint32.valueOf(producerElapsedTime / 1000000))
+                .setListenerElapsedTime(Uint32.valueOf(listenerElapsedTime / 1000000))
+                .setListenerOk(Uint32.valueOf(allListeners))
+                .setProducerOk(Uint32.valueOf(allProducersOk))
+                .setProducerError(Uint32.valueOf(allProducersError))
+                .setProducerRate(
+                    Uint32.valueOf((allProducersOk + allProducersError) * 1000000000 / producerElapsedTime))
+                .setListenerRate(Uint32.valueOf(allListeners * 1000000000 / listenerElapsedTime))
+                .build();
             return RpcResultBuilder.success(output).buildFuture();
         } finally {
-            for (final ListenerRegistration<NtfbenchTestListener> listenerRegistration : listeners) {
-                listenerRegistration.close();
-            }
+            registrations.forEach(Registration::close);
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
-        // TODO Auto-generated method stub
-        return null;
+    private ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
+        throw new UnsupportedOperationException("Not implemented");
     }
-
 }
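With the Blueprint wiring removed (the ntfbenchmark.xml deleted just below), the provider becomes an OSGi Declarative Services component: dependencies arrive through an @Activate constructor with @Reference parameters, and the RPCs are registered directly with RpcProviderService as method references cast to their generated functional interfaces. A trimmed, hedged sketch of that registration under the same mdsal API; it registers only StartTest, whereas the real class registers TestStatus as well:

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.mdsal.binding.api.RpcProviderService;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTest;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestInput;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutput;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.ntfbenchmark.rev150105.StartTestOutputBuilder;
    import org.opendaylight.yangtools.concepts.Registration;
    import org.opendaylight.yangtools.yang.common.RpcResult;
    import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
    import org.osgi.service.component.annotations.Activate;
    import org.osgi.service.component.annotations.Component;
    import org.osgi.service.component.annotations.Deactivate;
    import org.osgi.service.component.annotations.Reference;

    @Component(service = { })
    public final class RpcRegistrationSketch implements AutoCloseable {
        private final Registration reg;

        @Activate
        public RpcRegistrationSketch(@Reference final RpcProviderService rpcService) {
            // A method reference cast to the generated functional interface implements the RPC
            reg = rpcService.registerRpcImplementations((StartTest) this::startTest);
        }

        private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
            return RpcResultBuilder.success(new StartTestOutputBuilder().build()).buildFuture();
        }

        @Deactivate
        @Override
        public void close() {
            reg.close();
        }
    }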
diff --git a/benchmark/ntfbenchmark/src/main/resources/OSGI-INF/blueprint/ntfbenchmark.xml b/benchmark/ntfbenchmark/src/main/resources/OSGI-INF/blueprint/ntfbenchmark.xml
deleted file mode 100644 (file)
index 0b36361..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           odl:use-default-for-reference-types="true">
-
-  <reference id="publishService" interface="org.opendaylight.mdsal.binding.api.NotificationPublishService"/>
-  <reference id="listenerService" interface="org.opendaylight.mdsal.binding.api.NotificationService"/>
-
-  <bean id="provider" class="ntfbenchmark.impl.NtfbenchmarkProvider"
-          init-method="init" destroy-method="close">
-    <argument ref="publishService"/>
-    <argument ref="listenerService"/>
-  </bean>
-
-  <odl:rpc-implementation ref="provider"/>
-</blueprint>
index 9defdcbe454e1a91a56f3e0eb12a6dbbb6fd85f6..cf915b7bc29116a9f8e91e6389fabc22cc6652a3 100644 (file)
@@ -3,26 +3,31 @@
 Copyright (c) 2015 Cisco Systems and others. All rights reserved.
 This program and the accompanying materials are made available under the
 terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
+and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
+
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>benchmark-aggregator</artifactId>
-  <version>1.8.0-SNAPSHOT</version>
-  <name>${project.artifactId}</name>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
-  <modelVersion>4.0.0</modelVersion>
+
+  <properties>
+    <maven.deploy.skip>true</maven.deploy.skip>
+    <maven.install.skip>true</maven.install.skip>
+  </properties>
+
   <modules>
     <module>api</module>
     <module>dsbenchmark</module>
     <module>ntfbenchmark</module>
     <module>rpcbenchmark</module>
-    <module>artifacts</module>
   </modules>
 </project>
index 99e8ccbfa0bc1e7d2b7ca871f5bdf43d25fffdaa..8eefdf66648207dbec42eb4f8940b94b2e70bf59 100644 (file)
@@ -8,61 +8,55 @@ terms of the Eclipse Public License v1.0 which accompanies this distribution,
 and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>mdsal-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../../opendaylight/md-sal/parent</relativePath>
+    </parent>
 
-  <parent>
-    <groupId>org.opendaylight.mdsal</groupId>
-    <artifactId>binding-parent</artifactId>
-    <version>3.0.6</version>
-    <relativePath/>
-  </parent>
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>rpcbenchmark</artifactId>
-  <version>1.8.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
+    <artifactId>rpcbenchmark</artifactId>
+    <packaging>bundle</packaging>
 
-  <dependencyManagement>
     <dependencies>
-      <dependency>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
+        <dependency>
+            <groupId>com.guicedee.services</groupId>
+            <artifactId>javax.inject</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>benchmark-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>yang-binding</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>concepts</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
+        </dependency>
     </dependencies>
-  </dependencyManagement>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>benchmark-api</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <propertyExpansion>checkstyle.violationseverity=error</propertyExpansion>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
 </project>
diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/AbstractRpcbenchPayloadService.java
new file mode 100644 (file)
index 0000000..34aa71c
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015 Cisco Systems and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package rpcbenchmark.impl;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutputBuilder;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+
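+/**
+ * Shared base for the RPC benchmark servers: both benchmark RPCs simply echo the input payload back
+ * to the caller and count the number of invocations served, exposed via {@link #getNumRpcs()}.
+ */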
+abstract class AbstractRpcbenchPayloadService {
+    private int numRpcs = 0;
+
+    final ListenableFuture<RpcResult<GlobalRpcBenchOutput>> globalRpcBench(final GlobalRpcBenchInput input) {
+        numRpcs++;
+        return RpcResultBuilder.success(new GlobalRpcBenchOutputBuilder(input).build()).buildFuture();
+    }
+
+    final ListenableFuture<RpcResult<RoutedRpcBenchOutput>> routedRpcBench(final RoutedRpcBenchInput input) {
+        numRpcs++;
+        return RpcResultBuilder.success(new RoutedRpcBenchOutputBuilder(input).build()).buildFuture();
+    }
+
+    final int getNumRpcs() {
+        return numRpcs;
+    }
+}
index f03c064b03aee53f597dcba90d80d8f1d454150c..86e3a097bf08a32af53fc489086fffae9d145536 100644 (file)
@@ -7,81 +7,78 @@
  */
 package rpcbenchmark.impl;
 
-import java.util.ArrayList;
-import java.util.List;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
-
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder;
-import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class GlobalBindingRTCClient implements RTCClient {
-
     private static final Logger LOG = LoggerFactory.getLogger(GlobalBindingRTCClient.class);
-    private final RpcbenchPayloadService service;
+
+    private final GlobalRpcBench globalRpcBench;
     private final AtomicLong rpcOk = new AtomicLong(0);
     private final AtomicLong rpcError = new AtomicLong(0);
     private final GlobalRpcBenchInput inVal;
     private final int inSize;
 
+    @Override
     public long getRpcOk() {
         return rpcOk.get();
     }
 
+    @Override
     public long getRpcError() {
         return rpcError.get();
     }
 
-    public GlobalBindingRTCClient(final RpcConsumerRegistry registry, final int inSize) {
-        if (registry != null) {
-            this.service = registry.getRpcService(RpcbenchPayloadService.class);
-        } else {
-            this.service = null;
-        }
+    public GlobalBindingRTCClient(final RpcService rpcService, final int inSize) {
+        globalRpcBench = rpcService.getRpc(GlobalRpcBench.class);
 
         this.inSize = inSize;
-        List<Payload> listVals = new ArrayList<>();
+        Builder<PayloadKey, Payload> listVals = ImmutableMap.builderWithExpectedSize(inSize);
         for (int i = 0; i < inSize; i++) {
-            listVals.add(new PayloadBuilder().setId(i).build());
+            final PayloadKey key = new PayloadKey(i);
+            listVals.put(key, new PayloadBuilder().withKey(key).build());
         }
-        inVal = new GlobalRpcBenchInputBuilder().setPayload(listVals).build();
+        inVal = new GlobalRpcBenchInputBuilder().setPayload(listVals.build()).build();
     }
 
+    @Override
     public void runTest(final int iterations) {
-        int rpcOk = 0;
-        int rpcError = 0;
+        int ok = 0;
+        int error = 0;
 
         for (int i = 0; i < iterations; i++) {
-            Future<RpcResult<GlobalRpcBenchOutput>> output = service.globalRpcBench(inVal);
+            final var output = globalRpcBench.invoke(inVal);
             try {
-                RpcResult<GlobalRpcBenchOutput> rpcResult = output.get();
+                final var rpcResult = output.get();
 
                 if (rpcResult.isSuccessful()) {
-                    List<Payload> retVal = rpcResult.getResult().getPayload();
+                    final var retVal = rpcResult.getResult().getPayload();
                     if (retVal.size() == inSize) {
-                        rpcOk++;
+                        ok++;
                     }
                     else {
-                        rpcError++;
+                        error++;
                     }
                 }
             } catch (InterruptedException | ExecutionException e) {
-                rpcError++;
+                error++;
                 LOG.error("Execution failed: ", e);
             }
         }
 
-        this.rpcOk.addAndGet(rpcOk);
-        this.rpcError.addAndGet(rpcError);
+        rpcOk.addAndGet(ok);
+        rpcError.addAndGet(error);
     }
 
     @Override
@@ -89,5 +86,4 @@ public class GlobalBindingRTCClient implements RTCClient {
         // TODO Auto-generated method stub
 
     }
-
 }
index edded6a835298b741adc092155530657915b6782..78d8e4a1ba49456e11740d338278ce657a0f16e3 100644 (file)
@@ -5,49 +5,31 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package rpcbenchmark.impl;
 
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchInput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBenchOutputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutputBuilder;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class GlobalBindingRTCServer implements RpcbenchPayloadService {
-
+final class GlobalBindingRTCServer extends AbstractRpcbenchPayloadService implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(GlobalBindingRTCServer.class);
-    private int numRpcs = 0;
 
-    public GlobalBindingRTCServer() {
-        LOG.debug("GlobalBindingRTCServer created.");
-    }
+    private final Registration reg;
 
-    @Override
-    public ListenableFuture<RpcResult<GlobalRpcBenchOutput>> globalRpcBench(final GlobalRpcBenchInput input) {
-        GlobalRpcBenchOutput output = new GlobalRpcBenchOutputBuilder(input).build();
-        RpcResult<GlobalRpcBenchOutput> result = RpcResultBuilder.success(output).build();
-        numRpcs++;
-        return Futures.immediateFuture(result);
+    GlobalBindingRTCServer(@Reference final RpcProviderService rpcProvider) {
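+        // Register both benchmark RPCs as method references; the Registration is released in close().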
+        reg = rpcProvider.registerRpcImplementations(
+            (GlobalRpcBench) this::globalRpcBench,
+            (RoutedRpcBench) this::routedRpcBench);
+        LOG.debug("GlobalBindingRTCServer started");
     }
 
     @Override
-    public ListenableFuture<RpcResult<RoutedRpcBenchOutput>> routedRpcBench(final RoutedRpcBenchInput input) {
-        RoutedRpcBenchOutput output = new RoutedRpcBenchOutputBuilder(input).build();
-        RpcResult<RoutedRpcBenchOutput> result = RpcResultBuilder.success(output).build();
-        numRpcs++;
-        return Futures.immediateFuture(result);
-    }
-
-    public int getNumRpcs() {
-        return numRpcs;
+    public void close() {
+        reg.close();
+        LOG.debug("GlobalBindingRTCServer stopped");
     }
 }
index 5565e037c0eb4fc3c2b0f2d4e1435d5f20ec85b1..d1b5edd726896976309f751e26c0a9687bc064fd 100644 (file)
@@ -5,12 +5,14 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package rpcbenchmark.impl;
 
 public interface RTCClient {
     long getRpcOk();
+
     long getRpcError();
+
     void runTest(int iterations);
+
     void close();
 }
diff --git a/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java b/benchmark/rpcbenchmark/src/main/java/rpcbenchmark/impl/RoutedBindingRTCServer.java
new file mode 100644 (file)
index 0000000..a638460
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015 Cisco Systems and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package rpcbenchmark.impl;
+
+import java.util.List;
+import java.util.Set;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.GlobalRpcBench;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
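+/**
+ * Benchmark RPC server registered for a specific set of instance identifiers, so that invocations
+ * are dispatched based on the node targeted by the request.
+ */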
+final class RoutedBindingRTCServer extends AbstractRpcbenchPayloadService implements AutoCloseable {
+    private final Registration reg;
+
+    RoutedBindingRTCServer(final RpcProviderService rpcProvider, final Set<InstanceIdentifier<?>> paths) {
+        reg = rpcProvider.registerRpcImplementations(List.of(
+            (GlobalRpcBench) this::globalRpcBench,
+            (RoutedRpcBench) this::routedRpcBench), paths);
+    }
+
+    @Override
+    public void close() {
+        reg.close();
+    }
+}
index 1c44b98a3701d7d7620c309a5f7ec2af25fdb5cf..bd5e83e8bf511286f0ff7f56c6042f8b64c0ce98 100644 (file)
@@ -5,92 +5,94 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package rpcbenchmark.impl;
 
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicLong;
-
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBench;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInput;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchInputBuilder;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RoutedRpcBenchOutput;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.Payload;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadBuilder;
+import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.payload.PayloadKey;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class RoutedBindingRTClient implements RTCClient {
-    private static final Logger LOG = LoggerFactory.getLogger(GlobalBindingRTCClient.class);
-    private final RpcbenchPayloadService service;
+    private static final Logger LOG = LoggerFactory.getLogger(RoutedBindingRTClient.class);
+    private final RoutedRpcBench routedRpcBench;
     private final AtomicLong rpcOk = new AtomicLong(0);
     private final AtomicLong rpcError = new AtomicLong(0);
-    private final List<RoutedRpcBenchInput> inVal;
+    private final List<RoutedRpcBenchInput> inVal = new ArrayList<>();
     private final int inSize;
 
-    public long getRpcOk() {
-        return rpcOk.get();
-    }
-
-    public long getRpcError() {
-        return rpcError.get();
-    }
-
-    public RoutedBindingRTClient(final RpcConsumerRegistry registry, final int inSize, final List<InstanceIdentifier<?>> routeIid) {
-        if (registry != null) {
-            this.service = registry.getRpcService(RpcbenchPayloadService.class);
-        } else {
-            this.service = null;
-        }
+    public RoutedBindingRTClient(final RpcService rpcService, final int inSize,
+            final List<InstanceIdentifier<?>> routeIid) {
+        routedRpcBench = rpcService.getRpc(RoutedRpcBench.class);
         this.inSize = inSize;
-        this.inVal = new ArrayList<>();
 
-        List<Payload> listVals = new ArrayList<>();
+        Builder<PayloadKey, Payload> listVals = ImmutableMap.builderWithExpectedSize(inSize);
         for (int i = 0; i < inSize; i++) {
-            listVals.add(new PayloadBuilder().setId(i).build());
+            final PayloadKey key = new PayloadKey(i);
+            listVals.put(key, new PayloadBuilder().withKey(key).build());
         }
 
         for (InstanceIdentifier<?> iid : routeIid) {
-            inVal.add(new RoutedRpcBenchInputBuilder().setNode(iid).setPayload(listVals).build());
+            inVal.add(new RoutedRpcBenchInputBuilder().setNode(iid).setPayload(listVals.build()).build());
         }
 
     }
 
+    @Override
+    public long getRpcOk() {
+        return rpcOk.get();
+    }
+
+    @Override
+    public long getRpcError() {
+        return rpcError.get();
+    }
+
+    @Override
     public void runTest(final int iterations) {
-        int rpcOk = 0;
-        int rpcError = 0;
+        int ok = 0;
+        int error = 0;
 
         int rpcServerCnt = inVal.size();
         for (int i = 0; i < iterations; i++) {
             RoutedRpcBenchInput input = inVal.get(ThreadLocalRandom.current().nextInt(rpcServerCnt));
-            Future<RpcResult<RoutedRpcBenchOutput>> output = service.routedRpcBench(input);
+            Future<RpcResult<RoutedRpcBenchOutput>> output = routedRpcBench.invoke(input);
             try {
                 RpcResult<RoutedRpcBenchOutput> rpcResult = output.get();
 
                 if (rpcResult.isSuccessful()) {
-                    List<Payload> retVal = rpcResult.getResult().getPayload();
+                    Map<PayloadKey, Payload> retVal = rpcResult.getResult().getPayload();
                     if (retVal.size() == inSize) {
-                        rpcOk++;
+                        ok++;
                     }
                     else {
-                        rpcError++;
+                        error++;
                     }
                 }
             } catch (InterruptedException | ExecutionException e) {
-                rpcError++;
+                error++;
                 LOG.error("Execution failed: ", e);
             }
         }
 
-        this.rpcOk.addAndGet(rpcOk);
-        this.rpcError.addAndGet(rpcError);
+        rpcOk.addAndGet(ok);
+        rpcError.addAndGet(error);
     }
 
     @Override
@@ -98,5 +100,4 @@ public class RoutedBindingRTClient implements RTCClient {
         // TODO Auto-generated method stub
 
     }
-
 }
index 8e8e31e97527ef09cb1749cb6b82cb0c64ec9df9..c785e9b384134eb023833364961ecc68566e1eca 100644 (file)
  */
 package rpcbenchmark.impl;
 
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.NodeContext;
-import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchPayloadService;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.binding.api.RpcService;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.RpcbenchRpcRoutes;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRoute;
 import org.opendaylight.yang.gen.v1.rpcbench.payload.rev150702.rpcbench.rpc.routes.RpcRouteKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.RpcbenchmarkService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.StartTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatus;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutput.ExecStatus;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.rpcbenchmark.rev150702.TestStatusOutputBuilder;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkService {
-
+@Singleton
+@Component(service = {})
+@RequireServiceComponentRuntime
+public final class RpcbenchmarkProvider implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(RpcbenchmarkProvider.class);
-    private static final int testTimeout = 5;
+    private static final int TEST_TIMEOUT = 5;
 
-    private final GlobalBindingRTCServer globalServer;
     private final AtomicReference<ExecStatus> execStatus = new AtomicReference<>(ExecStatus.Idle);
-    private final RpcProviderRegistry providerRegistry;
-
-    public RpcbenchmarkProvider(final RpcProviderRegistry providerRegistry, final GlobalBindingRTCServer globalServer) {
-        this.providerRegistry = providerRegistry;
-        this.globalServer = globalServer;
-    }
-
-    public void init() {
+    private final RpcProviderService providerRegistry;
+    private final RpcService consumerRegistry;
+    private final GlobalBindingRTCServer globalServer;
+    private final Registration reg;
+
+    @Inject
+    @Activate
+    public RpcbenchmarkProvider(@Reference final RpcProviderService providerRegistry,
+            @Reference final RpcService consumerRegistry) {
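+        // Constructor shared by OSGi Declarative Services (@Activate/@Reference) and javax.inject (@Inject) wiring.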
+        this.providerRegistry = requireNonNull(providerRegistry);
+        this.consumerRegistry = requireNonNull(consumerRegistry);
+        globalServer = new GlobalBindingRTCServer(providerRegistry);
+        reg = providerRegistry.registerRpcImplementations((TestStatus) this::testStatus, (StartTest) this::startTest);
         LOG.info("RpcbenchmarkProvider initiated");
     }
 
     @Override
+    @Deactivate
+    @PreDestroy
     public void close() {
+        globalServer.close();
+        reg.close();
         LOG.info("RpcbenchmarkProvider closed");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
+    private ListenableFuture<RpcResult<StartTestOutput>> startTest(final StartTestInput input) {
         LOG.debug("startTest {}", input);
 
         final RTCClient client;
-        final List<RoutedRpcRegistration<?>> rpcRegs = new ArrayList<>();
+        RoutedBindingRTCServer routed = null;
 
         switch (input.getOperation()) {
-        case ROUTEDRTC:
-            List<InstanceIdentifier<?>> routeIid = new ArrayList<>();
-            for (int i = 0; i < input.getNumServers().intValue(); i++) {
-                GlobalBindingRTCServer server = new GlobalBindingRTCServer();
-                RoutedRpcRegistration<RpcbenchPayloadService> routedReg =
-                        providerRegistry.addRoutedRpcImplementation(RpcbenchPayloadService.class, server);
-
-                KeyedInstanceIdentifier<RpcRoute, RpcRouteKey> iid =
-                        InstanceIdentifier
-                            .create(RpcbenchRpcRoutes.class)
-                            .child(RpcRoute.class, new RpcRouteKey(Integer.toString(i)));
-                routeIid.add(iid);
-                routedReg.registerPath(NodeContext.class, iid);
-                rpcRegs.add(routedReg);
-            }
-
-            client = new RoutedBindingRTClient(providerRegistry, input.getPayloadSize().intValue(), routeIid);
-            break;
-
-        case GLOBALRTC:
-            client = new GlobalBindingRTCClient(providerRegistry, input.getPayloadSize().intValue());
-            break;
-
-        default:
-            LOG.error("Unsupported server/client type {}", input.getOperation());
-            throw new IllegalArgumentException("Unsupported server/client type" + input.getOperation());
+            case ROUTEDRTC:
+                List<InstanceIdentifier<?>> routeIid = new ArrayList<>();
+                for (int i = 0; i < input.getNumServers().intValue(); i++) {
+                    routeIid.add(InstanceIdentifier.create(RpcbenchRpcRoutes.class)
+                        .child(RpcRoute.class, new RpcRouteKey(Integer.toString(i))));
+                }
+
+                routed = new RoutedBindingRTCServer(providerRegistry, Set.copyOf(routeIid));
+                client = new RoutedBindingRTClient(consumerRegistry, input.getPayloadSize().intValue(), routeIid);
+                break;
+
+            case GLOBALRTC:
+                client = new GlobalBindingRTCClient(consumerRegistry, input.getPayloadSize().intValue());
+                break;
+
+            default:
+                LOG.error("Unsupported server/client type {}", input.getOperation());
+                throw new IllegalArgumentException("Unsupported server/client type " + input.getOperation());
         }
 
         try {
@@ -101,17 +114,18 @@ public class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkService
             final Runnable testRun = () -> client.runTest(input.getIterations().intValue());
 
             LOG.info("Test Started");
-            long startTime = System.nanoTime();
+            final long startTime = System.nanoTime();
 
-            for (int i = 0; i < input.getNumClients().intValue(); i++ ) {
-                executor.submit(testRun);
+            for (int i = 0; i < input.getNumClients().intValue(); i++) {
+                // FIXME: fools RV_RETURN_VALUE_IGNORED_BAD_PRACTICE, but we should check more
+                verifyNotNull(executor.submit(testRun));
             }
 
             executor.shutdown();
             try {
-                executor.awaitTermination(testTimeout, TimeUnit.MINUTES);
+                executor.awaitTermination(TEST_TIMEOUT, TimeUnit.MINUTES);
             } catch (final InterruptedException e) {
-                LOG.error("Out of time: test did not finish within the {} min deadline ", testTimeout);
+                LOG.error("Out of time: test did not finish within the {} min deadline ", TEST_TIMEOUT);
             }
 
             long endTime = System.nanoTime();
@@ -120,25 +134,25 @@ public class RpcbenchmarkProvider implements AutoCloseable, RpcbenchmarkService
             long elapsedTime = endTime - startTime;
 
             StartTestOutput output = new StartTestOutputBuilder()
-                                            .setRate((long)0)
-                                            .setGlobalRtcClientError(client.getRpcError())
-                                            .setGlobalRtcClientOk(client.getRpcOk())
-                                            .setExecTime(TimeUnit.NANOSECONDS.toMillis(elapsedTime))
-                                            .setRate((client.getRpcOk() + client.getRpcError()) * 1000000000 / elapsedTime)
+                                            .setRate(Uint32.ZERO)
+                                            .setGlobalRtcClientError(Uint32.valueOf(client.getRpcError()))
+                                            .setGlobalRtcClientOk(Uint32.valueOf(client.getRpcOk()))
+                                            .setExecTime(Uint32.valueOf(TimeUnit.NANOSECONDS.toMillis(elapsedTime)))
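+                                            // Rate: total invocations (ok + error) per second, from elapsed nanoseconds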
+                                            .setRate(Uint32.valueOf(
+                                                (client.getRpcOk() + client.getRpcError()) * 1000000000 / elapsedTime))
                                             .build();
             return RpcResultBuilder.success(output).buildFuture();
         } finally {
-            for (RoutedRpcRegistration<?> routedRpcRegistration : rpcRegs) {
-                routedRpcRegistration.close();
+            if (routed != null) {
+                routed.close();
             }
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
+    private ListenableFuture<RpcResult<TestStatusOutput>> testStatus(final TestStatusInput input) {
         LOG.info("testStatus");
         TestStatusOutput output = new TestStatusOutputBuilder()
-                                        .setGlobalServerCnt((long)globalServer.getNumRpcs())
+                                        .setGlobalServerCnt(Uint32.valueOf(globalServer.getNumRpcs()))
                                         .setExecStatus(execStatus.get())
                                         .build();
         return RpcResultBuilder.success(output).buildFuture();
diff --git a/benchmark/rpcbenchmark/src/main/resources/OSGI-INF/blueprint/rpcbenchmark.xml b/benchmark/rpcbenchmark/src/main/resources/OSGI-INF/blueprint/rpcbenchmark.xml
deleted file mode 100644 (file)
index 96611d3..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           odl:use-default-for-reference-types="true">
-
-  <bean id="globalServer" class="rpcbenchmark.impl.GlobalBindingRTCServer"/>
-
-  <reference id="rpcRegistry" interface="org.opendaylight.controller.sal.binding.api.RpcProviderRegistry"/>
-
-  <bean id="provider" class="rpcbenchmark.impl.RpcbenchmarkProvider"
-          init-method="init" destroy-method="close">
-    <argument ref="rpcRegistry"/>
-    <argument ref="globalServer"/>
-  </bean>
-
-  <odl:rpc-implementation ref="globalServer"/>
-  <odl:rpc-implementation ref="provider"/>
-</blueprint>
diff --git a/bundle-parent/pom.xml b/bundle-parent/pom.xml
new file mode 100644 (file)
index 0000000..5d373d3
--- /dev/null
@@ -0,0 +1,141 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.opendaylight.mdsal</groupId>
+        <artifactId>bundle-parent</artifactId>
+        <version>13.0.1</version>
+        <relativePath/>
+    </parent>
+
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>bundle-parent</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
+    <packaging>pom</packaging>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.opendaylight.controller</groupId>
+                <artifactId>controller-artifacts</artifactId>
+                <version>9.0.3-SNAPSHOT</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+
+            <!-- Scala and its modules -->
+            <dependency>
+                <groupId>org.scala-lang</groupId>
+                <artifactId>scala-library</artifactId>
+                <version>2.13.13</version>
+            </dependency>
+            <dependency>
+                <groupId>org.scala-lang</groupId>
+                <artifactId>scala-reflect</artifactId>
+                <version>2.13.13</version>
+            </dependency>
+            <dependency>
+                <groupId>org.scala-lang.modules</groupId>
+                <artifactId>scala-java8-compat_2.13</artifactId>
+                <version>1.0.2</version>
+            </dependency>
+            <dependency>
+                <groupId>org.scala-lang.modules</groupId>
+                <artifactId>scala-parser-combinators_2.13</artifactId>
+                <version>1.1.2</version>
+            </dependency>
+            <dependency>
+                <groupId>org.scalatestplus</groupId>
+                <artifactId>junit-4-13_2.13</artifactId>
+                <version>3.2.13.0</version>
+                <scope>test</scope>
+            </dependency>
+
+            <!-- Configuration library -->
+            <!-- This needs to be kept in sync with the version used by akka -->
+            <dependency>
+                <groupId>com.typesafe</groupId>
+                <artifactId>config</artifactId>
+                <version>1.4.2</version>
+            </dependency>
+            <dependency>
+                <groupId>com.typesafe</groupId>
+                <artifactId>ssl-config-core_2.13</artifactId>
+                <version>0.4.3</version>
+            </dependency>
+
+            <!-- Akka testkit -->
+            <dependency>
+                <groupId>com.typesafe.akka</groupId>
+                <artifactId>akka-testkit_2.13</artifactId>
+                <version>2.6.21</version>
+                <scope>test</scope>
+                <exclusions>
+                    <exclusion>
+                        <groupId>com.typesafe.akka</groupId>
+                        <artifactId>akka-actor_2.13</artifactId>
+                    </exclusion>
+                </exclusions>
+            </dependency>
+            <dependency>
+                <groupId>com.typesafe.akka</groupId>
+                <artifactId>akka-actor-testkit-typed_2.13</artifactId>
+                <version>2.6.21</version>
+                <scope>test</scope>
+                <exclusions>
+                    <exclusion>
+                        <groupId>com.typesafe.akka</groupId>
+                        <artifactId>akka-actor-typed_2.13</artifactId>
+                    </exclusion>
+                    <exclusion>
+                        <groupId>com.typesafe.akka</groupId>
+                        <artifactId>akka-slf4j_2.13</artifactId>
+                    </exclusion>
+                </exclusions>
+            </dependency>
+            <dependency>
+                <groupId>com.typesafe.akka</groupId>
+                <artifactId>akka-persistence-tck_2.13</artifactId>
+                <version>2.6.21</version>
+                <scope>test</scope>
+                <exclusions>
+                    <exclusion>
+                        <groupId>com.typesafe.akka</groupId>
+                        <artifactId>akka-persistence_2.13</artifactId>
+                    </exclusion>
+                </exclusions>
+            </dependency>
+
+            <!-- Reactive Streams, used by Akka -->
+            <dependency>
+                <groupId>org.reactivestreams</groupId>
+                <artifactId>reactive-streams</artifactId>
+                <version>1.0.4</version>
+            </dependency>
+
+            <!-- Aeron, required by Akka -->
+            <dependency>
+                <groupId>org.agrona</groupId>
+                <artifactId>agrona</artifactId>
+                <version>1.15.2</version>
+            </dependency>
+            <dependency>
+                <groupId>io.aeron</groupId>
+                <artifactId>aeron-client</artifactId>
+                <version>1.38.1</version>
+            </dependency>
+            <dependency>
+                <groupId>io.aeron</groupId>
+                <artifactId>aeron-driver</artifactId>
+                <version>1.38.1</version>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+</project>
index 925316abfa248333bf07605e24fc4a9ad928c78e..7093f9de2a3382369a905f654211a608fcc12041 100644 (file)
 # http://www.eclipse.org/legal/epl-v10.html
 ##############################################################################
 
+import xml.etree.ElementTree as ET
+
 from docs_conf.conf import *
 
+data = ET.parse('pom.xml')
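+# Expose the Maven project version from pom.xml as the Sphinx version and release.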
+project_version = data.getroot().find('./{http://maven.apache.org/POM/4.0.0}version').text
+version = project_version
+release = project_version
index e9fd0669b077802d0109b657e56022f08bfe6a68..dc9cd8fcf4cf35886a5c7c62cb94004232064e81 100644 (file)
@@ -1,4 +1,2 @@
 project_cfg: opendaylight
 project: Controller
-version: Neon
-
index 58e3b0df680d34358e90636e2137cae56c1a9b01..686cfefcde3dd6bc82db98bfd683e2738ef5ff9e 100644 (file)
@@ -28,11 +28,6 @@ The OpenDaylight Controller relies on the following technologies:
 The OpenDaylight Controller provides following model-driven subsystems
 as a foundation for Java applications:
 
--  :ref:`config_subsystem` - an activation,
-   dependency-injection and configuration framework, which allows
-   two-phase commits of configuration and dependency-injection, and
-   allows for run-time rewiring.
-
 -  :ref:`MD-SAL <mdsal_dev_guide>` - messaging and data storage
    functionality for data, notifications and RPCs modeled by application
    developers. MD-SAL uses YANG as the modeling for both interface and
@@ -885,15 +880,15 @@ RESTCONF operations overview
 RESTCONF supports **OPTIONS**, **GET**, **PUT**, **POST**, and
 **DELETE** operations. Request and response data can either be in the
 XML or JSON format. XML structures according to yang are defined at:
-`XML-YANG <http://tools.ietf.org/html/rfc6020>`__. JSON structures are
+`XML-YANG <https://www.rfc-editor.org/rfc/rfc6020>`__. JSON structures are
 defined at:
-`JSON-YANG <http://tools.ietf.org/html/draft-lhotka-netmod-yang-json-02>`__.
+`JSON-YANG <https://datatracker.ietf.org/doc/html/draft-lhotka-netmod-yang-json-02>`__.
 Data in the request must have a correctly set **Content-Type** field in
 the http header with the allowed value of the media type. The media type
 of the requested data has to be set in the **Accept** field. Get the
 media types for each resource by calling the OPTIONS operation. Most of
 the paths of the RESTCONF endpoints use `Instance
-Identifier <https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts#Instance_Identifier>`__.
+Identifier <https://wiki-archive.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Concepts#Instance_Identifier>`__.
 ``<identifier>`` is used in the explanation of the operations.
 
 | **<identifier>**
@@ -915,7 +910,7 @@ Identifier <https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Co
      be known which node is X (for example: C:X). For more details about
      encoding, see: `RESTCONF 02 - Encoding YANG Instance Identifiers in
      the Request
-     URI. <http://tools.ietf.org/html/draft-bierman-netconf-restconf-02#section-5.3.1>`__
+     URI. <https://datatracker.ietf.org/doc/html/draft-bierman-netconf-restconf-02#section-5.3.1>`__
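
As an illustration of the header conventions above, here is a minimal Java 11+ sketch that
performs a *GET* on an operational-datastore path and requests a JSON response via the
**Accept** header. The endpoint ``http://localhost:8181/restconf``, the default ``admin``
credentials and the ``toaster:toaster`` path are assumptions made for the example only;
substitute the values of your own deployment.

.. code:: java

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public final class RestconfGetExample {
        public static void main(final String[] args) throws Exception {
            // Placeholder endpoint and credentials -- adjust to your deployment.
            final String base = "http://localhost:8181/restconf";
            final String basicAuth = Base64.getEncoder()
                .encodeToString("admin:admin".getBytes(StandardCharsets.UTF_8));

            final HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(base + "/operational/toaster:toaster"))
                // Accept selects the media type of the response; Content-Type would
                // describe the request body of a PUT or POST.
                .header("Accept", "application/json")
                .header("Authorization", "Basic " + basicAuth)
                .GET()
                .build();

            final HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode());
            System.out.println(response.body());
        }
    }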
 
 Mount point
 ~~~~~~~~~~~
@@ -927,7 +922,7 @@ Mount point
   point itself by using <identifier>/**yang-ext:mount**.
 | More information on how to actually use mountpoints is available at:
   `OpenDaylight
-  Controller:Config:Examples:Netconf <https://wiki.opendaylight.org/view/OpenDaylight_Controller:Config:Examples:Netconf>`__.
+  Controller:Config:Examples:Netconf <https://wiki-archive.opendaylight.org/view/OpenDaylight_Controller:Config:Examples:Netconf>`__.
 
 HTTP methods
 ~~~~~~~~~~~~
@@ -1091,7 +1086,7 @@ DELETE /restconf/config/<identifier>
 -  <identifier> points to a data node which must be removed.
 
 More information is available in the `RESTCONF
-RFC <http://tools.ietf.org/html/draft-bierman-netconf-restconf-02>`__.
+RFC <https://datatracker.ietf.org/doc/html/draft-bierman-netconf-restconf-02>`__.
 
 How RESTCONF works
 ~~~~~~~~~~~~~~~~~~
@@ -1370,538 +1365,3 @@ Something practical
 
     Status: 200 OK
 
-Websocket change event notification subscription tutorial
----------------------------------------------------------
-
-Subscribing to data change notifications makes it possible to obtain
-notifications about data manipulation (insert, change, delete) which are
-done on any specified **path** of any specified **datastore** with
-specific **scope**. In the following examples, *{odlAddress}* is the address of
-the server where OpenDaylight is running and *{odlPort}* is the port on which
-it is listening.
-
-Websocket notifications subscription process
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In this section we will learn what steps need to be taken in order to
-successfully subscribe to data change event notifications.
-
-Create stream
-^^^^^^^^^^^^^
-
-In order to use event notifications, you first need to call the RPC that
-creates a notification stream that you can later listen to. You need to
-provide three parameters to this RPC:
-
--  **path**: the data store path that you plan to listen to. You can
-   register a listener on containers, lists and leaves.
-
--  **datastore**: data store type. *OPERATIONAL* or *CONFIGURATION*.
-
--  **scope**: Represents scope of data change. Possible options are:
-
-   -  BASE: only changes directly to the data tree node specified in the
-      path will be reported
-
-   -  ONE: changes to the node and to direct child nodes will be
-      reported
-
-   -  SUBTREE: changes anywhere in the subtree starting at the node will
-      be reported
-
-The RPC to create the stream can be invoked via RESTCONF like this:
-
--  URI:
-   http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription
-
--  HEADER: Content-Type=application/json
-
--  OPERATION: POST
-
--  DATA:
-
-   .. code:: json
-
-       {
-           "input": {
-               "path": "/toaster:toaster/toaster:toasterStatus",
-               "sal-remote-augment:datastore": "OPERATIONAL",
-               "sal-remote-augment:scope": "ONE"
-           }
-       }
-
-The response should look something like this:
-
-.. code:: json
-
-    {
-        "output": {
-            "stream-name": "data-change-event-subscription/toaster:toaster/toaster:toasterStatus/datastore=CONFIGURATION/scope=SUBTREE"
-        }
-    }
-
-**stream-name** is important because you will need to use it when you
-subscribe to the stream in the next step.
-
-.. note::
-
-    Internally, this will create a new listener for *stream-name* if it
-    did not already exist.
-
-Subscribe to stream
-^^^^^^^^^^^^^^^^^^^
-
-In order to subscribe to the stream and obtain the WebSocket location, you need
-to call *GET* on your stream path. The URI should generally be
-http://{odlAddress}:{odlPort}/restconf/streams/stream/{streamName},
-where *{streamName}* is the *stream-name* parameter contained in the
-response of the *create-data-change-event-subscription* RPC from the
-previous step.
-
--  URI:
-   http://{odlAddress}:{odlPort}/restconf/streams/stream/data-change-event-subscription/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE
-
--  OPERATION: GET
-
-The subscription call may be modified with the following query parameters defined in the RESTCONF RFC:
-
--  `filter <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.6>`__
-
--  `start-time <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.7>`__
-
--  `end-time <https://tools.ietf.org/html/draft-ietf-netconf-restconf-05#section-4.8.8>`__
-
-In addition, the following ODL extension query parameter is supported:
-
-:odl-leaf-nodes-only:
-  If this parameter is set to "true", create and update notifications will only
-  contain the leaf nodes modified instead of the entire subscription subtree.
-  This can help in reducing the size of the notifications.
-
-The expected response status is 200 OK and the response body should be
-empty. You will get your WebSocket location from the **Location** header of
-the response. For example, in our toaster example the Location header
-would have this value:
-*ws://{odlAddress}:8185/toaster:toaster/datastore=CONFIGURATION/scope=SUBTREE*
-
-.. note::
-
-    During this phase there is an internal check to see whether a
-    listener for the *stream-name* from the URI already exists. If not, a
-    new listener is registered with the DOM data broker.
-
-Receive notifications
-^^^^^^^^^^^^^^^^^^^^^
-
-You should now have a data change notification stream created and the
-location of a WebSocket. You can use this WebSocket to listen to data
-change notifications. To listen to notifications you can use a
-JavaScript client or, if you are using the Chrome browser, the
-`Simple WebSocket
-Client <https://chrome.google.com/webstore/detail/simple-websocket-client/pfdhoblngboilpfeibdedpjgfnlcodoo>`__.
-
-Also, for testing purposes, there is a simple Java application named
-WebSocketClient. The application is placed in the
-*-sal-rest-connector-classes.class* project. It accepts a WebSocket URI
-as an input parameter. After starting the utility (run the WebSocketClient
-class directly in Eclipse/IntelliJ IDEA), received notifications should be
-displayed in the console.
-
-Notifications are always in XML format and look like this:
-
-.. code:: xml
-
-    <notification xmlns="urn:ietf:params:xml:ns:netconf:notification:1.0">
-        <eventTime>2014-09-11T09:58:23+02:00</eventTime>
-        <data-changed-notification xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote">
-            <data-change-event>
-                <path xmlns:meae="http://netconfcentral.org/ns/toaster">/meae:toaster</path>
-                <operation>updated</operation>
-                <data>
-                   <!-- updated data -->
-                </data>
-            </data-change-event>
-        </data-changed-notification>
-    </notification>
-
-Example use case
-~~~~~~~~~~~~~~~~
-
-The typical use case is listening to data change events to update web
-page data in real-time. In this tutorial we will be using toaster as the
-base.
-
-When you call the *make-toast* RPC, it sets *toasterStatus* to "down" to
-reflect that the toaster is busy making toast. When it finishes,
-*toasterStatus* is set to "up" again. We will listen to these toaster
-status changes in the data store and reflect them on our web page in
-real time thanks to WebSocket data change notifications.
-
-Simple javascript client implementation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-We will create a simple JavaScript web application that listens for
-updates on the *toasterStatus* leaf and updates an element of our web page
-according to the new toaster status.
-
-Create stream
-^^^^^^^^^^^^^
-
-First you need to create the stream that you are planning to subscribe to.
-This can be achieved by invoking the "create-data-change-event-subscription"
-RPC on RESTCONF via an AJAX request. You need to provide the data store
-**path** that you plan to listen on, the **data store type** and the **scope**.
-If the request is successful you can extract the **stream-name** from
-the response and use that to subscribe to the newly created stream. The
-*{username}* and *{password}* fields represent your credentials that you
-use to connect to OpenDaylight via RESTCONF:
-
-.. note::
-
-    The default user name and password are "admin".
-
-.. code:: javascript
-
-    function createStream() {
-        $.ajax(
-            {
-                url: 'http://{odlAddress}:{odlPort}/restconf/operations/sal-remote:create-data-change-event-subscription',
-                type: 'POST',
-                headers: {
-                  'Authorization': 'Basic ' + btoa('{username}:{password}'),
-                  'Content-Type': 'application/json'
-                },
-                data: JSON.stringify(
-                    {
-                        'input': {
-                            'path': '/toaster:toaster/toaster:toasterStatus',
-                            'sal-remote-augment:datastore': 'OPERATIONAL',
-                            'sal-remote-augment:scope': 'ONE'
-                        }
-                    }
-                )
-            }).done(function (data) {
-                // this function will be called when ajax call is executed successfully
-                subscribeToStream(data.output['stream-name']);
-            }).fail(function (data) {
-                // this function will be called when ajax call fails
-                console.log("Create stream call unsuccessful");
-            })
-    }
-
-Subscribe to stream
-^^^^^^^^^^^^^^^^^^^
-
-The next step is to subscribe to the stream. To subscribe to the stream
-you need to call *GET* on
-*http://{odlAddress}:{odlPort}/restconf/streams/stream/{stream-name}*.
-If the call is successful, you get the WebSocket address for this stream in
-the **Location** parameter of the response header. You can read the response
-header by calling *getResponseHeader(\ *Location*)* on the HttpRequest
-object inside the *done()* function call:
-
-.. code:: javascript
-
-    function subscribeToStream(streamName) {
-        $.ajax(
-            {
-                url: 'http://{odlAddress}:{odlPort}/restconf/streams/stream/' + streamName,
-                type: 'GET',
-                headers: {
-                  'Authorization': 'Basic ' + btoa('{username}:{password}'),
-                }
-            }
-        ).done(function (data, textStatus, httpReq) {
-            // we need function that has http request object parameter in order to access response headers.
-            listenToNotifications(httpReq.getResponseHeader('Location'));
-        }).fail(function (data) {
-            console.log("Subscribe to stream call unsuccessful");
-        });
-    }
-
-Receive notifications
-^^^^^^^^^^^^^^^^^^^^^
-
-Once you have the WebSocket server location you can connect to it and
-start receiving data change events. You need to define functions that
-will handle events on the WebSocket. In order to process incoming events
-from OpenDaylight you need to provide a function that will handle
-*onmessage* events. The function must have one parameter that represents
-the received event object. The event data will be stored in
-*event.data*. The data will be in an XML format that you can then easily
-parse using jQuery.
-
-.. code:: javascript
-
-    function listenToNotifications(socketLocation) {
-        try {
-            var notificatinSocket = new WebSocket(socketLocation);
-
-            notificatinSocket.onmessage = function (event) {
-                // we process our received event here
-                console.log('Received toaster data change event.');
-                $($.parseXML(event.data)).find('data-change-event').each(
-                    function (index) {
-                        var operation = $(this).find('operation').text();
-                        if (operation == 'updated') {
-                            // toaster status was updated so we call function that gets the value of toasterStatus leaf
-                            updateToasterStatus();
-                            return false;
-                        }
-                    }
-                );
-            }
-            notificatinSocket.onerror = function (error) {
-                console.log("Socket error: " + error);
-            }
-            notificatinSocket.onopen = function (event) {
-                console.log("Socket connection opened.");
-            }
-            notificatinSocket.onclose = function (event) {
-                console.log("Socket connection closed.");
-            }
-            // if there is a problem on socket creation we get exception (i.e. when socket address is incorrect)
-        } catch(e) {
-            alert("Error when creating WebSocket" + e );
-        }
-    }
-
-The *updateToasterStatus()* function calls
-*GET* on the path that was modified and sets the toaster status in some web
-page element according to the received data. After the WebSocket connection
-has been established you can test events by calling make-toast RPC via
-RESTCONF.
-
-.. note::
-
-    For more information about WebSockets in JavaScript, visit `Writing
-    WebSocket client
-    applications <https://developer.mozilla.org/en-US/docs/WebSockets/Writing_WebSocket_client_applications>`__
-
-.. _config_subsystem:
-
-Config Subsystem
-----------------
-
-Overview
-~~~~~~~~
-
-The Controller configuration operation has three stages:
-
--  First, a Proposed configuration is created. Its target is to replace
-   the old configuration.
-
--  Second, the Proposed configuration is validated, and then committed.
-   If it passes validation successfully, the Proposed configuration
-   state will be changed to Validated.
-
--  Finally, a Validated configuration can be Committed, and the affected
-   modules can be reconfigured.
-
-In fact, each configuration operation is wrapped in a transaction. Once
-a transaction is created, it can be configured, that is to say, a user
-can abort the transaction during this stage. After the transaction
-configuration is done, it is committed to the validation stage. In this
-stage, the validation procedures are invoked. If one or more validations
-fail, the transaction can be reconfigured. Upon success, the second
-phase commit is invoked. If this commit is successful, the transaction
-enters the last stage, committed. After that, the desired modules are
-reconfigured. If the second phase commit fails, it means that the
-transaction is unhealthy - basically, a new configuration instance
-creation failed, and the application can be in an inconsistent state.
-
-.. figure:: ./images/configuration.jpg
-   :alt: Configuration states
-
-   Configuration states
-
-.. figure:: ./images/Transaction.jpg
-   :alt: Transaction states
-
-   Transaction states
-
-Validation
-~~~~~~~~~~
-
-To secure the consistency and safety of the new configuration and to
-avoid conflicts, the configuration validation process is necessary.
-Usually, validation checks the input parameters of a new configuration,
-and mostly verifies module-specific relationships. The validation
-procedure results in a decision on whether the proposed configuration is
-healthy.
-
-Dependency resolver
-~~~~~~~~~~~~~~~~~~~
-
-Since there can be dependencies between modules, a change in a module
-configuration can affect the state of other modules. Therefore, we need
-to verify whether dependencies on other modules can be resolved. The
-Dependency Resolver acts in a manner similar to dependency injectors.
-Basically, a dependency tree is built.
-
-APIs and SPIs
-~~~~~~~~~~~~~
-
-This section describes configuration system APIs and SPIs.
-
-SPIs
-^^^^
-
-**Module** org.opendaylight.controller.config.spi. Module is the common
-interface for all modules: every module must implement it. The module is
-designated to hold configuration attributes, validate them, and create
-instances of service based on the attributes. This instance must
-implement the AutoCloseable interface so that resources can be cleaned up. If
-the module was created from an already running instance, it contains an
-old instance of the module. A module can implement multiple services. If
-the module depends on other modules, setters need to be annotated with
-@RequireInterface.
-
-**Module creation**
-
-1. The module needs to be configured, set with all required attributes.
-
-2. The module is then moved to the commit stage for validation. If the
-   validation fails, the module attributes can be reconfigured.
-   Otherwise, a new instance is either created, or an old instance is
-   reconfigured. A module instance is identified by ModuleIdentifier,
-   consisting of the factory name and instance name.
-
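-A hedged sketch of a module implementation follows. The attribute, the
-EventBusService dependency and the ToasterProvider instance are
-assumptions made for illustration; only the overall shape (attributes,
-validation, @RequireInterface on the dependency setter, and an
-AutoCloseable instance) reflects the description above. Real
-implementations have further obligations, such as exposing a
-ModuleIdentifier and handling a previous instance, which are omitted
-here::
-
-    // Illustrative only: a module with one attribute and one dependency.
-    public final class ToasterProviderModule implements Module {
-        private long maxSlices;            // configuration attribute
-        private EventBusService eventBus;  // dependency on another module
-
-        public void setMaxSlices(final long maxSlices) {
-            this.maxSlices = maxSlices;
-        }
-
-        @RequireInterface(EventBusService.class)
-        public void setEventBus(final EventBusService eventBus) {
-            this.eventBus = eventBus;
-        }
-
-        @Override
-        public void validate() {
-            if (maxSlices <= 0) {
-                throw new IllegalArgumentException("max-slices must be positive");
-            }
-        }
-
-        @Override
-        public AutoCloseable getInstance() {
-            // The created service must be AutoCloseable for clean-up.
-            return new ToasterProvider(maxSlices, eventBus);
-        }
-    }
-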
-| **ModuleFactory** org.opendaylight.controller.config.spi. The
-  ModuleFactory interface must be implemented by each module factory.
-| A module factory can create a new module instance in two ways:
-
--  From an existing module instance
-
--  | As an entirely new instance
-   | ModuleFactory can also return default modules, which are useful
-     for populating the registry with already existing configurations.
-     A module factory implementation must have a globally unique name.
-
-APIs
-^^^^
-
-+--------------------------------------+--------------------------------------+
-| ConfigRegistry                       | Represents functionality provided by |
-|                                      | a configuration transaction (create, |
-|                                      | destroy module, validate, or abort   |
-|                                      | transaction).                        |
-+--------------------------------------+--------------------------------------+
-| ConfigTransactionController          | Represents functionality for         |
-|                                      | manipulating configuration           |
-|                                      | transactions (begin, commit config). |
-+--------------------------------------+--------------------------------------+
-| RuntimeBeanRegistratorAwareConfiBean | The module implementing this         |
-|                                      | interface will receive               |
-|                                      | RuntimeBeanRegistrator before        |
-|                                      | getInstance is invoked.              |
-+--------------------------------------+--------------------------------------+
-
-Runtime APIs
-^^^^^^^^^^^^
-
-+--------------------------------------+--------------------------------------+
-| RuntimeBean                          | Common interface for all runtime     |
-|                                      | beans                                |
-+--------------------------------------+--------------------------------------+
-| RootRuntimeBeanRegistrator           | Represents functionality for root    |
-|                                      | runtime bean registration, which     |
-|                                      | subsequently allows hierarchical     |
-|                                      | registrations                        |
-+--------------------------------------+--------------------------------------+
-| HierarchicalRuntimeBeanRegistration  | Represents functionality for runtime |
-|                                      | bean registration and                |
-|                                      | unregistration from the hierarchy    |
-+--------------------------------------+--------------------------------------+
-
-JMX APIs
-^^^^^^^^
-
-| The JMX API serves as a bridge between the Client API and the JMX
-  platform.
-
-+--------------------------------------+--------------------------------------+
-| ConfigTransactionControllerMXBean    | Extends ConfigTransactionController, |
-|                                      | used by Jolokia clients to operate   |
-|                                      | on a configuration transaction.      |
-+--------------------------------------+--------------------------------------+
-| ConfigRegistryMXBean                 | Represents entry point of            |
-|                                      | configuration management for         |
-|                                      | MXBeans.                             |
-+--------------------------------------+--------------------------------------+
-| Object names                         | Object Name is the pattern used in   |
-|                                      | JMX to locate JMX beans. It consists |
-|                                      | of domain and key properties (at     |
-|                                      | least one key-value pair). Domain is |
-|                                      | defined as                           |
-|                                      | "org.opendaylight.controller". The   |
-|                                      | only mandatory property is "type".   |
-+--------------------------------------+--------------------------------------+
-
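-For example, using the standard javax.management API, an Object Name in
-this domain could be constructed as follows; the key properties beyond
-the mandatory "type" are illustrative::
-
-    import javax.management.ObjectName;
-
-    // Standard JMX API; the constructor throws MalformedObjectNameException
-    // if the pattern is invalid. The chosen key properties are examples only.
-    ObjectName registry = new ObjectName(
-            "org.opendaylight.controller:type=ConfigRegistry");
-
-    ObjectName transaction = new ObjectName(
-            "org.opendaylight.controller:type=ConfigTransaction,TransactionName=tx-1");
-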
-Use case scenarios
-^^^^^^^^^^^^^^^^^^
-
-| A few samples of successful and unsuccessful transaction scenarios
-  follow:
-
-**Successful commit scenario**
-
-1.  The user creates a transaction by calling the createTransaction()
-    method on ConfigRegistry.
-
-2.  ConfigRegistry creates a transaction controller and registers the
-    transaction as a new bean.
-
-3.  Runtime configurations are copied to the transaction. The user can
-    create modules and set their attributes.
-
-4.  The user commits the configuration transaction.
-
-5.  The validation process is performed.
-
-6.  After successful validation, the second phase commit begins.
-
-7.  Modules proposed to be destroyed are destroyed, and their service
-    instances are closed.
-
-8.  Runtime beans are registered with the registrator.
-
-9.  The transaction controller invokes the method getInstance on each
-    module.
-
-10. The transaction is committed, and resources are either closed or
-    released.
-
-| **Validation failure scenario**
-| The transaction proceeds as in the previous case until the validation
-  process.
-
-1. If validation fails (that is to say, due to illegal input attribute
-   values or a dependency resolver failure), a ValidationException is
-   thrown and exposed to the user.
-
-2. The user can decide to reconfigure the transaction and commit again,
-   or abort the current transaction.
-
-3. On aborted transactions, TransactionController and JMXRegistrator are
-   properly closed.
-
-4. An unregistration event is sent to ConfigRegistry.
-
-Default module instances
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-The configuration subsystem provides a way for modules to create default
-instances. A default instance is an instance of a module that is created
-at module bundle start-up, that is, when the module becomes visible to
-the configuration subsystem (for example, when its bundle is activated
-in the OSGi environment). By default, no default instances are produced.
-
-A default instance does not differ from instances created later in the
-module life-cycle, except that its configuration cannot be provided by
-the configuration subsystem. The module has to acquire the configuration
-for such instances on its own, for example from environment variables.
-After creation, a default instance acts as a regular instance and fully
-participates in the configuration subsystem (it can be reconfigured or
-deleted in subsequent transactions).
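-
-A default instance might bootstrap its configuration from the
-environment along these lines; the factory shape and the environment
-variable name are assumptions made for illustration::
-
-    // Illustrative only: a default instance configured from an
-    // environment variable instead of the configuration subsystem.
-    public final class DefaultToasterModuleFactory {
-        public ToasterProviderModule createDefaultModule() {
-            ToasterProviderModule module = new ToasterProviderModule();
-            String maxSlices = System.getenv("TOASTER_MAX_SLICES");  // hypothetical variable
-            module.setMaxSlices(maxSlices != null ? Long.parseLong(maxSlices) : 2L);
-            return module;
-        }
-    }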
diff --git a/docs/images/Get.png b/docs/images/Get.png
new file mode 100644 (file)
index 0000000..5c1f484
Binary files /dev/null and b/docs/images/Get.png differ
diff --git a/docs/images/Put.png b/docs/images/Put.png
new file mode 100644 (file)
index 0000000..bfcaa87
Binary files /dev/null and b/docs/images/Put.png differ
index 4b2399c2eecedde8a4d99c635feac37d7e210696..22c11bcb3cb9aaf90e66592d343e3f425deaccf9 100644 (file)
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>odlparent</artifactId>
-        <version>4.0.9</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>controller-docs</artifactId>
     <packaging>jar</packaging>
-    <version>0.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <name>${project.artifactId}</name>
     <description>Controller documentation</description>
 
         <dependencies>
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
-                <artifactId>config-artifacts</artifactId>
-                <version>0.11.0-SNAPSHOT</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-            <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>1.10.0-SNAPSHOT</version>
+                <artifactId>controller-artifacts</artifactId>
+                <version>${project.version}</version>
                 <type>pom</type>
                 <scope>import</scope>
             </dependency>
     </dependencyManagement>
 
     <dependencies>
-        <!-- Config Subsystem remnants -->
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-config-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-event-executor-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-threadgroup-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>netty-timer-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>threadpool-config-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>threadpool-config-impl</artifactId>
-        </dependency>
-
-        <!-- MD-SAL artifacts -->
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-impl</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-util</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-core-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-core-spi</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-core-compat</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-broker-impl</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-binding-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-binding-broker-impl</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-binding-util</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-connector-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-inmemory-datastore</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>mdsal-trace-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>mdsal-trace-dom-impl</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>mdsal-trace-binding-impl</artifactId>
-        </dependency>
-
-        <!-- Base model augmentations -->
-        <dependency>
-            <groupId>org.opendaylight.controller.model</groupId>
-            <artifactId>model-inventory</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller.model</groupId>
-            <artifactId>model-topology</artifactId>
-        </dependency>
-
         <!-- Clustered implementation -->
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>sal-clustering-commons</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-clustering-config</artifactId>
-        </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>sal-distributed-datastore</artifactId>
             <artifactId>cds-dom-api</artifactId>
         </dependency>
 
-        <!-- MessageBus -->
+        <!-- Third-party dependencies -->
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-api</artifactId>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-spi</artifactId>
+            <groupId>com.guicedee.services</groupId>
+            <artifactId>javax.inject</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-impl</artifactId>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-util</artifactId>
+            <groupId>org.kohsuke.metainf-services</groupId>
+            <artifactId>metainf-services</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.framework</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.metatype.annotations</artifactId>
         </dependency>
     </dependencies>
 
             </plugin>
             <plugin>
                 <artifactId>maven-javadoc-plugin</artifactId>
-                    <!-- FIXME: remove override once odlparent ships 3.1.0+ -->
-                <version>3.0.0</version>
                 <executions>
                     <execution>
                         <id>attach-javadocs</id>
                 <!-- FIXME: remove this section once we can activate javadoc-links profile -->
                 <configuration combine.children="append">
                     <links>
-                        <link>https://junit.org/junit4/javadoc/4.11/</link>
-                        <link>http://hamcrest.org/JavaHamcrest/javadoc/1.3/</link>
-                        <link>http://google.github.io/truth/api/0.42/</link>
+                        <link>https://junit.org/junit4/javadoc/4.13/</link>
+                        <link>https://hamcrest.org/JavaHamcrest/javadoc/2.2/</link>
                         <link>https://www.slf4j.org/apidocs/</link>
-                        <link>https://xerces.apache.org/xerces2-j/javadocs/api/</link>
-                        <link>https://google.github.io/guava/releases/25.1-jre/api/docs/</link>
-                        <link>http://doc.akka.io/japi/akka/2.5.19/</link>
-                        <link>http://netty.io/4.1/api/</link>
-                        <link>https://commons.apache.org/proper/commons-lang/javadocs/api-2.6/</link>
-                        <link>https://commons.apache.org/proper/commons-lang/javadocs/api-3.8.1/</link>
-                        <link>https://commons.apache.org/proper/commons-codec/apidocs/</link>
+                        <link>https://guava.dev/releases/32.0.1-jre/api/docs/</link>
+                        <link>https://doc.akka.io/japi/akka/2.6/</link>
+                        <link>https://netty.io/4.1/api/</link>
+                        <link>https://commons.apache.org/proper/commons-lang/javadocs/api-release/</link>
+
+                        <link>https://www.javadoc.io/doc/org.opendaylight.odlparent/odlparent-docs/13.0.11/</link>
+                        <link>https://www.javadoc.io/doc/org.opendaylight.yangtools/yangtools-docs/13.0.2/</link>
+                        <link>https://www.javadoc.io/doc/org.opendaylight.mdsal/mdsal-docs/13.0.1/</link>
                     </links>
                     <groups>
                         <group>
                             <title>Distributed Datastore DOM API extensions</title>
                             <packages>org.opendaylight.controller.cluster.dom.api</packages>
                         </group>
-                        <group>
-                            <title>MD-SAL Common API (deprecated)</title>
-                            <packages>org.opendaylight.controller.md.sal.common.*</packages>
-                        </group>
-                        <group>
-                            <title>MD-SAL Low-level (DOM) API (deprecated)</title>
-                            <packages>org.opendaylight.controller.md.sal.dom.api:org.opendaylight.controller.md.sal.dom.spi:org.opendaylight.controller.sal.core.*</packages>
-                        </group>
-                        <group>
-                            <title>MD-SAL Binding API (deprecated)</title>
-                            <packages>org.opendaylight.controller.sal.binding.api*:org.opendaylight.controller.md.sal.binding.api*:org.opendaylight.controller.md.sal.binding.spi:org.opendaylight.controller.md.sal.binding.compat:org.opendaylight.controller.md.sal.binding.util</packages>
-                        </group>
                         <group>
                             <title>Akka RAFT implementation</title>
                             <packages>org.opendaylight.controller.cluster.raft*</packages>
                         </group>
-                        <group>
-                            <title>MD-SAL Message Bus Bridge (experimental)</title>
-                            <packages>org.opendaylight.controller.messagebus.*:org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.*</packages>
-                        </group>
                         <group>
                             <title>MD-SAL Tracing Utilities</title>
                             <packages>org.opendaylight.controller.md.sal.trace.*:org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908*</packages>
diff --git a/features/benchmark/features-mdsal-benchmark/pom.xml b/features/benchmark/features-mdsal-benchmark/pom.xml
deleted file mode 100644 (file)
index 14021d1..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>feature-repo-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-mdsal-benchmark</artifactId>
-    <version>1.8.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-benchmark-api</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-dsbenchmark</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-benchmark</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-ntfbenchmark</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/features/benchmark/pom.xml b/features/benchmark/pom.xml
deleted file mode 100644 (file)
index 1e6f2dc..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems and others. All rights reserved.
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>odlparent-lite</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-mdsal-benchmark-aggregator</artifactId>
-    <version>1.8.0-SNAPSHOT</version>
-    <packaging>pom</packaging>
-    <name>${project.artifactId}</name>
-
-    <modules>
-        <module>features-mdsal-benchmark</module>
-        <module>odl-benchmark-api</module>
-        <module>odl-dsbenchmark</module>
-        <module>odl-mdsal-benchmark</module>
-        <module>odl-ntfbenchmark</module>
-    </modules>
-</project>
diff --git a/features/config-netty/features-config-netty/pom.xml b/features/config-netty/features-config-netty/pom.xml
deleted file mode 100644 (file)
index 2cbf46e..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>feature-repo-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-config-netty</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-config-netty</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/features/config-netty/odl-config-netty/pom.xml b/features/config-netty/odl-config-netty/pom.xml
deleted file mode 100644 (file)
index 7e382ee..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-config-netty</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <name>OpenDaylight :: Config-Netty</name>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>config-artifacts</artifactId>
-                <version>${project.version}</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-netty-4</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-guava</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>netty-event-executor-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>netty-threadgroup-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>netty-timer-config</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>threadpool-config-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>threadpool-config-impl</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>blueprint</artifactId>
-            <version>0.11.0-SNAPSHOT</version>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/features/config-netty/odl-config-netty/src/main/feature/feature.xml b/features/config-netty/odl-config-netty/src/main/feature/feature.xml
deleted file mode 100644 (file)
index e4048a9..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<!--
- Copyright © 2018 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-config-netty">
-    <feature name="odl-config-netty">
-        <feature version="[4,5)">odl-guava</feature>
-        <feature version="[4,5)">odl-netty-4</feature>
-    </feature>
-</features>
diff --git a/features/extras/odl-extras-all/pom.xml b/features/extras/odl-extras-all/pom.xml
deleted file mode 100644 (file)
index c4326f5..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-extras-all</artifactId>
-    <version>1.13.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <name>OpenDaylight :: Extras :: All</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-jolokia</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/features/extras/odl-jolokia/src/main/feature/feature.xml b/features/extras/odl-jolokia/src/main/feature/feature.xml
deleted file mode 100644 (file)
index 7885b02..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<features name="odl-extras-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0">
-    <feature name="odl-jolokia" version="${project.version}">
-        <configfile finalname="/etc/org.jolokia.osgi.cfg">mvn:org.opendaylight.controller/odl-jolokia-osgi/${odl-jolokia-osgi.version}/cfg/configuration</configfile>
-        <feature>http</feature>
-    </feature>
-</features>
diff --git a/features/extras/pom.xml b/features/extras/pom.xml
deleted file mode 100644 (file)
index 2311080..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>odlparent-lite</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-extras-aggregator</artifactId>
-    <version>1.13.0-SNAPSHOT</version>
-    <packaging>pom</packaging>
-
-    <modules>
-        <module>features-extras</module>
-        <module>odl-extras-all</module>
-        <module>odl-jolokia</module>
-    </modules>
-
-    <scm>
-        <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-        <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-        <tag>HEAD</tag>
-        <url>https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=summary</url>
-    </scm>
-</project>
similarity index 55%
rename from features/extras/features-extras/pom.xml
rename to features/features-controller-experimental/pom.xml
index d7c78e83154a73206eb437a5a06c7038180b9bb9..6401adb5fe235bc7a5e3d2fd0b95a59e4c3665f5 100644 (file)
@@ -8,34 +8,38 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>feature-repo-parent</artifactId>
-        <version>4.0.9</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-extras</artifactId>
-    <version>1.13.0-SNAPSHOT</version>
+    <artifactId>features-controller-experimental</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>feature</packaging>
+    <name>OpenDaylight :: Controller :: Experimental Features</name>
+    <description>Controller Experimental Features</description>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.opendaylight.controller</groupId>
+                <artifactId>controller-artifacts</artifactId>
+                <version>${project.version}</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
 
     <dependencies>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-extras-all</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-jolokia</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-toaster</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
     </dependencies>
-
 </project>
similarity index 61%
rename from features/mdsal/odl-mdsal-model-inventory/pom.xml
rename to features/features-controller-testing/pom.xml
index 22c3aa9e918ca9fb9899a09d7e976dbaabb9ba5c..fb095fe8a15a78ea5c7dd237bde0d96266b68f87 100644 (file)
@@ -8,33 +8,25 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
+        <artifactId>feature-repo-parent</artifactId>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-model-inventory</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <artifactId>features-controller-testing</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>feature</packaging>
-
-    <name>OpenDaylight :: MD-SAL :: Inventory Model</name>
+    <name>OpenDaylight :: Controller :: Features to support CSIT testing</name>
+    <description>Controller CSIT Features</description>
 
     <dependencyManagement>
         <dependencies>
-            <dependency>
-                <groupId>org.opendaylight.mdsal</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>3.0.6</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
+                <artifactId>controller-artifacts</artifactId>
                 <version>${project.version}</version>
                 <type>pom</type>
                 <scope>import</scope>
 
     <dependencies>
         <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>odl-mdsal-model-rfc6991</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-clustering-test-app</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
-
         <dependency>
-            <groupId>org.opendaylight.controller.model</groupId>
-            <artifactId>model-inventory</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-mdsal-benchmark</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
         </dependency>
     </dependencies>
 </project>
similarity index 53%
rename from features/mdsal/odl-message-bus-collector/pom.xml
rename to features/features-controller/pom.xml
index 17c5b8ad5bacd3da2d5d1d6984e18f1e3fbec7b1..64d72cf7c7ac45ab52ee7d683551303c01584440 100644 (file)
@@ -8,57 +8,45 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
+        <artifactId>feature-repo-parent</artifactId>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-message-bus-collector</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <artifactId>features-controller</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>feature</packaging>
-
-    <properties>
-        <config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
-    </properties>
+    <name>OpenDaylight :: Controller :: Features</name>
+    <description>Controller Production Features</description>
 
     <dependencyManagement>
         <dependencies>
             <dependency>
                 <groupId>org.opendaylight.controller</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
+                <artifactId>controller-artifacts</artifactId>
                 <version>${project.version}</version>
                 <type>pom</type>
                 <scope>import</scope>
             </dependency>
-            <dependency>
-                <groupId>org.opendaylight.mdsal</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>3.0.6</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
         </dependencies>
     </dependencyManagement>
 
     <dependencies>
-        <!-- FIXME: Bug 4202: Switch to MD-SAL provided odl-mdsal-binding-base -->
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-mdsal-model-inventory</artifactId>
+            <artifactId>odl-controller-akka</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>odl-mdsal-model-draft-clemm-netmod-yang-network-topo-01</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-controller-scala</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
-
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>odl-mdsal-broker</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-api</artifactId>
+            <artifactId>odl-controller-broker-local</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-spi</artifactId>
+            <artifactId>odl-mdsal-clustering-commons</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-util</artifactId>
+            <artifactId>odl-controller-mdsal-common</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-impl</artifactId>
+            <artifactId>odl-mdsal-distributed-datastore</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-mdsal-remoterpc-connector</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-controller-blueprint</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-jolokia</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
         </dependency>
     </dependencies>
 </project>
diff --git a/features/mdsal/features-mdsal/pom.xml b/features/mdsal/features-mdsal/pom.xml
deleted file mode 100644 (file)
index 3b6fba4..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>feature-repo-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-mdsal</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-clustering-test-app</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-all</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker-local</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-clustering</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-clustering-commons</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-controller-mdsal-common</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-distributed-datastore</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-model-inventory</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-controller-model-topology</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-remoterpc-connector</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-message-bus-collector</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-toaster</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/features/mdsal/odl-controller-mdsal-common/pom.xml b/features/mdsal/odl-controller-mdsal-common/pom.xml
deleted file mode 100644 (file)
index af63eab..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-controller-mdsal-common</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <name>OpenDaylight :: MDSAL :: Common</name>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>org.opendaylight.mdsal</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>3.0.6</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-common</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-binding-runtime</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-common-api</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-common-impl</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-common-util</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/features/mdsal/odl-controller-model-topology/pom.xml b/features/mdsal/odl-controller-model-topology/pom.xml
deleted file mode 100644 (file)
index 3a35a45..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2018 Ericsson India Global Services Pvt Ltd. and others.  All rights reserved.
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-controller-model-topology</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <name>OpenDaylight :: MD-SAL :: Topology Model</name>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>org.opendaylight.mdsal</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>3.0.6</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-            <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>${project.version}</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>odl-mdsal-model-draft-clemm-netmod-yang-network-topo-01</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-mdsal-model-inventory</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller.model</groupId>
-            <artifactId>model-topology</artifactId>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/features/mdsal/odl-mdsal-broker-local/pom.xml b/features/mdsal/odl-mdsal-broker-local/pom.xml
deleted file mode 100644 (file)
index 6beb66e..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-broker-local</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <name>OpenDaylight :: MDSAL :: Broker</name>
-
-    <properties>
-        <blueprint.version>0.11.0-SNAPSHOT</blueprint.version>
-    </properties>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>org.opendaylight.yangtools</groupId>
-                <artifactId>yangtools-artifacts</artifactId>
-                <version>2.1.8</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-            <dependency>
-                <groupId>org.opendaylight.mdsal</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>3.0.6</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-            <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>1.10.0-SNAPSHOT</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>odl-yangtools-codec</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-binding-dom-adapter</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-config-netty</artifactId>
-            <version>0.11.0-SNAPSHOT</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-lmax-3</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-controller-mdsal-common</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <!-- FIXME: Bug 4202: Add MD-SAL provided odl-mdsal-binding-adapter -->
-        <!-- FIXME: Bug 4202: Add MD-SAL provided odl-mdsal-dom-broker -->
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-dom</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-eos-dom</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-eos-binding</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-singleton-dom</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-core-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-core-spi</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-broker-impl</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-binding-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-binding-broker-impl</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-binding-util</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-connector-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-inmemory-datastore</artifactId>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/features/mdsal/odl-mdsal-broker/pom.xml b/features/mdsal/odl-mdsal-broker/pom.xml
deleted file mode 100644
index 753dfc6..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-broker</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-remoterpc-connector</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-distributed-datastore</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <!-- finalname="configuration/initial/akka.conf" -->
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-clustering-config</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>akkaconf</classifier>
-        </dependency>
-        <dependency>
-            <!-- finalname="configuration/factory/akka.conf" override="true" -->
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-clustering-config</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>factoryakkaconf</classifier>
-        </dependency>
-        <dependency>
-            <!-- finalname="configuration/initial/module-shards.conf" -->
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-clustering-config</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>moduleshardconf</classifier>
-        </dependency>
-        <dependency>
-            <!-- finalname="configuration/initial/modules.conf" -->
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-clustering-config</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>moduleconf</classifier>
-        </dependency>
-        <dependency>
-            <!-- finalname="etc/org.opendaylight.controller.cluster.datastore.cfg" -->
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-clustering-config</artifactId>
-            <version>${project.version}</version>
-            <type>cfg</type>
-            <classifier>datastore</classifier>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/features/mdsal/odl-mdsal-distributed-datastore/pom.xml b/features/mdsal/odl-mdsal-distributed-datastore/pom.xml
deleted file mode 100644
index 1bc682c..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-distributed-datastore</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>1.10.0-SNAPSHOT</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker-local</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-apache-commons-text</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-clustering-commons</artifactId>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>cds-access-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>cds-access-client</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>cds-dom-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-distributed-datastore</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-cluster-admin-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-cluster-admin-impl</artifactId>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/features/mdsal/odl-mdsal-distributed-datastore/src/main/feature/feature.xml b/features/mdsal/odl-mdsal-distributed-datastore/src/main/feature/feature.xml
deleted file mode 100644
index 7716205..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Copyright © 2017 Red Hat, Inc. and others.
-  ~
-  ~ This program and the accompanying materials are made available under the
-  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
-  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
-  -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
-    <feature name="odl-mdsal-distributed-datastore" version="${project.version}">
-        <feature version="[4,5)">odl-apache-commons-text</feature>
-    </feature>
-</features>
diff --git a/features/mdsal/odl-mdsal-remoterpc-connector/src/main/feature/feature.xml b/features/mdsal/odl-mdsal-remoterpc-connector/src/main/feature/feature.xml
deleted file mode 100644
index 9079aa9..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  ~ Copyright © 2017 Red Hat, Inc. and others.
-  ~
-  ~ This program and the accompanying materials are made available under the
-  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
-  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
-  -->
-<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
-    <feature name="odl-mdsal-remoterpc-connector" version="${project.version}">
-        <feature version="[4,5)">odl-akka-leveldb-0.10</feature>
-    </feature>
-</features>
diff --git a/features/mdsal/pom.xml b/features/mdsal/pom.xml
deleted file mode 100644
index 7dbb5de..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>odlparent-lite</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-mdsal-aggregator</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-
-    <packaging>pom</packaging>
-
-    <modules>
-        <module>features-mdsal</module>
-        <module>odl-clustering-test-app</module>
-        <module>odl-controller-mdsal-common</module>
-        <module>odl-mdsal-all</module>
-        <module>odl-mdsal-broker</module>
-        <module>odl-mdsal-broker-local</module>
-        <module>odl-mdsal-clustering</module>
-        <module>odl-mdsal-clustering-commons</module>
-        <module>odl-mdsal-distributed-datastore</module>
-        <module>odl-mdsal-model-inventory</module>
-        <module>odl-controller-model-topology</module>
-        <module>odl-mdsal-remoterpc-connector</module>
-        <module>odl-message-bus-collector</module>
-        <module>odl-toaster</module>
-    </modules>
-
-    <scm>
-        <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-        <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-        <tag>HEAD</tag>
-        <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-    </scm>
-</project>
similarity index 66%
rename from features/mdsal/odl-clustering-test-app/pom.xml
rename to features/odl-clustering-test-app/pom.xml
index 1a461fd6bdbc78ce2818a7388e7828c02bf54db2..672ac82c36119d5c792c81dd7233d2a7bc20cf0b 100644
@@ -8,31 +8,16 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>odl-clustering-test-app</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
     <packaging>feature</packaging>
 
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>org.opendaylight.mdsal</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>3.0.6</version>
-                <scope>import</scope>
-                <type>pom</type>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-
     <dependencies>
         <dependency>
             <groupId>org.opendaylight.mdsal.model</groupId>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>odl-mdsal-broker</artifactId>
-            <version>${project.version}</version>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <artifactId>clustering-it-model</artifactId>
-            <version>${project.version}</version>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <artifactId>clustering-it-provider</artifactId>
-            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller.samples</groupId>
+            <artifactId>clustering-it-karaf-cli</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-cluster-admin-karaf-cli</artifactId>
         </dependency>
     </dependencies>
-
 </project>
diff --git a/features/odl-clustering-test-app/src/main/feature/feature.xml b/features/odl-clustering-test-app/src/main/feature/feature.xml
new file mode 100644
index 0000000..69d3b61
--- /dev/null
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2020 PANTHEON.tech, s.r.o. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-clustering-test-app-${project.version}">
+    <feature name="odl-clustering-test-app" version="${project.version}">
+        <feature version="[13,14)">odl-mdsal-model-rfc6991</feature>
+    </feature>
+</features>
diff --git a/features/odl-controller-akka/pom.xml b/features/odl-controller-akka/pom.xml
new file mode 100644
index 0000000..b1f43ef
--- /dev/null
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>single-feature-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
+    </parent>
+
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>odl-controller-akka</artifactId>
+    <packaging>feature</packaging>
+
+    <name>Akka Runtime for OpenDaylight</name>
+
+    <properties>
+        <checkDependencyChange>true</checkDependencyChange>
+        <failOnDependencyChange>true</failOnDependencyChange>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-controller-scala</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/features/odl-controller-akka/src/main/history/dependencies.xml b/features/odl-controller-akka/src/main/history/dependencies.xml
new file mode 100644
index 0000000..4e7493f
--- /dev/null
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.6.0" name="odl-controller-akka">
+    <feature version="0.0.0">
+        <feature>odl-controller-scala</feature>
+        <bundle>mvn:com.typesafe/config/1.4.2</bundle>
+        <bundle>mvn:com.typesafe/ssl-config-core_2.13/0.4.3</bundle>
+        <bundle>mvn:io.aeron/aeron-client/1.38.1</bundle>
+        <bundle>mvn:io.aeron/aeron-driver/1.38.1</bundle>
+        <bundle>mvn:io.netty/netty/3.10.6.Final</bundle>
+        <bundle>mvn:org.agrona/agrona/1.15.2</bundle>
+        <bundle>mvn:org.opendaylight.controller/repackaged-akka/${project.version}</bundle>
+        <bundle>mvn:org.reactivestreams/reactive-streams/1.0.4</bundle>
+        <feature>wrap</feature>
+        <bundle>wrap:mvn:org.lmdbjava/lmdbjava/0.7.0</bundle>
+    </feature>
+</features>
similarity index 56%
rename from features/benchmark/odl-mdsal-benchmark/pom.xml
rename to features/odl-controller-blueprint/pom.xml
index 069636546469fdc80ab7fc619e743f1f8f75714a..b6bba0b97c7e83fc2be0173e1c51316587ebb610 100644
     <modelVersion>4.0.0</modelVersion>
 
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-benchmark</artifactId>
-    <version>1.8.0-SNAPSHOT</version>
+    <artifactId>odl-controller-blueprint</artifactId>
     <packaging>feature</packaging>
 
-    <name>OpenDaylight :: MD-SAL :: benchmark</name>
+    <name>OpenDaylight :: Controller :: Blueprint</name>
+    <description>OpenDaylight Controller Blueprint Extension</description>
 
     <dependencies>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker</artifactId>
-            <version>1.10.0-SNAPSHOT</version>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>odl-yangtools-codec</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-benchmark-api</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-binding-api</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-dsbenchmark</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-binding-runtime</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-ntfbenchmark</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-dom-api</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>rpcbenchmark</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>blueprint</artifactId>
+            <!-- Note: aligned with feature.xml content -->
             <version>${project.version}</version>
         </dependency>
     </dependencies>
diff --git a/features/odl-controller-blueprint/src/main/feature/feature.xml b/features/odl-controller-blueprint/src/main/feature/feature.xml
new file mode 100644
index 0000000..82dfaca
--- /dev/null
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2019 PANTHEON.tech, s.r.o. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-blueprint-${project.version}">
+    <feature name="odl-controller-blueprint" version="${project.version}">
+        <feature version="[13,14)">odl-yangtools-codec</feature>
+        <feature version="[13,14)">odl-mdsal-binding-api</feature>
+        <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
+        <feature version="[13,14)">odl-mdsal-dom-api</feature>
+        <bundle start-level="40">mvn:org.opendaylight.controller/blueprint/${project.version}</bundle>
+    </feature>
+</features>
similarity index 50%
rename from features/mdsal/odl-mdsal-all/pom.xml
rename to features/odl-controller-broker-local/pom.xml
index b70a21d53c847a28fecd4b480b189a8bb9c99fe7..9bb7bc3a649e4d7eafe868d0263138a0b3671221 100644
@@ -8,55 +8,54 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-all</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <artifactId>odl-controller-broker-local</artifactId>
     <packaging>feature</packaging>
 
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>${project.version}</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
+    <name>OpenDaylight :: Controller :: Broker (local)</name>
+    <description>OpenDaylight Controller stack without clustering</description>
 
     <dependencies>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-dom</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker-local</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-eos-dom</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-model-inventory</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-eos-binding</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-toaster</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-singleton-dom</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
+
+        <!-- Single-node Entity Ownership Service -->
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-eos-dom-simple</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-inmemory-datastore</artifactId>
+        </dependency>
     </dependencies>
 </project>
similarity index 56%
rename from features/mdsal/odl-mdsal-broker-local/src/main/feature/feature.xml
rename to features/odl-controller-broker-local/src/main/feature/feature.xml
index accea8b64b3e479f1b85d1b0b139cda945c5a9fa..2068ab79f94f735afd99092299f07d2e034639d6 100644
@@ -7,9 +7,10 @@
  and is available at http://www.eclipse.org/legal/epl-v10.html
  -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-mdsal-${project.version}">
-    <feature name="odl-mdsal-broker-local" version="${project.version}">
-        <feature version="[4,5)">odl-lmax-3</feature>
-        <feature version="[2.1,3)">odl-yangtools-codec</feature>
-        <bundle start-level="40">mvn:org.opendaylight.controller/blueprint/${blueprint.version}</bundle>
+    <feature name="odl-controller-broker-local" version="${project.version}">
+        <feature version="[13,14)">odl-mdsal-dom</feature>
+        <feature version="[13,14)">odl-mdsal-eos-binding</feature>
+        <feature version="[13,14)">odl-mdsal-eos-dom</feature>
+        <feature version="[13,14)">odl-mdsal-singleton-dom</feature>
     </feature>
 </features>
similarity index 57%
rename from features/benchmark/odl-dsbenchmark/pom.xml
rename to features/odl-controller-mdsal-common/pom.xml
index 55cacd90bbf1b142a657562577d2bfaf8f806055..be8cc89a13637664a0b9eca7b453169d3f426889 100644
@@ -8,40 +8,33 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-dsbenchmark</artifactId>
-    <version>1.8.0-SNAPSHOT</version>
+    <artifactId>odl-controller-mdsal-common</artifactId>
     <packaging>feature</packaging>
-
-    <name>OpenDaylight :: dsbenchmark</name>
+    <name>OpenDaylight :: MDSAL :: Common</name>
 
     <dependencies>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker</artifactId>
-            <version>1.10.0-SNAPSHOT</version>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-common</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-benchmark-api</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-binding-runtime</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>dsbenchmark</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-common-util</artifactId>
         </dependency>
     </dependencies>
 </project>
diff --git a/features/odl-controller-mdsal-common/src/main/feature/feature.xml b/features/odl-controller-mdsal-common/src/main/feature/feature.xml
new file mode 100644
index 0000000..7ae191c
--- /dev/null
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2019 PANTHEON.tech, s.r.o. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-controller-mdsal-common-${project.version}">
+    <feature name="odl-controller-mdsal-common" version="${project.version}">
+        <feature version="[13,14)">odl-mdsal-common</feature>
+        <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
+    </feature>
+</features>
diff --git a/features/odl-controller-scala/pom.xml b/features/odl-controller-scala/pom.xml
new file mode 100644
index 0000000..5653979
--- /dev/null
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2016 Red Hat, Inc. and others.  All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>single-feature-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
+    </parent>
+
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>odl-controller-scala</artifactId>
+    <packaging>feature</packaging>
+
+    <name>Scala Runtime for OpenDaylight</name>
+
+    <properties>
+        <checkDependencyChange>true</checkDependencyChange>
+        <failOnDependencyChange>true</failOnDependencyChange>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-reflect</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang.modules</groupId>
+            <artifactId>scala-java8-compat_2.13</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang.modules</groupId>
+            <artifactId>scala-parser-combinators_2.13</artifactId>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/features/odl-controller-scala/src/main/history/dependencies.xml b/features/odl-controller-scala/src/main/history/dependencies.xml
new file mode 100644
index 0000000..73764f6
--- /dev/null
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.6.0" name="odl-controller-scala">
+    <feature version="0.0.0">
+        <bundle>mvn:org.scala-lang.modules/scala-java8-compat_2.13/1.0.2</bundle>
+        <bundle>mvn:org.scala-lang.modules/scala-parser-combinators_2.13/1.1.2</bundle>
+        <bundle>mvn:org.scala-lang/scala-library/2.13.13</bundle>
+        <bundle>mvn:org.scala-lang/scala-reflect/2.13.13</bundle>
+    </feature>
+</features>
similarity index 62%
rename from features/extras/odl-jolokia/pom.xml
rename to features/odl-jolokia/pom.xml
index 980bd610fa1fce06376b7cfe362dd6c96c832110..9ea689a4a034e6e2e943ee67cefc0afb9312ca28 100644
@@ -8,38 +8,40 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>odl-jolokia</artifactId>
-    <version>1.13.0-SNAPSHOT</version>
     <packaging>feature</packaging>
-
     <name>Jolokia JMX/HTTP bridge</name>
 
-    <properties>
-        <odl-jolokia-osgi.version>0.10.0-SNAPSHOT</odl-jolokia-osgi.version>
-    </properties>
-
     <dependencies>
         <dependency>
             <groupId>org.jolokia</groupId>
             <artifactId>jolokia-osgi</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.jolokia</groupId>
+                    <artifactId>jolokia-core</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.jolokia</groupId>
+                    <artifactId>jolokia-jsr160</artifactId>
+                </exclusion>
+            </exclusions>
         </dependency>
         <dependency>
             <!-- finalname="/etc/org.jolokia.osgi.cfg" -->
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>odl-jolokia-osgi</artifactId>
-            <version>${odl-jolokia-osgi.version}</version>
+            <!-- Note: aligned with feature.xml content -->
+            <version>${project.version}</version>
             <type>cfg</type>
             <classifier>configuration</classifier>
         </dependency>
     </dependencies>
-
 </project>
diff --git a/features/odl-jolokia/src/main/feature/feature.xml b/features/odl-jolokia/src/main/feature/feature.xml
new file mode 100644
index 0000000..eb4d1d5
--- /dev/null
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2016, 2018 Red Hat, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<features name="odl-extras-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0">
+    <feature name="odl-jolokia" version="${project.version}">
+        <configfile finalname="/etc/org.jolokia.osgi.cfg">mvn:org.opendaylight.controller/odl-jolokia-osgi/${project.version}/cfg/configuration</configfile>
+        <feature>http</feature>
+    </feature>
+</features>
similarity index 54%
rename from features/benchmark/odl-ntfbenchmark/pom.xml
rename to features/odl-mdsal-benchmark/pom.xml
index 62c089b6635914ff91b01c516b73947993f11c83..42b2d9e4014654852e72ea7df243a17951149bd9 100644
@@ -8,41 +8,40 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-ntfbenchmark</artifactId>
-    <version>1.8.0-SNAPSHOT</version>
+    <artifactId>odl-mdsal-benchmark</artifactId>
     <packaging>feature</packaging>
-
-    <name>OpenDaylight :: ntfbenchmark</name>
+    <name>OpenDaylight :: Controller :: Benchmarks</name>
+    <description>Controller benchmarks</description>
 
     <dependencies>
         <dependency>
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>odl-mdsal-broker</artifactId>
-            <version>1.10.0-SNAPSHOT</version>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-benchmark-api</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>benchmark-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>dsbenchmark</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>ntfbenchmark</artifactId>
-            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>rpcbenchmark</artifactId>
         </dependency>
     </dependencies>
-
 </project>
similarity index 55%
rename from features/mdsal/odl-mdsal-remoterpc-connector/pom.xml
rename to features/odl-mdsal-broker/pom.xml
index 20813fe400b00bc6f6eb40ea5275ec25aa2bca7a..686971b49f27b6ff823add1920828a31e4113279 100644
@@ -8,45 +8,44 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-remoterpc-connector</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <artifactId>odl-mdsal-broker</artifactId>
     <packaging>feature</packaging>
 
+    <name>OpenDaylight :: MDSAL :: Broker (clustered)</name>
+    <description>OpenDaylight Controller stack with clustering</description>
+
     <dependencies>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker-local</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-eos-binding</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-clustering-commons</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-singleton-dom</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
+
         <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-akka-leveldb-0.10</artifactId>
-            <version>4.0.9</version>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-mdsal-remoterpc-connector</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>sal-remoterpc-connector</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-mdsal-distributed-datastore</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
         </dependency>
     </dependencies>
 </project>
diff --git a/features/odl-mdsal-broker/src/main/feature/feature.xml b/features/odl-mdsal-broker/src/main/feature/feature.xml
new file mode 100644
index 0000000..ffa724c
--- /dev/null
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2019 PANTHEON.tech, s.r.o. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-mdsal-${project.version}">
+    <feature name="odl-mdsal-broker" version="${project.version}">
+        <feature version="[13,14)">odl-mdsal-singleton-dom</feature>
+        <feature version="[13,14)">odl-mdsal-eos-binding</feature>
+    </feature>
+</features>
similarity index 59%
rename from features/mdsal/odl-mdsal-clustering-commons/pom.xml
rename to features/odl-mdsal-clustering-commons/pom.xml
index b5cf579d2c98c919de73ac1fd4bed121fdfadf7b..0d98ae70987780a11766827b2b485be08e4887f5 100644
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>odl-mdsal-clustering-commons</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
     <packaging>feature</packaging>
+    <name>OpenDaylight :: Controller :: Clustering Commons</name>
+    <description>Common Clustering utilities</description>
 
     <dependencies>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker-local</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-controller-akka</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-akka-system-2.5</artifactId>
+            <artifactId>odl-apache-commons-lang3</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-akka-persistence-2.5</artifactId>
+            <artifactId>odl-dropwizard-metrics</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-akka-clustering-2.5</artifactId>
+            <artifactId>odl-netty-4</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-apache-commons-lang3</artifactId>
+            <artifactId>odl-servlet-api</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-dropwizard-metrics</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>odl-yangtools-data</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.odlparent</groupId>
-            <artifactId>odl-servlet-api</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>odl-yangtools-codec</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
+
         <dependency>
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>sal-clustering-commons</artifactId>
-            <version>${project.version}</version>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>sal-akka-raft</artifactId>
-            <version>${project.version}</version>
         </dependency>
 
         <!-- Segmented Journal for Akka, including Kryo and asm-5.2 -->
         <dependency>
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>sal-akka-segmented-journal</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>kryo</artifactId>
-            <version>4.0.2</version>
-        </dependency>
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>minlog</artifactId>
-            <version>1.3.1</version>
-        </dependency>
-        <dependency>
-            <groupId>com.esotericsoftware</groupId>
-            <artifactId>reflectasm</artifactId>
-            <version>1.11.8</version>
-        </dependency>
-        <dependency>
-            <groupId>org.ow2.asm</groupId>
-            <artifactId>asm</artifactId>
-            <version>5.2</version>
         </dependency>
     </dependencies>
-
 </project>
similarity index 58%
rename from features/mdsal/odl-mdsal-clustering-commons/src/main/feature/feature.xml
rename to features/odl-mdsal-clustering-commons/src/main/feature/feature.xml
index 00c2e52f848463659d0f89fad9eb31b9d26e08ab..7a41fc13bbfa9a4a0bc6c29e1835f3b590e75355 100644
@@ -8,11 +8,11 @@
   -->
 <features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
     <feature name="odl-mdsal-clustering-commons" version="${project.version}">
-        <feature version="[4,5)">odl-akka-system-2.5</feature>
-        <feature version="[4,5)">odl-akka-persistence-2.5</feature>
-        <feature version="[4,5)">odl-akka-clustering-2.5</feature>
-        <feature version="[4,5)">odl-apache-commons-lang3</feature>
-        <feature version="[4.0.8,5)">odl-dropwizard-metrics</feature>
-        <feature version="[4,5)">odl-servlet-api</feature>
+        <feature version="[13,14)">odl-apache-commons-lang3</feature>
+        <feature version="[13,14)">odl-dropwizard-metrics</feature>
+        <feature version="[13,14)">odl-netty-4</feature>
+        <feature version="[13,14)">odl-servlet-api</feature>
+        <feature version="[13,14)">odl-yangtools-data</feature>
+        <feature version="[13,14)">odl-yangtools-codec</feature>
     </feature>
 </features>
diff --git a/features/odl-mdsal-distributed-datastore/pom.xml b/features/odl-mdsal-distributed-datastore/pom.xml
new file mode 100644
index 0000000..35b1d52
--- /dev/null
@@ -0,0 +1,131 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2016, 2017 Red Hat, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>single-feature-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
+    </parent>
+
+    <artifactId>odl-mdsal-distributed-datastore</artifactId>
+    <packaging>feature</packaging>
+    <name>OpenDaylight :: Controller :: Clustered Datastore</name>
+    <description>Controller Clustered Datastore (akka-based)</description>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.opendaylight.odlparent</groupId>
+            <artifactId>odl-apache-commons-text</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>odl-yangtools-codec</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-eos-dom</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-dom-broker</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>odl-mdsal-binding-dom-adapter</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-mdsal-clustering-commons</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-controller-mdsal-common</artifactId>
+            <type>xml</type>
+            <classifier>features</classifier>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>cds-access-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>cds-access-client</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>cds-dom-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-distributed-datastore</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>eos-dom-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-cluster-admin-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-cluster-admin-impl</artifactId>
+        </dependency>
+
+        <dependency>
+            <!-- finalname="configuration/initial/akka.conf" -->
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-config</artifactId>
+            <type>xml</type>
+            <classifier>akkaconf</classifier>
+        </dependency>
+        <dependency>
+            <!-- finalname="configuration/factory/akka.conf" override="true" -->
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-config</artifactId>
+            <type>xml</type>
+            <classifier>factoryakkaconf</classifier>
+        </dependency>
+        <dependency>
+            <!-- finalname="configuration/initial/module-shards.conf" -->
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-config</artifactId>
+            <type>xml</type>
+            <classifier>moduleshardconf</classifier>
+        </dependency>
+        <dependency>
+            <!-- finalname="configuration/initial/modules.conf" -->
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-config</artifactId>
+            <type>xml</type>
+            <classifier>moduleconf</classifier>
+        </dependency>
+        <dependency>
+            <!-- finalname="etc/org.opendaylight.controller.cluster.datastore.cfg" -->
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-config</artifactId>
+            <type>cfg</type>
+            <classifier>datastore</classifier>
+        </dependency>
+    </dependencies>
+</project>
similarity index 56%
rename from features/mdsal/odl-mdsal-broker/src/main/feature/feature.xml
rename to features/odl-mdsal-distributed-datastore/src/main/feature/feature.xml
index 8474cc9b669c9a83dc373fa75dfdbc5f21b3d734..bf7451b93d4e7f421775592685d269ef309cf8a7 100644
@@ -1,13 +1,18 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<features name="odl-mdsal-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0">
-    <feature name="odl-mdsal-broker" version="${project.version}">
+  ~ Copyright © 2017 Red Hat, Inc. and others.
+  ~
+  ~ This program and the accompanying materials are made available under the
+  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+  -->
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.4.0" name="odl-controller-${project.version}">
+    <feature name="odl-mdsal-distributed-datastore" version="${project.version}">
+        <feature version="[13,14)">odl-apache-commons-text</feature>
+        <feature version="[13,14)">odl-yangtools-codec</feature>
+        <feature version="[13,14)">odl-mdsal-eos-dom</feature>
+        <feature version="[13,14)">odl-mdsal-dom-broker</feature>
+        <feature version="[13,14)">odl-mdsal-binding-dom-adapter</feature>
         <configfile finalname="configuration/initial/akka.conf">
             mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/akkaconf
         </configfile>
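The "[13,14)" strings in the feature definition above are OSGi-style version ranges: 13.0.0 inclusive up to, but excluding, 14.0.0. A minimal sketch of the semantics using the standard org.osgi.framework.VersionRange API (the class name and version literals below are illustrative only, not part of this change):

    import org.osgi.framework.Version;
    import org.osgi.framework.VersionRange;

    final class VersionRangeSketch {
        public static void main(final String[] args) {
            // [13,14): lower bound inclusive, upper bound exclusive.
            final VersionRange range = new VersionRange("[13,14)");
            System.out.println(range.includes(new Version("13.0.11"))); // true: any 13.x release qualifies
            System.out.println(range.includes(new Version("14.0.0")));  // false: 14.0.0 is excluded
        }
    }

Presumably the ranges let the feature resolve against whichever 13.x version of the upstream odlparent/yangtools/mdsal features is installed, without requiring an exact version match.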
similarity index 59%
rename from features/benchmark/odl-benchmark-api/pom.xml
rename to features/odl-mdsal-remoterpc-connector/pom.xml
index 247bfe2f5a1e2a8f1004f0b75fe062c6603b7aac..68033905be04c53fbc578292873149454ce68af5 100644 (file)
@@ -8,33 +8,28 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-benchmark-api</artifactId>
-    <version>1.8.0-SNAPSHOT</version>
+    <artifactId>odl-mdsal-remoterpc-connector</artifactId>
     <packaging>feature</packaging>
-
-    <name>OpenDaylight :: Benchmark :: API</name>
+    <name>OpenDaylight :: Controller :: Remote RPC Connector</name>
+    <description>Clustering remote RPC support</description>
 
     <dependencies>
         <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>odl-mdsal-binding-base</artifactId>
-            <version>3.0.6</version>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>odl-mdsal-distributed-datastore</artifactId>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>benchmark-api</artifactId>
-            <version>${project.version}</version>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-remoterpc-connector</artifactId>
         </dependency>
     </dependencies>
 </project>
similarity index 77%
rename from features/mdsal/odl-toaster/pom.xml
rename to features/odl-toaster/pom.xml
index 51811fafe31ad94b8e741f593b82a15de7db92ec..01bfd40440011097c518f5857f82b73c9d9c2871 100644 (file)
@@ -8,50 +8,41 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
+        <groupId>org.opendaylight.controller</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../single-feature-parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>odl-toaster</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
     <packaging>feature</packaging>
-
     <name>OpenDaylight :: Toaster</name>
 
     <dependencies>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>odl-mdsal-binding-runtime</artifactId>
-            <version>3.0.6</version>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
-            <groupId>${project.groupId}</groupId>
+            <groupId>org.opendaylight.controller</groupId>
             <artifactId>odl-mdsal-broker</artifactId>
-            <version>${project.version}</version>
             <type>xml</type>
             <classifier>features</classifier>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <artifactId>sample-toaster</artifactId>
-            <version>${project.version}</version>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <artifactId>sample-toaster-consumer</artifactId>
-            <version>${project.version}</version>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <artifactId>sample-toaster-provider</artifactId>
-            <version>${project.version}</version>
         </dependency>
     </dependencies>
 </project>
diff --git a/features/odl-toaster/src/main/feature/feature.xml b/features/odl-toaster/src/main/feature/feature.xml
new file mode 100644 (file)
index 0000000..9a6b3e7
--- /dev/null
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2020 PANTHEON.tech, s.r.o. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0" name="odl-toaster-${project.version}">
+    <feature name="odl-toaster" version="${project.version}">
+        <feature version="[13,14)">odl-mdsal-binding-runtime</feature>
+    </feature>
+</features>
index 90b265da8558fc88b38c221af5668579e79376b9..97ade422e27ceb93f12e2209f598f19fff641e95 100644 (file)
@@ -1,22 +1,58 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2014, 2018 Cisco Systems, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
-  <artifactId>features-controller</artifactId>
-  <version>1.13.0-SNAPSHOT</version>
+  <artifactId>features-aggregator</artifactId>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
+  <properties>
+    <maven.deploy.skip>true</maven.deploy.skip>
+    <maven.install.skip>true</maven.install.skip>
+  </properties>
+
   <modules>
-    <module>config-netty</module>
-    <module>mdsal</module>
-    <module>extras</module>
-    <module>benchmark</module>
+    <!-- Common infra to cut down definitions -->
+    <module>single-feature-parent</module>
+
+    <!-- Production features -->
+    <module>features-controller</module>
+
+    <!-- Experimental features -->
+    <module>features-controller-experimental</module>
+    <module>odl-toaster</module>
+
+    <!-- CSIT features -->
+    <module>features-controller-testing</module>
+    <module>odl-clustering-test-app</module>
+    <module>odl-mdsal-benchmark</module>
+
+    <!-- Scala/Akka features -->
+    <module>odl-controller-scala</module>
+    <module>odl-controller-akka</module>
+
+    <!-- Single features, to be cleaned up -->
+    <module>odl-controller-blueprint</module>
+    <module>odl-controller-broker-local</module>
+    <module>odl-controller-mdsal-common</module>
+    <module>odl-jolokia</module>
+    <module>odl-mdsal-broker</module>
+    <module>odl-mdsal-clustering-commons</module>
+    <module>odl-mdsal-distributed-datastore</module>
+    <module>odl-mdsal-remoterpc-connector</module>
   </modules>
 </project>
similarity index 55%
rename from features/mdsal/odl-mdsal-clustering/pom.xml
rename to features/single-feature-parent/pom.xml
index b7c0bf38a63711175d4e574a8745db1af1b00a9f..556d3acd7632b2313b4d00dcf771314c51fc3df0 100644 (file)
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <!--
- Copyright © 2016, 2017 Red Hat, Inc. and others.
+ Copyright © 2020 PANTHEON.tech, s.r.o. and others.
 
  This program and the accompanying materials are made available under the
  terms of the Eclipse Public License v1.0 which accompanies this distribution,
@@ -8,27 +8,27 @@
  -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
         <groupId>org.opendaylight.odlparent</groupId>
         <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
+        <version>13.0.11</version>
         <relativePath/>
     </parent>
 
     <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-clustering</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>feature</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>${project.groupId}</groupId>
-            <artifactId>odl-mdsal-broker</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-    </dependencies>
+    <artifactId>single-feature-parent</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
+    <packaging>pom</packaging>
 
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.opendaylight.controller</groupId>
+                <artifactId>bundle-parent</artifactId>
+                <version>9.0.3-SNAPSHOT</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
 </project>
similarity index 81%
rename from opendaylight/commons/jolokia/pom.xml
rename to jolokia/pom.xml
index a277a07c242f0eab515316929d67727db1cc9bdb..1d98663b1f762f3e9c255ab26d9ee0182be1ebe9 100644 (file)
@@ -1,16 +1,23 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2015, 2018 Brocade Communications Systems, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>odl-jolokia-osgi</artifactId>
-  <version>0.10.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <build>
index 833b83e30817d76f4d9905ea9e549db01871d46e..56869cc048d81cf36757c86e5c7dd09b046d994f 100644 (file)
@@ -1,18 +1,37 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2014, 2018 Cisco Systems, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>karaf4-parent</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
-  <artifactId>distribution.opendaylight-karaf</artifactId>
-  <version>1.13.0-SNAPSHOT</version>
+  <artifactId>controller-test-karaf</artifactId>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
+  <dependencyManagement>
+    <dependencies>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>controller-artifacts</artifactId>
+        <version>${project.version}</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
+
   <dependencies>
     <dependency>
       <!-- scope is compile so all features (there is only one) are installed
           Note: Nothing should go here that is not locked
           down with testing... ie, no broken feature repos
     -->
-
-    <!-- MD-SAL Related Features -->
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>features-mdsal</artifactId>
-      <version>1.10.0-SNAPSHOT</version>
+      <artifactId>features-controller</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>features-extras</artifactId>
-      <version>${project.version}</version>
+      <artifactId>features-controller-experimental</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>features-mdsal</artifactId>
-      <version>3.0.6</version>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-controller-testing</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
diff --git a/opendaylight/archetypes/opendaylight-startup/pom.xml b/opendaylight/archetypes/opendaylight-startup/pom.xml
deleted file mode 100644 (file)
index 98863ee..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-     <groupId>org.opendaylight.controller.archetypes</groupId>
-     <artifactId>archetypes-parent</artifactId>
-     <version>0.10.0-SNAPSHOT</version>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>opendaylight-startup-archetype</artifactId>
-  <version>1.8.0-SNAPSHOT</version>
-  <packaging>maven-archetype</packaging>
-  <name>${project.artifactId}</name>
-
-</project>
diff --git a/opendaylight/archetypes/opendaylight-startup/src/main/resources/META-INF/maven/archetype-metadata.xml b/opendaylight/archetypes/opendaylight-startup/src/main/resources/META-INF/maven/archetype-metadata.xml
deleted file mode 100644 (file)
index 19336c7..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<archetype-descriptor xsi:schemaLocation="http://maven.apache.org/plugins/maven-archetype-plugin/archetype-descriptor/1.0.0 http://maven.apache.org/xsd/archetype-descriptor-1.0.0.xsd" name="example-aggregator"
-    xmlns="http://maven.apache.org/plugins/maven-archetype-plugin/archetype-descriptor/1.0.0"
-    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-
-  <fileSets>
-    <fileSet encoding="UTF-8">
-      <directory></directory>
-      <includes>
-        <include>README.txt</include>
-      </includes>
-    </fileSet>
-  </fileSets>
-</archetype-descriptor>
diff --git a/opendaylight/archetypes/opendaylight-startup/src/main/resources/archetype-resources/README.txt b/opendaylight/archetypes/opendaylight-startup/src/main/resources/archetype-resources/README.txt
deleted file mode 100644 (file)
index 0a54630..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-NOTICE
-
-The org.opendaylight.controller:opendaylight-startup-archetype 
-
-has been replaced by the org.opendaylight.archetypes:opendaylight-startup-archetype
-
-(note how the archetypeGroupId does not contain "controller" anymore, but ends with ".archetypes"),
-
-and has a different version now.
diff --git a/opendaylight/archetypes/opendaylight-startup/src/main/resources/archetype-resources/pom.xml b/opendaylight/archetypes/opendaylight-startup/src/main/resources/archetype-resources/pom.xml
deleted file mode 100644 (file)
index 2214fc8..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>${groupId}</groupId>
-  <artifactId>${artifactId}-aggregator</artifactId>
-  <version>${version}</version>
-  <name>${artifactId}</name>
-  <packaging>pom</packaging>
-
-</project>
diff --git a/opendaylight/archetypes/pom.xml b/opendaylight/archetypes/pom.xml
deleted file mode 100644 (file)
index 2e9b995..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller.archetypes</groupId>
-  <artifactId>archetypes-parent</artifactId>
-  <version>0.10.0-SNAPSHOT</version>
-  <packaging>pom</packaging>
-
-  <build>
-    <extensions>
-      <extension>
-        <groupId>org.apache.maven.archetype</groupId>
-        <artifactId>archetype-packaging</artifactId>
-        <version>${maven.archetype.plugin.version}</version>
-      </extension>
-    </extensions>
-  </build>
-
-  <modules>
-    <module>opendaylight-startup</module>
-  </modules>
-</project>
index 10699ed8836265bbe55c824aa6076f781493c6d2..2b7bfeb106fd238f80a03c86e354f6ebcd426ee0 100644 (file)
@@ -1,11 +1,18 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2016, 2018 Brocade Communications Systems, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
 
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>bundle-parent</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <artifactId>blueprint</artifactId>
   <packaging>bundle</packaging>
   <name>${project.artifactId}</name>
-  <version>0.11.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
 
   <dependencyManagement>
     <dependencies>
       <dependency>
-        <groupId>org.opendaylight.mdsal</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>3.0.6</version>
+        <groupId>org.opendaylight.yangtools</groupId>
+        <artifactId>yangtools-artifacts</artifactId>
+        <version>13.0.2</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
       <dependency>
-        <groupId>org.opendaylight.controller</groupId>
+        <groupId>org.opendaylight.mdsal</groupId>
         <artifactId>mdsal-artifacts</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>13.0.1</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
       <dependency>
-        <groupId>org.opendaylight.yangtools</groupId>
-        <artifactId>yangtools-artifacts</artifactId>
-        <version>2.1.8</version>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>controller-artifacts</artifactId>
+        <version>${project.version}</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.apache.aries.blueprint</groupId>
       <artifactId>org.apache.aries.blueprint.core</artifactId>
     <dependency>
       <groupId>org.apache.aries</groupId>
       <artifactId>org.apache.aries.util</artifactId>
-      <version>1.1.3</version>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-api</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>util</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-spi</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-codec-xml</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-common-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-dom-api</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-codec</artifactId>
+      <artifactId>mdsal-binding-dom-codec-api</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-codec-xml</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-spec-util</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>yang-binding</artifactId>
     </dependency>
     <dependency>
       <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.compendium</artifactId>
+      <artifactId>org.osgi.framework</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.cm</artifactId>
     </dependency>
     <dependency>
       <groupId>org.osgi</groupId>
       <artifactId>org.osgi.service.event</artifactId>
     </dependency>
     <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.util.tracker</artifactId>
     </dependency>
+
     <dependency>
-      <groupId>com.google.truth</groupId>
-      <artifactId>truth</artifactId>
-      <scope>test</scope>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-test-model</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-test-model</artifactId>
+      <artifactId>mdsal-binding-dom-adapter</artifactId>
       <scope>test</scope>
     </dependency>
     <dependency>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-adapter</artifactId>
-      <scope>test</scope>
+      <artifactId>mdsal-binding-test-utils</artifactId>
     </dependency>
   </dependencies>
 
           </instructions>
         </configuration>
       </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <propertyExpansion>checkstyle.violationSeverity=error</propertyExpansion>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <failOnError>true</failOnError>
-        </configuration>
-      </plugin>
     </plugins>
   </build>
 
index ebc697592348b2815b9de511f1bc639ff3bc0890..55994ca1f093ddf4b86a02ccc8847166d679dc8d 100644 (file)
@@ -11,11 +11,10 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Dictionary;
 import java.util.Enumeration;
 import java.util.HashSet;
-import java.util.Hashtable;
 import java.util.List;
+import java.util.Map;
 import org.apache.aries.blueprint.NamespaceHandler;
 import org.apache.aries.blueprint.services.BlueprintExtenderService;
 import org.apache.aries.quiesce.participant.QuiesceParticipant;
@@ -27,6 +26,7 @@ import org.osgi.framework.Bundle;
 import org.osgi.framework.BundleActivator;
 import org.osgi.framework.BundleContext;
 import org.osgi.framework.BundleEvent;
+import org.osgi.framework.FrameworkUtil;
 import org.osgi.framework.ServiceReference;
 import org.osgi.framework.ServiceRegistration;
 import org.osgi.framework.SynchronousBundleListener;
@@ -88,7 +88,7 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
 
         bundleTracker = new BundleTracker<>(context, Bundle.ACTIVE, this);
 
-        blueprintExtenderServiceTracker = new ServiceTracker<>(context, BlueprintExtenderService.class.getName(),
+        blueprintExtenderServiceTracker = new ServiceTracker<>(context, BlueprintExtenderService.class,
                 new ServiceTrackerCustomizer<BlueprintExtenderService, BlueprintExtenderService>() {
                     @Override
                     public BlueprintExtenderService addingService(
@@ -108,7 +108,7 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
                 });
         blueprintExtenderServiceTracker.open();
 
-        quiesceParticipantTracker = new ServiceTracker<>(context, QuiesceParticipant.class.getName(),
+        quiesceParticipantTracker = new ServiceTracker<>(context, QuiesceParticipant.class,
                 new ServiceTrackerCustomizer<QuiesceParticipant, QuiesceParticipant>() {
                     @Override
                     public QuiesceParticipant addingService(
@@ -150,21 +150,20 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
 
         restartService.setBlueprintExtenderService(blueprintExtenderService);
 
-        blueprintContainerRestartReg = bundleContext.registerService(
-                BlueprintContainerRestartService.class.getName(), restartService, new Hashtable<>());
+        blueprintContainerRestartReg = bundleContext.registerService(BlueprintContainerRestartService.class,
+            restartService, null);
 
         return blueprintExtenderService;
     }
 
     private void registerNamespaceHandler(final BundleContext context) {
-        Dictionary<String, Object> props = new Hashtable<>();
-        props.put("osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0);
-        namespaceReg = context.registerService(NamespaceHandler.class.getName(),
-                new OpendaylightNamespaceHandler(), props);
+        namespaceReg = context.registerService(NamespaceHandler.class, new OpendaylightNamespaceHandler(),
+            FrameworkUtil.asDictionary(Map.of(
+                "osgi.service.blueprint.namespace", OpendaylightNamespaceHandler.NAMESPACE_1_0_0)));
     }
 
     private void registerBlueprintEventHandler(final BundleContext context) {
-        eventHandlerReg = context.registerService(BlueprintListener.class.getName(), this, new Hashtable<>());
+        eventHandlerReg = context.registerService(BlueprintListener.class, this, null);
     }
 
     /**
@@ -264,13 +263,12 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
         return !paths.isEmpty() ? paths : findBlueprintPaths(bundle, ODL_CUSTOM_BLUEPRINT_FILE_PATH);
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     private static List<Object> findBlueprintPaths(final Bundle bundle, final String path) {
         Enumeration<?> rntries = bundle.findEntries(path, BLUEPRINT_FLE_PATTERN, false);
         if (rntries == null) {
-            return Collections.emptyList();
+            return List.of();
         } else {
-            return Collections.list((Enumeration)rntries);
+            return List.copyOf(Collections.list(rntries));
         }
     }
 
@@ -297,7 +295,7 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
         LOG.info("Shutdown of blueprint containers complete");
     }
 
-    private List<Bundle> getBundlesToDestroy(final Collection<Bundle> containerBundles) {
+    private static List<Bundle> getBundlesToDestroy(final Collection<Bundle> containerBundles) {
         List<Bundle> bundlesToDestroy = new ArrayList<>();
 
         // Find all container bundles that either have no registered services or whose services are no
@@ -342,7 +340,7 @@ public class BlueprintBundleTracker implements BundleActivator, BundleTrackerCus
         return bundlesToDestroy;
     }
 
-    private @Nullable Bundle findBundleWithHighestUsedServiceId(final Collection<Bundle> containerBundles) {
+    private static @Nullable Bundle findBundleWithHighestUsedServiceId(final Collection<Bundle> containerBundles) {
         ServiceReference<?> highestServiceRef = null;
         for (Bundle bundle : containerBundles) {
             ServiceReference<?>[] references = bundle.getRegisteredServices();
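The tracker and registration changes in this file move from the String-based OSGi APIs to their typed counterparts. A minimal sketch of the resulting pattern (the ExampleHandler interface and the property key are made up for illustration, standing in for NamespaceHandler/BlueprintListener above):

    import java.util.Map;
    import org.osgi.framework.BundleContext;
    import org.osgi.framework.FrameworkUtil;
    import org.osgi.framework.ServiceRegistration;

    final class TypedRegistrationSketch {
        // Hypothetical service interface, for illustration only.
        interface ExampleHandler {
        }

        static ServiceRegistration<ExampleHandler> register(final BundleContext context, final ExampleHandler handler) {
            // Typed registerService(Class, S, Dictionary) replaces registerService(String, Object, Dictionary),
            // and FrameworkUtil.asDictionary() (OSGi R7) replaces a hand-populated Hashtable of properties.
            return context.registerService(ExampleHandler.class, handler,
                FrameworkUtil.asDictionary(Map.of("example.property", "value")));
        }
    }

When no properties are needed, passing null, as registerBlueprintEventHandler() now does, has the same effect as the previous empty Hashtable.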
index 2b2af15df979cabdbf1452ea40eb247a49684a07..4a8c0157d7d9ff072d6554c15b9257ff44eda530 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.blueprint;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.util.ArrayDeque;
@@ -15,7 +16,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Deque;
-import java.util.Hashtable;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;
@@ -84,8 +84,8 @@ class BlueprintContainerRestartServiceImpl implements AutoCloseable, BlueprintCo
     }
 
     private void restartContainerAndDependentsInternal(final Bundle forBundle) {
-        Preconditions.checkNotNull(blueprintExtenderService);
-        Preconditions.checkNotNull(quiesceParticipant);
+        requireNonNull(blueprintExtenderService);
+        requireNonNull(quiesceParticipant);
 
         // We use a LinkedHashSet to preserve insertion order as we walk the service usage hierarchy.
         Set<Bundle> containerBundlesSet = new LinkedHashSet<>();
@@ -223,9 +223,9 @@ class BlueprintContainerRestartServiceImpl implements AutoCloseable, BlueprintCo
         }
     }
 
-    private ServiceRegistration<?> registerEventHandler(final BundleContext bundleContext,
+    private static ServiceRegistration<?> registerEventHandler(final BundleContext bundleContext,
             final BlueprintListener listener) {
-        return bundleContext.registerService(BlueprintListener.class.getName(), listener, new Hashtable<>());
+        return bundleContext.registerService(BlueprintListener.class, listener, null);
     }
 
     @Override
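The Preconditions.checkNotNull() call sites above become plain requireNonNull(); both throw NullPointerException on a null argument, so behaviour is unchanged while the static import keeps call sites short. A trivial sketch (class and field names are illustrative):

    import static java.util.Objects.requireNonNull;

    final class NullCheckSketch {
        private final Object delegate;

        NullCheckSketch(final Object delegate) {
            // Same contract as Guava's Preconditions.checkNotNull(delegate): throws NullPointerException on null.
            this.delegate = requireNonNull(delegate, "delegate must not be null");
        }
    }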
index b7953e4d6f2c413a719f66abd2dea824f7378f9f..07bfd8fae862f97002281470aafa85a4ef2bb6b2 100644 (file)
@@ -7,19 +7,20 @@
  */
 package org.opendaylight.controller.blueprint.ext;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Consumer;
-import javax.annotation.concurrent.GuardedBy;
 import org.apache.aries.blueprint.di.AbstractRecipe;
 import org.apache.aries.blueprint.di.ExecutionContext;
 import org.apache.aries.blueprint.di.Recipe;
 import org.apache.aries.blueprint.ext.DependentComponentFactoryMetadata;
 import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
+import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.blueprint.BlueprintContainerRestartService;
 import org.osgi.framework.ServiceReference;
@@ -50,7 +51,7 @@ abstract class AbstractDependentComponentFactoryMetadata implements DependentCom
     private boolean stoppedServiceRecipes;
 
     protected AbstractDependentComponentFactoryMetadata(final String id) {
-        this.id = Preconditions.checkNotNull(id);
+        this.id = requireNonNull(id);
     }
 
     @Override
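The @GuardedBy import above switches from javax.annotation.concurrent to the Checker Framework's org.checkerframework.checker.lock.qual.GuardedBy, which the Lock Checker can verify statically. A small sketch of typical usage (names are illustrative, not taken from this change):

    import org.checkerframework.checker.lock.qual.GuardedBy;

    final class GuardedCounterSketch {
        private final Object lock = new Object();

        // Documents that this field must only be read or written while holding "lock".
        @GuardedBy("lock")
        private int counter;

        void increment() {
            synchronized (lock) {
                counter++;
            }
        }
    }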
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/AbstractInvokableServiceMetadata.java
deleted file mode 100644 (file)
index 10ec16f..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
-import java.util.Set;
-import java.util.function.Predicate;
-import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-
-abstract class AbstractInvokableServiceMetadata extends AbstractDependentComponentFactoryMetadata {
-    private final String interfaceName;
-
-    private ListenerRegistration<DOMRpcAvailabilityListener> rpcListenerReg;
-    private RpcConsumerRegistry rpcRegistry;
-    private Class<RpcService> rpcInterface;
-    private Set<SchemaPath> rpcSchemaPaths;
-
-    AbstractInvokableServiceMetadata(final String id, final String interfaceName) {
-        super(id);
-        this.interfaceName = Preconditions.checkNotNull(interfaceName);
-    }
-
-    Class<RpcService> rpcInterface() {
-        return rpcInterface;
-    }
-
-    @SuppressWarnings({ "checkstyle:IllegalCatch", "unchecked" })
-    @Override
-    public final void init(final ExtendedBlueprintContainer container) {
-        super.init(container);
-
-        final Class<?> interfaceClass;
-        try {
-            interfaceClass = container().getBundleContext().getBundle().loadClass(interfaceName);
-        } catch (final Exception e) {
-            throw new ComponentDefinitionException(String.format("%s: Error obtaining interface class %s",
-                    logName(), interfaceName), e);
-        }
-
-        if (!RpcService.class.isAssignableFrom(interfaceClass)) {
-            throw new ComponentDefinitionException(String.format(
-                "%s: The specified interface %s is not an RpcService", logName(), interfaceName));
-        }
-
-        rpcInterface = (Class<RpcService>)interfaceClass;
-    }
-
-    @Override
-    protected final void startTracking() {
-        // Request RpcProviderRegistry first ...
-        retrieveService("RpcConsumerRegistry", RpcConsumerRegistry.class, this::onRpcRegistry);
-    }
-
-    private void onRpcRegistry(final Object service) {
-        log.debug("{}: Retrieved RpcProviderRegistry {}", logName(), service);
-        rpcRegistry = (RpcConsumerRegistry)service;
-
-        // Now acquire SchemaService...
-        retrieveService("SchemaService", DOMSchemaService.class, this::onSchemaService);
-    }
-
-    private void onSchemaService(final Object service) {
-        log.debug("{}: Retrieved SchemaService {}", logName(), service);
-
-        // Now get the SchemaContext and trigger RPC resolution
-        retrievedSchemaContext(((DOMSchemaService)service).getGlobalContext());
-    }
-
-    private void retrievedSchemaContext(final SchemaContext schemaContext) {
-        log.debug("{}: retrievedSchemaContext", logName());
-
-        final Collection<SchemaPath> schemaPaths = RpcUtil.decomposeRpcService(rpcInterface, schemaContext,
-            rpcFilter());
-        if (schemaPaths.isEmpty()) {
-            log.debug("{}: interface {} has no acceptable entries, assuming it is satisfied", logName(), rpcInterface);
-            setSatisfied();
-            return;
-        }
-
-        rpcSchemaPaths = ImmutableSet.copyOf(schemaPaths);
-        log.debug("{}: Got SchemaPaths: {}", logName(), rpcSchemaPaths);
-
-        // First get the DOMRpcService OSGi service. This will be used to register a listener to be notified
-        // when the underlying DOM RPC service is available.
-        retrieveService("DOMRpcService", DOMRpcService.class, this::retrievedDOMRpcService);
-    }
-
-    private void retrievedDOMRpcService(final Object service) {
-        log.debug("{}: retrievedDOMRpcService {}", logName(), service);
-        final DOMRpcService domRpcService = (DOMRpcService)service;
-
-        setDependencyDesc("Available DOM RPC for binding RPC: " + rpcInterface);
-        rpcListenerReg = domRpcService.registerRpcListener(new DOMRpcAvailabilityListener() {
-            @Override
-            public void onRpcAvailable(final Collection<DOMRpcIdentifier> rpcs) {
-                onRpcsAvailable(rpcs);
-            }
-
-            @Override
-            public void onRpcUnavailable(final Collection<DOMRpcIdentifier> rpcs) {
-            }
-        });
-    }
-
-    abstract Predicate<RpcRoutingStrategy> rpcFilter();
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    @Override
-    public final Object create() throws ComponentDefinitionException {
-        log.debug("{}: In create: interfaceName: {}", logName(), interfaceName);
-
-        super.onCreate();
-
-        try {
-            RpcService rpcService = rpcRegistry.getRpcService(rpcInterface);
-
-            log.debug("{}: create returning service {}", logName(), rpcService);
-
-            return rpcService;
-        } catch (final RuntimeException e) {
-            throw new ComponentDefinitionException("Error getting RPC service for " + interfaceName, e);
-        }
-    }
-
-    protected final void onRpcsAvailable(final Collection<DOMRpcIdentifier> rpcs) {
-        for (DOMRpcIdentifier identifier : rpcs) {
-            if (rpcSchemaPaths.contains(identifier.getType())) {
-                log.debug("{}: onRpcsAvailable - found SchemaPath {}", logName(), identifier.getType());
-                setSatisfied();
-                break;
-            }
-        }
-    }
-
-    @Override
-    public final void stopTracking() {
-        super.stopTracking();
-        closeRpcListenerReg();
-    }
-
-    private void closeRpcListenerReg() {
-        if (rpcListenerReg != null) {
-            rpcListenerReg.close();
-            rpcListenerReg = null;
-        }
-    }
-
-    @Override
-    public final void destroy(final Object instance) {
-        super.destroy(instance);
-        closeRpcListenerReg();
-    }
-
-    @Override
-    public final String toString() {
-        return MoreObjects.toStringHelper(this).add("id", getId()).add("interfaceName", interfaceName).toString();
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionProviderBean.java
deleted file mode 100644 (file)
index af943f9..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
-import java.util.Set;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.osgi.framework.Bundle;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "action-provider" element that registers the promise to instantiate action
- * instances with RpcProviderRegistry.
- *
- * <p>
- * This bean has two distinct facets:
- * - if a reference bean is provided, it registers it with {@link RpcProviderService}
- * - if a reference bean is not provided, it registers the corresponding no-op implementation with
- *   {@link DOMRpcProviderService} for all action (Routed RPC) elements in the provided interface
- *
- * @author Robert Varga
- */
-public class ActionProviderBean {
-    static final String ACTION_PROVIDER = "action-provider";
-
-    private static final Logger LOG = LoggerFactory.getLogger(ActionProviderBean.class);
-
-    private DOMRpcProviderService domRpcProvider;
-    private RpcProviderService bindingRpcProvider;
-    private DOMSchemaService schemaService;
-    private RpcService implementation;
-    private String interfaceName;
-    private Registration reg;
-    private Bundle bundle;
-
-    public void setBundle(final Bundle bundle) {
-        this.bundle = bundle;
-    }
-
-    public void setInterfaceName(final String interfaceName) {
-        this.interfaceName = interfaceName;
-    }
-
-    public void setImplementation(final RpcService implementation) {
-        this.implementation = implementation;
-    }
-
-    public void setDomRpcProvider(final DOMRpcProviderService rpcProviderService) {
-        this.domRpcProvider = rpcProviderService;
-    }
-
-    public void setBindingRpcProvider(final RpcProviderService rpcProvider) {
-        this.bindingRpcProvider = rpcProvider;
-    }
-
-    public void setSchemaService(final DOMSchemaService schemaService) {
-        this.schemaService = schemaService;
-    }
-
-    public void init() {
-        // First resolve the interface class
-        final Class<RpcService> interfaceClass = getRpcClass();
-
-        LOG.debug("{}: resolved interface {} to {}", ACTION_PROVIDER, interfaceName, interfaceClass);
-
-        if (implementation != null) {
-            registerImplementation(interfaceClass);
-        } else {
-            registerFallback(interfaceClass);
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void destroy() {
-        if (reg != null) {
-            try {
-                reg.close();
-            } catch (final Exception e) {
-                LOG.warn("{}: error while unregistering", ACTION_PROVIDER, e);
-            } finally {
-                reg = null;
-            }
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    private Class<RpcService> getRpcClass() {
-        final Class<?> iface;
-
-        try {
-            iface = bundle.loadClass(interfaceName);
-        } catch (final ClassNotFoundException e) {
-            throw new ComponentDefinitionException(String.format(
-                "The specified \"interface\" for %s \"%s\" does not refer to an available class", interfaceName,
-                ACTION_PROVIDER), e);
-        }
-        if (!RpcService.class.isAssignableFrom(iface)) {
-            throw new ComponentDefinitionException(String.format(
-                "The specified \"interface\" %s for \"%s\" is not an RpcService", interfaceName, ACTION_PROVIDER));
-        }
-
-        return (Class<RpcService>) iface;
-    }
-
-    private void registerFallback(final Class<RpcService> interfaceClass) {
-        final Collection<SchemaPath> paths = RpcUtil.decomposeRpcService(interfaceClass,
-            schemaService.getGlobalContext(), RpcRoutingStrategy::isContextBasedRouted);
-        if (paths.isEmpty()) {
-            LOG.warn("{}: interface {} has no actions defined", ACTION_PROVIDER, interfaceClass);
-            return;
-        }
-
-        final Set<DOMRpcIdentifier> rpcs = ImmutableSet.copyOf(Collections2.transform(paths, DOMRpcIdentifier::create));
-        reg = domRpcProvider.registerRpcImplementation(
-            (rpc, input) -> FluentFutures.immediateFailedFluentFuture(new DOMRpcImplementationNotAvailableException(
-                "Action %s has no instance matching %s", rpc, input)), rpcs);
-        LOG.debug("Registered provider for {}", interfaceName);
-    }
-
-    private void registerImplementation(final Class<RpcService> interfaceClass) {
-        if (!interfaceClass.isInstance(implementation)) {
-            throw new ComponentDefinitionException(String.format(
-                "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s",
-                interfaceName, ACTION_PROVIDER, implementation.getClass()));
-        }
-
-        reg = bindingRpcProvider.registerRpcImplementation(interfaceClass, implementation);
-        LOG.debug("Registered implementation {} for {}", implementation, interfaceName);
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/ActionServiceMetadata.java
deleted file mode 100644 (file)
index 5bb3f14..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-
-/**
- * Factory metadata corresponding to the "action-service" element. It waits for a DOM promise of registration
- * to appear in the {@link DOMRpcService} and then acquires a dynamic proxy via RpcProviderRegistry.
- *
- * @author Robert Varga
- */
-final class ActionServiceMetadata extends AbstractInvokableServiceMetadata {
-    /*
-     * Implementation note:
-     *
-     * This implementation assumes Binding V1 semantics for actions, which means actions are packaged along with RPCs
-     * into a single interface. This has interesting implications on working with RpcServiceMetadata, which only
-     * handles the RPC side of the contract.
-     *
-     * Further interesting interactions stem from the fact that in DOM world each action is a separate entity, so the
-     * interface contract can let some actions to be invoked, while failing for others. This is a shortcoming of the
-     * Binding Specification and will be addressed in Binding V2 -- where each action is its own interface.
-     */
-    ActionServiceMetadata(final String id, final String interfaceName) {
-        super(id, interfaceName);
-    }
-
-    @Override
-    Predicate<RpcRoutingStrategy> rpcFilter() {
-        return RpcRoutingStrategy::isContextBasedRouted;
-    }
-}
index 93c46b4c30e7509c916642d106890e6dc5bd1c97..67905aeaf86b3ebcb8f8d1c58cc9fce5549584df 100644 (file)
@@ -7,33 +7,39 @@
  */
 package org.opendaylight.controller.blueprint.ext;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+
 import com.google.common.base.Strings;
+import com.google.common.collect.Iterables;
 import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.net.URISyntaxException;
-import java.util.List;
-import javax.xml.parsers.ParserConfigurationException;
+import java.util.Set;
 import javax.xml.stream.XMLStreamException;
 import javax.xml.transform.dom.DOMSource;
 import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
 import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.Identifiable;
-import org.opendaylight.yangtools.yang.binding.Identifier;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.Key;
+import org.opendaylight.yangtools.yang.binding.KeyAware;
+import org.opendaylight.yangtools.yang.binding.contract.Naming;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
 import org.opendaylight.yangtools.yang.data.codec.xml.XmlParserStream;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.NormalizedNodeResult;
+import org.opendaylight.yangtools.yang.data.impl.schema.NormalizationResultHolder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
+import org.opendaylight.yangtools.yang.model.api.stmt.KeyEffectiveStatement;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaTreeEffectiveStatement;
 import org.osgi.service.blueprint.container.ComponentDefinitionException;
 import org.w3c.dom.Element;
 import org.xml.sax.SAXException;
@@ -44,11 +50,9 @@ import org.xml.sax.SAXException;
  * @author Thomas Pantelis (originally; re-factored by Michael Vorburger.ch)
  */
 public abstract class BindingContext {
-    private static String GET_KEY_METHOD = "key";
-
     public static BindingContext create(final String logName, final Class<? extends DataObject> klass,
             final String appConfigListKeyValue) {
-        if (Identifiable.class.isAssignableFrom(klass)) {
+        if (KeyAware.class.isAssignableFrom(klass)) {
             // The binding class corresponds to a yang list.
             if (Strings.isNullOrEmpty(appConfigListKeyValue)) {
                 throw new ComponentDefinitionException(String.format(
@@ -71,12 +75,12 @@ public abstract class BindingContext {
     }
 
     public final InstanceIdentifier<DataObject> appConfigPath;
-    public final Class<DataObject> appConfigBindingClass;
+    public final Class<?> appConfigBindingClass;
     public final Class<? extends DataSchemaNode> schemaType;
     public final QName bindingQName;
 
-    private BindingContext(final Class<DataObject> appConfigBindingClass,
-            final InstanceIdentifier<DataObject> appConfigPath, final Class<? extends DataSchemaNode> schemaType) {
+    private BindingContext(final Class<?> appConfigBindingClass, final InstanceIdentifier<DataObject> appConfigPath,
+            final Class<? extends DataSchemaNode> schemaType) {
         this.appConfigBindingClass = appConfigBindingClass;
         this.appConfigPath = appConfigPath;
         this.schemaType = schemaType;
@@ -84,25 +88,18 @@ public abstract class BindingContext {
         bindingQName = BindingReflections.findQName(appConfigBindingClass);
     }
 
-    public NormalizedNode<?, ?> parseDataElement(final Element element, final DataSchemaNode dataSchema,
-            final SchemaContext schemaContext) throws XMLStreamException, IOException, ParserConfigurationException,
-            SAXException, URISyntaxException {
-        final NormalizedNodeResult resultHolder = new NormalizedNodeResult();
+    public NormalizedNode parseDataElement(final Element element, final SchemaTreeInference dataSchema)
+            throws XMLStreamException, IOException, SAXException, URISyntaxException {
+        final NormalizationResultHolder resultHolder = new NormalizationResultHolder();
         final NormalizedNodeStreamWriter writer = ImmutableNormalizedNodeStreamWriter.from(resultHolder);
-        final XmlParserStream xmlParser = XmlParserStream.create(writer, schemaContext, dataSchema);
+        final XmlParserStream xmlParser = XmlParserStream.create(writer, dataSchema);
         xmlParser.traverse(new DOMSource(element));
 
-        final NormalizedNode<?, ?> result = resultHolder.getResult();
-        if (result instanceof MapNode) {
-            final MapNode mapNode = (MapNode) result;
-            final MapEntryNode mapEntryNode = mapNode.getValue().iterator().next();
-            return mapEntryNode;
-        }
-
-        return result;
+        final NormalizedNode result = resultHolder.getResult().data();
+        return result instanceof MapNode mapNode ? mapNode.body().iterator().next() : result;
     }
 
-    public abstract NormalizedNode<?, ?> newDefaultNode(DataSchemaNode dataSchema);
+    public abstract NormalizedNode newDefaultNode(SchemaTreeInference dataSchema);
 
     /**
      * BindingContext implementation for a container binding.
@@ -110,13 +107,13 @@ public abstract class BindingContext {
     private static class ContainerBindingContext extends BindingContext {
         @SuppressWarnings("unchecked")
         ContainerBindingContext(final Class<? extends DataObject> appConfigBindingClass) {
-            super((Class<DataObject>) appConfigBindingClass,
-                    InstanceIdentifier.create((Class<DataObject>) appConfigBindingClass), ContainerSchemaNode.class);
+            super(appConfigBindingClass, InstanceIdentifier.create((Class) appConfigBindingClass),
+                ContainerSchemaNode.class);
         }
 
         @Override
-        public NormalizedNode<?, ?> newDefaultNode(final DataSchemaNode dataSchema) {
-            return ImmutableNodes.containerNode(bindingQName);
+        public ContainerNode newDefaultNode(final SchemaTreeInference dataSchema) {
+            return ImmutableNodes.newContainerBuilder().withNodeIdentifier(new NodeIdentifier(bindingQName)).build();
         }
     }
 
@@ -135,23 +132,30 @@ public abstract class BindingContext {
         }
 
         @SuppressWarnings({ "rawtypes", "unchecked" })
-        private static ListBindingContext newInstance(final Class<? extends DataObject> bindingClass,
+        static ListBindingContext newInstance(final Class<? extends DataObject> bindingClass,
                 final String listKeyValue) throws InstantiationException, IllegalAccessException,
                 IllegalArgumentException, InvocationTargetException, NoSuchMethodException, SecurityException {
             // We assume the yang list key type is string.
-            Identifier keyInstance = (Identifier) bindingClass.getMethod(GET_KEY_METHOD).getReturnType()
-                    .getConstructor(String.class).newInstance(listKeyValue);
+            Key keyInstance = (Key) bindingClass.getMethod(Naming.KEY_AWARE_KEY_NAME)
+                .getReturnType().getConstructor(String.class).newInstance(listKeyValue);
             InstanceIdentifier appConfigPath = InstanceIdentifier.builder((Class)bindingClass, keyInstance).build();
             return new ListBindingContext(bindingClass, appConfigPath, listKeyValue);
         }
 
         @Override
-        public NormalizedNode<?, ?> newDefaultNode(final DataSchemaNode dataSchema) {
+        public NormalizedNode newDefaultNode(final SchemaTreeInference dataSchema) {
+            final SchemaTreeEffectiveStatement<?> stmt = Iterables.getLast(dataSchema.statementPath());
+
             // We assume there's only one key for the list.
-            List<QName> keys = ((ListSchemaNode)dataSchema).getKeyDefinition();
-            Preconditions.checkArgument(keys.size() == 1, "Expected only 1 key for list %s", appConfigBindingClass);
-            QName listKeyQName = keys.get(0);
-            return ImmutableNodes.mapEntryBuilder(bindingQName, listKeyQName, appConfigListKeyValue).build();
+            final Set<QName> keys = stmt.findFirstEffectiveSubstatementArgument(KeyEffectiveStatement.class)
+                .orElseThrow();
+
+            checkArgument(keys.size() == 1, "Expected only 1 key for list %s", appConfigBindingClass);
+            QName listKeyQName = keys.iterator().next();
+            return ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(NodeIdentifierWithPredicates.of(bindingQName, listKeyQName, appConfigListKeyValue))
+                .withChild(ImmutableNodes.leafNode(listKeyQName, appConfigListKeyValue))
+                .build();
         }
     }
 }
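
For reference, the ImmutableNodes builder calls introduced above can be exercised in isolation. The following is a minimal sketch, assuming current yangtools import locations (notably ImmutableNodes under yang.data.spi.node); the class and method names are illustrative and not part of this change:

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

final class MapEntrySketch {
    // Builds the same structure newDefaultNode() produces for a list: an entry identified by
    // (listQName, keyQName=keyValue) whose only child is the key leaf itself.
    static MapEntryNode defaultEntry(final QName listQName, final QName keyQName, final String keyValue) {
        return ImmutableNodes.newMapEntryBuilder()
            .withNodeIdentifier(NodeIdentifierWithPredicates.of(listQName, keyQName, keyValue))
            .withChild(ImmutableNodes.leafNode(keyQName, keyValue))
            .build();
    }
}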
index 84847a14b1687f87cc7b94bdb58fb4b4eff2f7be..8e8d98ff365d62389aba90459f7abe0ccc6601ee 100644 (file)
@@ -10,8 +10,8 @@ package org.opendaylight.controller.blueprint.ext;
 import com.google.common.base.Strings;
 import java.util.ArrayList;
 import java.util.Dictionary;
-import java.util.Hashtable;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.aries.blueprint.ComponentDefinitionRegistry;
@@ -23,9 +23,8 @@ import org.apache.aries.util.AriesFrameworkUtil;
 import org.opendaylight.controller.blueprint.BlueprintContainerRestartService;
 import org.osgi.framework.Bundle;
 import org.osgi.framework.Constants;
+import org.osgi.framework.FrameworkUtil;
 import org.osgi.framework.ServiceRegistration;
-import org.osgi.service.blueprint.reflect.BeanProperty;
-import org.osgi.service.blueprint.reflect.ComponentMetadata;
 import org.osgi.service.blueprint.reflect.ValueMetadata;
 import org.osgi.service.cm.ManagedService;
 import org.slf4j.Logger;
@@ -44,8 +43,8 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
     private static final String CM_PERSISTENT_ID_PROPERTY = "persistentId";
 
     private final List<ServiceRegistration<?>> managedServiceRegs = new ArrayList<>();
-    private Bundle bundle;
-    private BlueprintContainerRestartService blueprintContainerRestartService;
+    private Bundle bundle = null;
+    private BlueprintContainerRestartService blueprintContainerRestartService = null;
     private boolean restartDependentsOnUpdates;
     private boolean useDefaultForReferenceTypes;
 
@@ -54,7 +53,7 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
     }
 
     public void setBlueprintContainerRestartService(final BlueprintContainerRestartService restartService) {
-        this.blueprintContainerRestartService = restartService;
+        blueprintContainerRestartService = restartService;
     }
 
     public void setRestartDependentsOnUpdates(final boolean restartDependentsOnUpdates) {
@@ -66,21 +65,19 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
     }
 
     public void destroy() {
-        for (ServiceRegistration<?> reg: managedServiceRegs) {
-            AriesFrameworkUtil.safeUnregisterService(reg);
-        }
+        managedServiceRegs.forEach(AriesFrameworkUtil::safeUnregisterService);
     }
 
     @Override
     public void process(final ComponentDefinitionRegistry registry) {
         LOG.debug("{}: In process",  logName());
 
-        for (String name : registry.getComponentDefinitionNames()) {
-            ComponentMetadata component = registry.getComponentDefinition(name);
-            if (component instanceof MutableBeanMetadata) {
-                processMutableBeanMetadata((MutableBeanMetadata) component);
-            } else if (component instanceof MutableServiceReferenceMetadata) {
-                processServiceReferenceMetadata((MutableServiceReferenceMetadata)component);
+        for (var name : registry.getComponentDefinitionNames()) {
+            final var component = registry.getComponentDefinition(name);
+            if (component instanceof MutableBeanMetadata bean) {
+                processMutableBeanMetadata(bean);
+            } else if (component instanceof MutableServiceReferenceMetadata serviceRef) {
+                processServiceReferenceMetadata(serviceRef);
             }
         }
     }
@@ -111,18 +108,15 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
             LOG.debug("{}: Found PropertyPlaceholder bean: {}, runtime {}", logName(), bean.getId(),
                     bean.getRuntimeClass());
 
-            for (BeanProperty prop : bean.getProperties()) {
+            for (var prop : bean.getProperties()) {
                 if (CM_PERSISTENT_ID_PROPERTY.equals(prop.getName())) {
-                    if (prop.getValue() instanceof ValueMetadata) {
-                        ValueMetadata persistentId = (ValueMetadata)prop.getValue();
-
-                        LOG.debug("{}: Found {} property, value : {}", logName(),
-                                CM_PERSISTENT_ID_PROPERTY, persistentId.getStringValue());
-
+                    if (prop.getValue() instanceof ValueMetadata persistentId) {
+                        LOG.debug("{}: Found {} property, value : {}", logName(), CM_PERSISTENT_ID_PROPERTY,
+                            persistentId.getStringValue());
                         registerManagedService(persistentId.getStringValue());
                     } else {
-                        LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata",
-                                logName(), CM_PERSISTENT_ID_PROPERTY, prop.getValue());
+                        LOG.debug("{}: {} property metadata {} is not instanceof ValueMetadata", logName(),
+                            CM_PERSISTENT_ID_PROPERTY, prop.getValue());
                     }
 
                     break;
@@ -134,7 +128,7 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
     private void registerManagedService(final String persistentId) {
         // Register a ManagedService so we get updates from the ConfigAdmin when the cfg file corresponding
         // to the persistentId changes.
-        final ManagedService managedService = new ManagedService() {
+        final var managedService = new ManagedService() {
             private final AtomicBoolean initialUpdate = new AtomicBoolean(true);
             private volatile Dictionary<String, ?> previousProperties;
 
@@ -154,12 +148,11 @@ public class ComponentProcessor implements ComponentDefinitionRegistryProcessor
             }
         };
 
-        Dictionary<String, Object> props = new Hashtable<>();
-        props.put(Constants.SERVICE_PID, persistentId);
-        props.put(Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName());
-        props.put(Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION));
-        managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class.getName(),
-                managedService, props));
+        managedServiceRegs.add(bundle.getBundleContext().registerService(ManagedService.class, managedService,
+            FrameworkUtil.asDictionary(Map.of(
+                Constants.SERVICE_PID, persistentId,
+                Constants.BUNDLE_SYMBOLICNAME, bundle.getSymbolicName(),
+                Constants.BUNDLE_VERSION, bundle.getHeaders().get(Constants.BUNDLE_VERSION)))));
     }
 
     private String logName() {
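
The Hashtable construction above is replaced by FrameworkUtil.asDictionary() over an immutable Map. A minimal sketch of that registration shape, assuming only standard OSGi core and Config Admin APIs (the helper class below is illustrative):

import java.util.Map;
import org.osgi.framework.BundleContext;
import org.osgi.framework.Constants;
import org.osgi.framework.FrameworkUtil;
import org.osgi.framework.ServiceRegistration;
import org.osgi.service.cm.ManagedService;

final class ManagedServiceRegistrationSketch {
    // Registers a ManagedService keyed by its persistent ID; FrameworkUtil.asDictionary() adapts
    // the immutable Map to the Dictionary form required by BundleContext.registerService().
    static ServiceRegistration<ManagedService> register(final BundleContext ctx, final ManagedService service,
            final String persistentId) {
        return ctx.registerService(ManagedService.class, service,
            FrameworkUtil.asDictionary(Map.of(Constants.SERVICE_PID, persistentId)));
    }
}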
index 9b55b360d66bab09a531e0174c3ad5fa7166d2d2..4dea3404f9f073677541678671786e7627f9293b 100644 (file)
@@ -14,18 +14,18 @@ import java.io.InputStream;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.util.Optional;
-import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.stream.XMLStreamException;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
 import org.opendaylight.yangtools.util.xml.UntrustedXML;
 import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaTreeEffectiveStatement;
+import org.opendaylight.yangtools.yang.model.util.SchemaInferenceStack;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
@@ -52,8 +52,8 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
 
     @FunctionalInterface
     public interface FallbackConfigProvider {
-        NormalizedNode<?,?> get(SchemaContext schemaContext, DataSchemaNode dataSchema) throws IOException,
-                XMLStreamException, ParserConfigurationException, SAXException, URISyntaxException;
+        NormalizedNode get(SchemaTreeInference dataSchema)
+            throws IOException, XMLStreamException, SAXException, URISyntaxException;
     }
 
     @FunctionalInterface
@@ -92,17 +92,17 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
         return Resources.getResource(testClass, defaultAppConfigFileName);
     }
 
-    public T createDefaultInstance() throws ConfigXMLReaderException, ParserConfigurationException, XMLStreamException,
-            IOException, SAXException, URISyntaxException {
-        return createDefaultInstance((schemaContext, dataSchema) -> {
-            throw new IllegalArgumentException("Failed to read XML "
-                    + "(not creating model from defaults as runtime would, for better clarity in tests)");
+    public T createDefaultInstance() throws ConfigXMLReaderException, XMLStreamException, IOException, SAXException,
+            URISyntaxException {
+        return createDefaultInstance(dataSchema -> {
+            throw new IllegalArgumentException(
+                "Failed to read XML (not creating model from defaults as runtime would, for better clarity in tests)");
         });
     }
 
     @SuppressWarnings("unchecked")
     public T createDefaultInstance(final FallbackConfigProvider fallback) throws ConfigXMLReaderException,
-            URISyntaxException, ParserConfigurationException, XMLStreamException, SAXException, IOException {
+            URISyntaxException, XMLStreamException, SAXException, IOException {
         YangInstanceIdentifier yangPath = bindingSerializer.toYangInstanceIdentifier(bindingContext.appConfigPath);
 
         LOG.debug("{}: Creating app config instance from path {}, Qname: {}", logName, yangPath,
@@ -110,22 +110,28 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
 
         checkNotNull(schemaService, "%s: Could not obtain the SchemaService OSGi service", logName);
 
-        SchemaContext schemaContext = schemaService.getGlobalContext();
+        EffectiveModelContext schemaContext = schemaService.getGlobalContext();
 
         Module module = schemaContext.findModule(bindingContext.bindingQName.getModule()).orElse(null);
         checkNotNull(module, "%s: Could not obtain the module schema for namespace %s, revision %s",
                 logName, bindingContext.bindingQName.getNamespace(), bindingContext.bindingQName.getRevision());
 
-        DataSchemaNode dataSchema = module.getDataChildByName(bindingContext.bindingQName);
-        checkNotNull(dataSchema, "%s: Could not obtain the schema for %s", logName, bindingContext.bindingQName);
+        final SchemaInferenceStack schemaStack = SchemaInferenceStack.of(schemaContext);
+        final SchemaTreeEffectiveStatement<?> dataSchema;
+        try {
+            dataSchema = schemaStack.enterSchemaTree(bindingContext.bindingQName);
+        } catch (IllegalArgumentException e) {
+            throw new ConfigXMLReaderException(
+                logName + ": Could not obtain the schema for " + bindingContext.bindingQName, e);
+        }
 
-        checkCondition(bindingContext.schemaType.isAssignableFrom(dataSchema.getClass()),
+        checkCondition(bindingContext.schemaType.isInstance(dataSchema),
                 "%s: Expected schema type %s for %s but actual type is %s", logName,
                 bindingContext.schemaType, bindingContext.bindingQName, dataSchema.getClass());
 
-        NormalizedNode<?, ?> dataNode = parsePossibleDefaultAppConfigXMLFile(schemaContext, dataSchema);
+        NormalizedNode dataNode = parsePossibleDefaultAppConfigXMLFile(schemaStack);
         if (dataNode == null) {
-            dataNode = fallback.get(schemaService.getGlobalContext(), dataSchema);
+            dataNode = fallback.get(schemaStack.toSchemaTreeInference());
         }
 
         DataObject appConfig = bindingSerializer.fromNormalizedNode(yangPath, dataNode).getValue();
@@ -149,12 +155,12 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
         }
     }
 
-    private NormalizedNode<?, ?> parsePossibleDefaultAppConfigXMLFile(final SchemaContext schemaContext,
-            final DataSchemaNode dataSchema) throws ConfigXMLReaderException {
-
+    private NormalizedNode parsePossibleDefaultAppConfigXMLFile(final SchemaInferenceStack schemaStack)
+            throws ConfigXMLReaderException {
         String appConfigFileName = defaultAppConfigFileName;
         if (Strings.isNullOrEmpty(appConfigFileName)) {
-            String moduleName = findYangModuleName(bindingContext.bindingQName, schemaContext);
+            String moduleName = schemaStack.currentModule().argument().getLocalName();
+
             appConfigFileName = moduleName + "_" + bindingContext.bindingQName.getLocalName() + ".xml";
         }
 
@@ -169,32 +175,19 @@ public class DataStoreAppConfigDefaultXMLReader<T extends DataObject> {
         if (!optionalURL.isPresent()) {
             return null;
         }
-        URL url = optionalURL.get();
+        URL url = optionalURL.orElseThrow();
         try (InputStream is = url.openStream()) {
             Document root = UntrustedXML.newDocumentBuilder().parse(is);
-            NormalizedNode<?, ?> dataNode = bindingContext.parseDataElement(root.getDocumentElement(), dataSchema,
-                    schemaContext);
+            NormalizedNode dataNode = bindingContext.parseDataElement(root.getDocumentElement(),
+                schemaStack.toSchemaTreeInference());
 
             LOG.debug("{}: Parsed data node: {}", logName, dataNode);
 
             return dataNode;
-        } catch (final IOException | SAXException | XMLStreamException | ParserConfigurationException
-                | URISyntaxException e) {
+        } catch (final IOException | SAXException | XMLStreamException | URISyntaxException e) {
             String msg = String.format("%s: Could not read/parse app config %s", logName, url);
             LOG.error(msg, e);
             throw new ConfigXMLReaderException(msg, e);
         }
     }
-
-    private String findYangModuleName(final QName qname, final SchemaContext schemaContext)
-            throws ConfigXMLReaderException {
-        for (Module m : schemaContext.getModules()) {
-            if (qname.getModule().equals(m.getQNameModule())) {
-                return m.getName();
-            }
-        }
-        throw new ConfigXMLReaderException(
-                String.format("%s: Could not find yang module for QName %s", logName, qname));
-    }
-
 }
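
The schema lookup above now goes through SchemaInferenceStack rather than resolving a DataSchemaNode from the SchemaContext. A minimal sketch of that lookup, using only the yangtools classes already imported by this file (the helper below is illustrative):

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
import org.opendaylight.yangtools.yang.model.util.SchemaInferenceStack;

final class SchemaInferenceSketch {
    // Positions an inference stack at the top-level schema tree node named by qname and captures
    // that position as a SchemaTreeInference, which XmlParserStream.create() accepts directly.
    static SchemaTreeInference inferenceFor(final EffectiveModelContext context, final QName qname) {
        final SchemaInferenceStack stack = SchemaInferenceStack.of(context);
        stack.enterSchemaTree(qname); // throws IllegalArgumentException if qname has no schema tree node
        return stack.toSchemaTreeInference();
    }
}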
index b018fc493a386276f37abc5749b5b3aee26bcb15..35cdf03e4c69e2b747f059a9f3e2c6cc81658525 100644 (file)
@@ -17,13 +17,11 @@ import java.util.Collection;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;
-import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.stream.XMLStreamException;
 import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader.ConfigURLProvider;
-import org.opendaylight.mdsal.binding.api.ClusteredDataTreeChangeListener;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.DataObjectModification;
 import org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType;
@@ -33,11 +31,11 @@ import org.opendaylight.mdsal.binding.api.ReadTransaction;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
 import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaTreeInference;
 import org.osgi.service.blueprint.container.ComponentDefinitionException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,7 +69,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
     private final AtomicBoolean readingInitialAppConfig = new AtomicBoolean(true);
 
     private volatile BindingContext bindingContext;
-    private volatile ListenerRegistration<?> appConfigChangeListenerReg;
+    private volatile Registration appConfigChangeListenerReg;
     private volatile DataObject currentAppConfig;
 
     // Note: the BindingNormalizedNodeSerializer interface is annotated as deprecated because there's an
@@ -87,7 +85,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
         this.defaultAppConfigFileName = defaultAppConfigFileName;
         this.appConfigBindingClassName = appConfigBindingClassName;
         this.appConfigListKeyValue = appConfigListKeyValue;
-        this.appConfigUpdateStrategy = updateStrategyValue;
+        appConfigUpdateStrategy = updateStrategyValue;
     }
 
     @Override
@@ -98,10 +96,10 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
         Class<DataObject> appConfigBindingClass;
         try {
             Class<?> bindingClass = container.getBundleContext().getBundle().loadClass(appConfigBindingClassName);
-            if (!DataObject.class.isAssignableFrom(bindingClass)) {
+            if (!ChildOf.class.isAssignableFrom(bindingClass)) {
                 throw new ComponentDefinitionException(String.format(
                         "%s: Specified app config binding class %s does not extend %s",
-                        logName(), appConfigBindingClassName, DataObject.class.getName()));
+                        logName(), appConfigBindingClassName, ChildOf.class.getName()));
             }
 
             appConfigBindingClass = (Class<DataObject>) bindingClass;
@@ -144,15 +142,12 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
 
         setDependencyDesc("Initial app config " + bindingContext.appConfigBindingClass.getSimpleName());
 
-        // We register a DTCL to get updates and also read the app config data from the data store. If
-        // the app config data is present then both the read and initial DTCN update will return it. If the
-        // the data isn't present, we won't get an initial DTCN update so the read will indicate the data
-        // isn't present.
-
-        DataTreeIdentifier<DataObject> dataTreeId = DataTreeIdentifier.create(LogicalDatastoreType.CONFIGURATION,
-                bindingContext.appConfigPath);
-        appConfigChangeListenerReg = dataBroker.registerDataTreeChangeListener(dataTreeId,
-                (ClusteredDataTreeChangeListener<DataObject>) this::onAppConfigChanged);
+        // We register a DTCL to get updates and also read the app config data from the data store. If the app config
+        // data is present then both the read and initial DTCN update will return it. If the data isn't present, we
+        // will not get an initial DTCN update so the read will indicate the data is not present.
+        appConfigChangeListenerReg = dataBroker.registerTreeChangeListener(
+            DataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, bindingContext.appConfigPath),
+            this::onAppConfigChanged);
 
         readInitialAppConfig(dataBroker);
     }
@@ -188,12 +183,12 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
     private void onAppConfigChanged(final Collection<DataTreeModification<DataObject>> changes) {
         for (DataTreeModification<DataObject> change: changes) {
             DataObjectModification<DataObject> changeRoot = change.getRootNode();
-            ModificationType type = changeRoot.getModificationType();
+            ModificationType type = changeRoot.modificationType();
 
             LOG.debug("{}: onAppConfigChanged: {}, {}", logName(), type, change.getRootPath());
 
             if (type == ModificationType.SUBTREE_MODIFIED || type == ModificationType.WRITE) {
-                DataObject newAppConfig = changeRoot.getDataAfter();
+                DataObject newAppConfig = changeRoot.dataAfter();
 
                 LOG.debug("New app config instance: {}, previous: {}", newAppConfig, currentAppConfig);
 
@@ -220,7 +215,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
         if (result) {
             DataObject localAppConfig;
             if (possibleAppConfig.isPresent()) {
-                localAppConfig = possibleAppConfig.get();
+                localAppConfig = possibleAppConfig.orElseThrow();
             } else {
                 // No app config data is present so create an empty instance via the bindingSerializer service.
                 // This will also return default values for leafs that haven't been explicitly set.
@@ -257,9 +252,9 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
             DataStoreAppConfigDefaultXMLReader<?> reader = new DataStoreAppConfigDefaultXMLReader<>(logName(),
                     defaultAppConfigFileName, getOSGiService(DOMSchemaService.class), bindingSerializer, bindingContext,
                     inputStreamProvider);
-            return reader.createDefaultInstance((schemaContext, dataSchema) -> {
+            return reader.createDefaultInstance(dataSchema -> {
                 // Fallback if file cannot be read, try XML from Config
-                NormalizedNode<?, ?> dataNode = parsePossibleDefaultAppConfigElement(schemaContext, dataSchema);
+                NormalizedNode dataNode = parsePossibleDefaultAppConfigElement(dataSchema);
                 if (dataNode == null) {
                     // or, as last resort, defaults from the model
                     return bindingContext.newDefaultNode(dataSchema);
@@ -268,8 +263,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
                 }
             });
 
-        } catch (final ConfigXMLReaderException | IOException | SAXException | XMLStreamException
-                | ParserConfigurationException | URISyntaxException e) {
+        } catch (ConfigXMLReaderException | IOException | SAXException | XMLStreamException | URISyntaxException e) {
             if (e.getCause() == null) {
                 setFailureMessage(e.getMessage());
             } else {
@@ -279,9 +273,8 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
         }
     }
 
-    private @Nullable NormalizedNode<?, ?> parsePossibleDefaultAppConfigElement(final SchemaContext schemaContext,
-            final DataSchemaNode dataSchema) throws URISyntaxException, IOException, ParserConfigurationException,
-            SAXException, XMLStreamException {
+    private @Nullable NormalizedNode parsePossibleDefaultAppConfigElement(final SchemaTreeInference dataSchema)
+            throws URISyntaxException, IOException, SAXException, XMLStreamException {
         if (defaultAppConfigElement == null) {
             return null;
         }
@@ -290,8 +283,7 @@ public class DataStoreAppConfigMetadata extends AbstractDependentComponentFactor
 
         LOG.debug("{}: Got app config schema: {}", logName(), dataSchema);
 
-        NormalizedNode<?, ?> dataNode = bindingContext.parseDataElement(defaultAppConfigElement, dataSchema,
-                schemaContext);
+        NormalizedNode dataNode = bindingContext.parseDataElement(defaultAppConfigElement, dataSchema);
 
         LOG.debug("{}: Parsed data node: {}", logName(), dataNode);
 
index 8139e1de52285dc76df7cb2daae020ed9e89ebdd..eaa1ce22c5e51c47ca9f3055b6bcb16ea668d1a1 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.blueprint.ext;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -24,7 +25,7 @@ class MandatoryServiceReferenceMetadata implements ServiceReferenceMetadata {
     private final String id;
 
     MandatoryServiceReferenceMetadata(final String id, final String interfaceClass) {
-        this.id = Preconditions.checkNotNull(id);
+        this.id = requireNonNull(id);
         this.interfaceClass = interfaceClass;
     }
 
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/NotificationListenerBean.java
deleted file mode 100644 (file)
index 74c2956..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.osgi.framework.Bundle;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "notification-listener" element that registers a NotificationListener
- * with the NotificationService.
- *
- * @author Thomas Pantelis
- */
-public class NotificationListenerBean {
-    private static final Logger LOG = LoggerFactory.getLogger(NotificationListenerBean.class);
-    static final String NOTIFICATION_LISTENER = "notification-listener";
-
-    private Bundle bundle;
-    private NotificationService notificationService;
-    private NotificationListener notificationListener;
-    private ListenerRegistration<?> registration;
-
-    public void setNotificationService(final NotificationService notificationService) {
-        this.notificationService = notificationService;
-    }
-
-    public void setNotificationListener(final NotificationListener notificationListener) {
-        this.notificationListener = notificationListener;
-    }
-
-    public void setBundle(final Bundle bundle) {
-        this.bundle = bundle;
-    }
-
-    public void init() {
-        LOG.debug("{}: init - registering NotificationListener {}", bundle.getSymbolicName(), notificationListener);
-
-        registration = notificationService.registerNotificationListener(notificationListener);
-    }
-
-    public void destroy() {
-        if (registration != null) {
-            LOG.debug("{}: destroy - closing ListenerRegistration {}", bundle.getSymbolicName(), notificationListener);
-            registration.close();
-        } else {
-            LOG.debug("{}: destroy - listener was not registered", bundle.getSymbolicName());
-        }
-    }
-}
index ff22f1bc30c64bb5af4375b0e79d50291275f962..371b7efecdec1fa6e67f700acce75e10aa4beebd 100644 (file)
@@ -11,12 +11,10 @@ import com.google.common.base.Strings;
 import java.io.IOException;
 import java.io.StringReader;
 import java.net.URL;
-import java.util.Collections;
 import java.util.Set;
 import org.apache.aries.blueprint.ComponentDefinitionRegistry;
 import org.apache.aries.blueprint.NamespaceHandler;
 import org.apache.aries.blueprint.ParserContext;
-import org.apache.aries.blueprint.ext.ComponentFactoryMetadata;
 import org.apache.aries.blueprint.mutable.MutableBeanMetadata;
 import org.apache.aries.blueprint.mutable.MutableRefMetadata;
 import org.apache.aries.blueprint.mutable.MutableReferenceMetadata;
@@ -24,11 +22,6 @@ import org.apache.aries.blueprint.mutable.MutableServiceMetadata;
 import org.apache.aries.blueprint.mutable.MutableServiceReferenceMetadata;
 import org.apache.aries.blueprint.mutable.MutableValueMetadata;
 import org.opendaylight.controller.blueprint.BlueprintContainerRestartService;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.mdsal.binding.api.NotificationService;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
 import org.opendaylight.yangtools.util.xml.UntrustedXML;
 import org.osgi.service.blueprint.container.ComponentDefinitionException;
 import org.osgi.service.blueprint.reflect.BeanMetadata;
@@ -55,32 +48,20 @@ import org.xml.sax.SAXException;
  */
 public final class OpendaylightNamespaceHandler implements NamespaceHandler {
     public static final String NAMESPACE_1_0_0 = "http://opendaylight.org/xmlns/blueprint/v1.0.0";
-    static final String ROUTED_RPC_REG_CONVERTER_NAME = "org.opendaylight.blueprint.RoutedRpcRegConverter";
-    static final String DOM_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.DOMRpcProviderService";
-    static final String RPC_REGISTRY_NAME = "org.opendaylight.blueprint.RpcRegistry";
-    static final String BINDING_RPC_PROVIDER_SERVICE_NAME = "org.opendaylight.blueprint.RpcProviderService";
-    static final String SCHEMA_SERVICE_NAME = "org.opendaylight.blueprint.SchemaService";
-    static final String NOTIFICATION_SERVICE_NAME = "org.opendaylight.blueprint.NotificationService";
-    static final String TYPE_ATTR = "type";
-    static final String UPDATE_STRATEGY_ATTR = "update-strategy";
 
     private static final Logger LOG = LoggerFactory.getLogger(OpendaylightNamespaceHandler.class);
+    private static final String TYPE_ATTR = "type";
+    private static final String UPDATE_STRATEGY_ATTR = "update-strategy";
     private static final String COMPONENT_PROCESSOR_NAME = ComponentProcessor.class.getName();
     private static final String RESTART_DEPENDENTS_ON_UPDATES = "restart-dependents-on-updates";
     private static final String USE_DEFAULT_FOR_REFERENCE_TYPES = "use-default-for-reference-types";
     private static final String CLUSTERED_APP_CONFIG = "clustered-app-config";
-    private static final String INTERFACE = "interface";
-    private static final String REF_ATTR = "ref";
     private static final String ID_ATTR = "id";
-    private static final String RPC_SERVICE = "rpc-service";
-    private static final String ACTION_SERVICE = "action-service";
-    private static final String SPECIFIC_SERVICE_REF_LIST = "specific-reference-list";
-    private static final String STATIC_REFERENCE = "static-reference";
 
     @SuppressWarnings("rawtypes")
     @Override
     public Set<Class> getManagedClasses() {
-        return Collections.emptySet();
+        return Set.of();
     }
 
     @Override
@@ -98,24 +79,8 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
     public Metadata parse(final Element element, final ParserContext context) {
         LOG.debug("In parse for {}", element);
 
-        if (nodeNameEquals(element, RpcImplementationBean.RPC_IMPLEMENTATION)) {
-            return parseRpcImplementation(element, context);
-        } else if (nodeNameEquals(element, RoutedRpcMetadata.ROUTED_RPC_IMPLEMENTATION)) {
-            return parseRoutedRpcImplementation(element, context);
-        } else if (nodeNameEquals(element, RPC_SERVICE)) {
-            return parseRpcService(element, context);
-        } else if (nodeNameEquals(element, NotificationListenerBean.NOTIFICATION_LISTENER)) {
-            return parseNotificationListener(element, context);
-        } else if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) {
+        if (nodeNameEquals(element, CLUSTERED_APP_CONFIG)) {
             return parseClusteredAppConfig(element, context);
-        } else if (nodeNameEquals(element, SPECIFIC_SERVICE_REF_LIST)) {
-            return parseSpecificReferenceList(element, context);
-        } else if (nodeNameEquals(element, STATIC_REFERENCE)) {
-            return parseStaticReference(element, context);
-        } else if (nodeNameEquals(element, ACTION_SERVICE)) {
-            return parseActionService(element, context);
-        } else if (nodeNameEquals(element, ActionProviderBean.ACTION_PROVIDER)) {
-            return parseActionProvider(element, context);
         }
 
         throw new ComponentDefinitionException("Unsupported standalone element: " + element.getNodeName());
@@ -147,12 +112,10 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
 
     private static ComponentMetadata decorateServiceType(final Attr attr, final ComponentMetadata component,
             final ParserContext context) {
-        if (!(component instanceof MutableServiceMetadata)) {
+        if (!(component instanceof MutableServiceMetadata service)) {
             throw new ComponentDefinitionException("Expected an instanceof MutableServiceMetadata");
         }
 
-        MutableServiceMetadata service = (MutableServiceMetadata)component;
-
         LOG.debug("decorateServiceType for {} - adding type property {}", service.getId(), attr.getValue());
 
         service.addServiceProperty(createValue(context, TYPE_ATTR), createValue(context, attr.getValue()));
@@ -238,129 +201,6 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
         return metadata;
     }
 
-    private static Metadata parseActionProvider(final Element element, final ParserContext context) {
-        registerDomRpcProviderServiceRefBean(context);
-        registerBindingRpcProviderServiceRefBean(context);
-        registerSchemaServiceRefBean(context);
-
-        MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), ActionProviderBean.class,
-                true, true);
-        addBlueprintBundleRefProperty(context, metadata);
-        metadata.addProperty("domRpcProvider", createRef(context, DOM_RPC_PROVIDER_SERVICE_NAME));
-        metadata.addProperty("bindingRpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME));
-        metadata.addProperty("schemaService", createRef(context, SCHEMA_SERVICE_NAME));
-        metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE)));
-
-        if (element.hasAttribute(REF_ATTR)) {
-            metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR)));
-        }
-
-        LOG.debug("parseActionProvider returning {}", metadata);
-        return metadata;
-    }
-
-
-    private static Metadata parseRpcImplementation(final Element element, final ParserContext context) {
-        registerBindingRpcProviderServiceRefBean(context);
-
-        MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), RpcImplementationBean.class,
-                true, true);
-        addBlueprintBundleRefProperty(context, metadata);
-        metadata.addProperty("rpcProvider", createRef(context, BINDING_RPC_PROVIDER_SERVICE_NAME));
-        metadata.addProperty("implementation", createRef(context, element.getAttribute(REF_ATTR)));
-
-        if (element.hasAttribute(INTERFACE)) {
-            metadata.addProperty("interfaceName", createValue(context, element.getAttribute(INTERFACE)));
-        }
-
-        LOG.debug("parseRpcImplementation returning {}", metadata);
-        return metadata;
-    }
-
-    private static Metadata parseRoutedRpcImplementation(final Element element, final ParserContext context) {
-        registerRefBean(context, RPC_REGISTRY_NAME, RpcProviderRegistry.class);
-        registerRoutedRpcRegistrationConverter(context);
-
-        ComponentFactoryMetadata metadata = new RoutedRpcMetadata(getId(context, element),
-                element.getAttribute(INTERFACE), element.getAttribute(REF_ATTR));
-
-        LOG.debug("parseRoutedRpcImplementation returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static Metadata parseActionService(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new ActionServiceMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseActionService returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static Metadata parseRpcService(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new RpcServiceMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseRpcService returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static void registerRoutedRpcRegistrationConverter(final ParserContext context) {
-        ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry();
-        if (registry.getComponentDefinition(ROUTED_RPC_REG_CONVERTER_NAME) == null) {
-            MutableBeanMetadata metadata = createBeanMetadata(context, ROUTED_RPC_REG_CONVERTER_NAME,
-                    RoutedRpcRegistrationConverter.class, false, false);
-            metadata.setActivation(ReferenceMetadata.ACTIVATION_LAZY);
-            registry.registerTypeConverter(metadata);
-        }
-    }
-
-    private static void registerDomRpcProviderServiceRefBean(final ParserContext context) {
-        registerRefBean(context, DOM_RPC_PROVIDER_SERVICE_NAME, DOMRpcProviderService.class);
-    }
-
-    private static void registerBindingRpcProviderServiceRefBean(final ParserContext context) {
-        registerRefBean(context, BINDING_RPC_PROVIDER_SERVICE_NAME, RpcProviderService.class);
-    }
-
-    private static void registerSchemaServiceRefBean(final ParserContext context) {
-        registerRefBean(context, SCHEMA_SERVICE_NAME, DOMSchemaService.class);
-    }
-
-    private static void registerRefBean(final ParserContext context, final String name, final Class<?> clazz) {
-        ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry();
-        if (registry.getComponentDefinition(name) == null) {
-            MutableReferenceMetadata metadata = createServiceRef(context, clazz, null);
-            metadata.setId(name);
-            registry.registerComponentDefinition(metadata);
-        }
-    }
-
-    private static Metadata parseNotificationListener(final Element element, final ParserContext context) {
-        registerNotificationServiceRefBean(context);
-
-        MutableBeanMetadata metadata = createBeanMetadata(context, context.generateId(), NotificationListenerBean.class,
-                true, true);
-        addBlueprintBundleRefProperty(context, metadata);
-        metadata.addProperty("notificationService", createRef(context, NOTIFICATION_SERVICE_NAME));
-        metadata.addProperty("notificationListener", createRef(context, element.getAttribute(REF_ATTR)));
-
-        LOG.debug("parseNotificationListener returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static void registerNotificationServiceRefBean(final ParserContext context) {
-        ComponentDefinitionRegistry registry = context.getComponentDefinitionRegistry();
-        if (registry.getComponentDefinition(NOTIFICATION_SERVICE_NAME) == null) {
-            MutableReferenceMetadata metadata = createServiceRef(context, NotificationService.class, null);
-            metadata.setId(NOTIFICATION_SERVICE_NAME);
-            registry.registerComponentDefinition(metadata);
-        }
-    }
-
     private static Metadata parseClusteredAppConfig(final Element element, final ParserContext context) {
         LOG.debug("parseClusteredAppConfig");
 
@@ -408,24 +248,6 @@ public final class OpendaylightNamespaceHandler implements NamespaceHandler {
         }
     }
 
-    private static Metadata parseSpecificReferenceList(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new SpecificReferenceListMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseSpecificReferenceList returning {}", metadata);
-
-        return metadata;
-    }
-
-    private static Metadata parseStaticReference(final Element element, final ParserContext context) {
-        ComponentFactoryMetadata metadata = new StaticReferenceMetadata(getId(context, element),
-                element.getAttribute(INTERFACE));
-
-        LOG.debug("parseStaticReference returning {}", metadata);
-
-        return metadata;
-    }
-
     private static Element parseXML(final String name, final String xml) {
         try {
             return UntrustedXML.newDocumentBuilder().parse(new InputSource(new StringReader(xml))).getDocumentElement();
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RoutedRpcMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RoutedRpcMetadata.java
deleted file mode 100644 (file)
index 725e568..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.Arrays;
-import java.util.List;
-import org.apache.aries.blueprint.ext.ComponentFactoryMetadata;
-import org.apache.aries.blueprint.services.ExtendedBlueprintContainer;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory metadata corresponding to the "routed-rpc-implementation" element that registers an RPC
- * implementation with the RpcProviderRegistry and provides the RoutedRpcRegistration instance to the
- * Blueprint container.
- *
- * @author Thomas Pantelis
- */
-class RoutedRpcMetadata implements ComponentFactoryMetadata {
-    private static final Logger LOG = LoggerFactory.getLogger(RoutedRpcMetadata.class);
-    static final String ROUTED_RPC_IMPLEMENTATION = "routed-rpc-implementation";
-
-    private final String id;
-    private final String interfaceName;
-    private final String implementationRefId;
-    private ExtendedBlueprintContainer container;
-
-    RoutedRpcMetadata(final String id, final String interfaceName, final String implementationRefId) {
-        this.id = id;
-        this.interfaceName = interfaceName;
-        this.implementationRefId = implementationRefId;
-    }
-
-    @Override
-    public String getId() {
-        return id;
-    }
-
-    @Override
-    public int getActivation() {
-        return ACTIVATION_LAZY;
-    }
-
-    @Override
-    public List<String> getDependsOn() {
-        return Arrays.asList(OpendaylightNamespaceHandler.RPC_REGISTRY_NAME, implementationRefId);
-    }
-
-    @Override
-    public void init(final ExtendedBlueprintContainer newContainer) {
-        this.container = newContainer;
-
-        LOG.debug("{}: In init", logName());
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    @Override
-    public Object create() throws ComponentDefinitionException {
-        RpcProviderRegistry rpcRegistry = (RpcProviderRegistry) container.getComponentInstance(
-                OpendaylightNamespaceHandler.RPC_REGISTRY_NAME);
-
-        Object implementation = container.getComponentInstance(implementationRefId);
-
-        try {
-            if (!RpcService.class.isAssignableFrom(implementation.getClass())) {
-                throw new ComponentDefinitionException(String.format(
-                        "Implementation \"ref\" instance %s for \"%s\" is not an RpcService",
-                        implementation.getClass(), ROUTED_RPC_IMPLEMENTATION));
-            }
-
-            List<Class<RpcService>> rpcInterfaces = RpcImplementationBean.getImplementedRpcServiceInterfaces(
-                    interfaceName, implementation.getClass(), container.getBundleContext().getBundle(),
-                    ROUTED_RPC_IMPLEMENTATION);
-
-            if (rpcInterfaces.size() > 1) {
-                throw new ComponentDefinitionException(String.format(
-                        "Implementation \"ref\" instance %s for \"%s\" implements more than one RpcService "
-                        + "interface (%s). Please specify the exact \"interface\"", implementation.getClass(),
-                        ROUTED_RPC_IMPLEMENTATION, rpcInterfaces));
-            }
-
-            Class<RpcService> rpcInterface = rpcInterfaces.iterator().next();
-
-            LOG.debug("{}: create - adding routed implementation {} for RpcService {}", logName(),
-                    implementation, rpcInterface);
-
-            return rpcRegistry.addRoutedRpcImplementation(rpcInterface, (RpcService)implementation);
-        } catch (final ComponentDefinitionException e) {
-            throw e;
-        } catch (final Exception e) {
-            throw new ComponentDefinitionException(String.format(
-                    "Error processing \"%s\" for %s", ROUTED_RPC_IMPLEMENTATION, implementation.getClass()), e);
-        }
-    }
-
-    @Override
-    public void destroy(final Object instance) {
-        LOG.debug("{}: In destroy: instance: {}", logName(), instance);
-
-        ((RoutedRpcRegistration<?>)instance).close();
-    }
-
-    private String logName() {
-        return (container != null ? container.getBundleContext().getBundle().getSymbolicName() : "") + " (" + id + ")";
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RoutedRpcRegistrationConverter.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RoutedRpcRegistrationConverter.java
deleted file mode 100644 (file)
index 6617443..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.osgi.service.blueprint.container.Converter;
-import org.osgi.service.blueprint.container.ReifiedType;
-
-/**
- * Implements a Converter that converts RoutedRpcRegistration instances. This is to work around an issue
- * when injecting a RoutedRpcRegistration instance into a bean where Aries is not able to convert the instance
- * returned from the RpcRegistryProvider to the desired generic RoutedRpcRegistration type specified in the
- * bean's setter method. This is because the actual instance class specifies a generic type variable T and,
- * even though it extends RpcService and should match, Aries doesn't handle it correctly.
- *
- * @author Thomas Pantelis
- */
-public class RoutedRpcRegistrationConverter implements Converter {
-    @Override
-    public boolean canConvert(final Object sourceObject, final ReifiedType targetType) {
-        return sourceObject instanceof RoutedRpcRegistration
-                && RoutedRpcRegistration.class.isAssignableFrom(targetType.getRawClass());
-    }
-
-    @Override
-    public Object convert(final Object sourceObject, final ReifiedType targetType) {
-        return sourceObject;
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcImplementationBean.java
deleted file mode 100644 (file)
index 94d5b3b..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.base.Strings;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.framework.Bundle;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Blueprint bean corresponding to the "rpc-implementation" element that registers an RPC implementation with
- * the RpcProviderRegistry.
- *
- * @author Thomas Pantelis
- */
-public class RpcImplementationBean {
-    private static final Logger LOG = LoggerFactory.getLogger(RpcImplementationBean.class);
-    static final String RPC_IMPLEMENTATION = "rpc-implementation";
-
-    private RpcProviderService rpcProvider;
-    private Bundle bundle;
-    private String interfaceName;
-    private RpcService implementation;
-    private final List<ObjectRegistration<RpcService>> rpcRegistrations = new ArrayList<>();
-
-    public void setRpcProvider(final RpcProviderService rpcProvider) {
-        this.rpcProvider = rpcProvider;
-    }
-
-    public void setBundle(final Bundle bundle) {
-        this.bundle = bundle;
-    }
-
-    public void setInterfaceName(final String interfaceName) {
-        this.interfaceName = interfaceName;
-    }
-
-    public void setImplementation(final RpcService implementation) {
-        this.implementation = implementation;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void init() {
-        try {
-            List<Class<RpcService>> rpcInterfaces = getImplementedRpcServiceInterfaces(interfaceName,
-                    implementation.getClass(), bundle, RPC_IMPLEMENTATION);
-
-            LOG.debug("{}: init - adding implementation {} for RpcService interface(s) {}", bundle.getSymbolicName(),
-                    implementation, rpcInterfaces);
-
-            for (Class<RpcService> rpcInterface : rpcInterfaces) {
-                rpcRegistrations.add(rpcProvider.registerRpcImplementation(rpcInterface, implementation));
-            }
-        } catch (final ComponentDefinitionException e) {
-            throw e;
-        } catch (final Exception e) {
-            throw new ComponentDefinitionException(String.format(
-                    "Error processing \"%s\" for %s", RPC_IMPLEMENTATION, implementation.getClass()), e);
-        }
-    }
-
-    public void destroy() {
-        for (ObjectRegistration<RpcService> reg: rpcRegistrations) {
-            reg.close();
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    static List<Class<RpcService>> getImplementedRpcServiceInterfaces(final String interfaceName,
-            final Class<?> implementationClass, final Bundle bundle, final String logName)
-            throws ClassNotFoundException {
-        if (!Strings.isNullOrEmpty(interfaceName)) {
-            Class<?> rpcInterface = bundle.loadClass(interfaceName);
-
-            if (!rpcInterface.isAssignableFrom(implementationClass)) {
-                throw new ComponentDefinitionException(String.format(
-                        "The specified \"interface\" %s for \"%s\" is not implemented by RpcService \"ref\" %s",
-                        interfaceName, logName, implementationClass));
-            }
-
-            return Collections.singletonList((Class<RpcService>)rpcInterface);
-        }
-
-        List<Class<RpcService>> rpcInterfaces = new ArrayList<>();
-        for (Class<?> intface : implementationClass.getInterfaces()) {
-            if (RpcService.class.isAssignableFrom(intface)) {
-                rpcInterfaces.add((Class<RpcService>) intface);
-            }
-        }
-
-        if (rpcInterfaces.isEmpty()) {
-            throw new ComponentDefinitionException(String.format(
-                    "The \"ref\" instance %s for \"%s\" does not implement any RpcService interfaces",
-                    implementationClass, logName));
-        }
-
-        return rpcInterfaces;
-    }
-}
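
The core of the removed bean is the fall-back branch of getImplementedRpcServiceInterfaces(): scan the
implementation class for interfaces assignable to a marker type and collect each one for registration.
A minimal, self-contained sketch of that scanning step in plain Java SE follows; Marker, FooRpc and
InterfaceScanner are hypothetical stand-ins for RpcService, a generated RPC interface and the bean itself.

    import java.util.ArrayList;
    import java.util.List;

    final class InterfaceScanner {
        interface Marker { }                    // stands in for RpcService
        interface FooRpc extends Marker { }     // stands in for a generated RPC interface
        static final class FooRpcImpl implements FooRpc { }

        // Collect every directly implemented interface that extends the marker type.
        static List<Class<? extends Marker>> scan(final Class<?> implementationClass) {
            final List<Class<? extends Marker>> result = new ArrayList<>();
            for (Class<?> candidate : implementationClass.getInterfaces()) {
                if (Marker.class.isAssignableFrom(candidate)) {
                    result.add(candidate.asSubclass(Marker.class));
                }
            }
            return result;
        }

        public static void main(final String[] args) {
            // Prints [interface InterfaceScanner$FooRpc]
            System.out.println(scan(FooRpcImpl.class));
        }
    }
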
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcServiceMetadata.java
deleted file mode 100644 (file)
index 4ab3867..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-
-/**
- * Factory metadata corresponding to the "rpc-service" element that gets an RPC service implementation from
- * the RpcProviderRegistry and provides it to the Blueprint container.
- *
- * @author Thomas Pantelis
- */
-final class RpcServiceMetadata extends AbstractInvokableServiceMetadata {
-    RpcServiceMetadata(final String id, final String interfaceName) {
-        super(id, interfaceName);
-    }
-
-    @Override
-    Predicate<RpcRoutingStrategy> rpcFilter() {
-        return s -> !s.isContextBasedRouted();
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/RpcUtil.java
deleted file mode 100644 (file)
index 97b816d..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.function.Predicate;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.mdsal.dom.spi.RpcRoutingStrategy;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Utility methods for dealing with various aspects of RPCs and actions.
- *
- * @author Robert Varga
- */
-final class RpcUtil {
-    private static final Logger LOG = LoggerFactory.getLogger(RpcUtil.class);
-
-    private RpcUtil() {
-        throw new UnsupportedOperationException();
-    }
-
-    static Collection<SchemaPath> decomposeRpcService(final Class<RpcService> service,
-            final SchemaContext schemaContext, final Predicate<RpcRoutingStrategy> filter) {
-        final QNameModule moduleName = BindingReflections.getQNameModule(service);
-        final Module module = schemaContext.findModule(moduleName).orElseThrow(() -> new IllegalArgumentException(
-                "Module not found in SchemaContext: " + moduleName + "; service: " + service));
-        LOG.debug("Resolved service {} to module {}", service, module);
-
-        final Collection<RpcDefinition> rpcs = module.getRpcs();
-        final Collection<SchemaPath> ret = new ArrayList<>(rpcs.size());
-        for (RpcDefinition rpc : rpcs) {
-            final RpcRoutingStrategy strategy = RpcRoutingStrategy.from(rpc);
-            if (filter.test(strategy)) {
-                ret.add(rpc.getPath());
-            }
-        }
-
-        return ret;
-    }
-}
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/SpecificReferenceListMetadata.java
deleted file mode 100644 (file)
index e781ddd..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.io.Resources;
-import java.io.IOException;
-import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentSkipListSet;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleEvent;
-import org.osgi.framework.ServiceReference;
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.osgi.util.tracker.BundleTracker;
-import org.osgi.util.tracker.BundleTrackerCustomizer;
-import org.osgi.util.tracker.ServiceTracker;
-import org.osgi.util.tracker.ServiceTrackerCustomizer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory metadata corresponding to the "specific-reference-list" element that obtains a specific list
- * of service instances from the OSGi registry for a given interface. The specific list is learned by first
- * extracting the list of expected service types by inspecting RESOLVED bundles for a resource file under
- * META-INF/services with the same name as the given interface. The type(s) listed in the resource file
- * must match the "type" property of the advertised service(s). In this manner, an app bundle announces the
- * service type(s) that it will advertise so that this class knows which services to expect up front. Once
- * all the expected services are obtained, the container is notified that all dependencies of this component
- * factory are satisfied.
- *
- * @author Thomas Pantelis
- */
-class SpecificReferenceListMetadata extends AbstractDependentComponentFactoryMetadata {
-    private static final Logger LOG = LoggerFactory.getLogger(SpecificReferenceListMetadata.class);
-
-    private final String interfaceName;
-    private final String serviceResourcePath;
-    private final Collection<String> expectedServiceTypes = new ConcurrentSkipListSet<>();
-    private final Collection<String> retrievedServiceTypes = new ConcurrentSkipListSet<>();
-    private final Collection<Object> retrievedServices = Collections.synchronizedList(new ArrayList<>());
-    private volatile BundleTracker<Bundle> bundleTracker;
-    private volatile ServiceTracker<Object, Object> serviceTracker;
-
-    SpecificReferenceListMetadata(final String id, final String interfaceName) {
-        super(id);
-        this.interfaceName = interfaceName;
-        serviceResourcePath = "META-INF/services/" + interfaceName;
-    }
-
-    @Override
-    protected void startTracking() {
-        BundleTrackerCustomizer<Bundle> bundleListener = new BundleTrackerCustomizer<Bundle>() {
-            @Override
-            public Bundle addingBundle(final Bundle bundle, final BundleEvent event) {
-                bundleAdded(bundle);
-                return bundle;
-            }
-
-            @Override
-            public void modifiedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) {
-            }
-
-            @Override
-            public void removedBundle(final Bundle bundle, final BundleEvent event, final Bundle object) {
-            }
-        };
-
-        bundleTracker = new BundleTracker<>(container().getBundleContext(), Bundle.RESOLVED | Bundle.STARTING
-                | Bundle.STOPPING | Bundle.ACTIVE, bundleListener);
-
-        // This will get the list of all current RESOLVED+ bundles.
-        bundleTracker.open();
-
-        if (expectedServiceTypes.isEmpty()) {
-            setSatisfied();
-            return;
-        }
-
-        ServiceTrackerCustomizer<Object, Object> serviceListener = new ServiceTrackerCustomizer<Object, Object>() {
-            @Override
-            public Object addingService(final ServiceReference<Object> reference) {
-                return serviceAdded(reference);
-            }
-
-            @Override
-            public void modifiedService(final ServiceReference<Object> reference, final Object service) {
-            }
-
-            @Override
-            public void removedService(final ServiceReference<Object> reference, final Object service) {
-                container().getBundleContext().ungetService(reference);
-            }
-        };
-
-        setDependencyDesc(interfaceName + " services with types " + expectedServiceTypes);
-
-        serviceTracker = new ServiceTracker<>(container().getBundleContext(), interfaceName, serviceListener);
-        serviceTracker.open();
-    }
-
-    private void bundleAdded(final Bundle bundle) {
-        URL resource = bundle.getEntry(serviceResourcePath);
-        if (resource == null) {
-            return;
-        }
-
-        LOG.debug("{}: Found {} resource in bundle {}", logName(), resource, bundle.getSymbolicName());
-
-        try {
-            for (String line : Resources.readLines(resource, StandardCharsets.UTF_8)) {
-                int ci = line.indexOf('#');
-                if (ci >= 0) {
-                    line = line.substring(0, ci);
-                }
-
-                line = line.trim();
-                if (line.isEmpty()) {
-                    continue;
-                }
-
-                String serviceType = line;
-                LOG.debug("{}: Retrieved service type {}", logName(), serviceType);
-                expectedServiceTypes.add(serviceType);
-            }
-        } catch (final IOException e) {
-            setFailure(String.format("%s: Error reading resource %s from bundle %s", logName(), resource,
-                    bundle.getSymbolicName()), e);
-        }
-    }
-
-    private Object serviceAdded(final ServiceReference<Object> reference) {
-        Object service = container().getBundleContext().getService(reference);
-        String serviceType = (String) reference.getProperty(OpendaylightNamespaceHandler.TYPE_ATTR);
-
-        LOG.debug("{}: Service type {} added from bundle {}", logName(), serviceType,
-                reference.getBundle().getSymbolicName());
-
-        if (serviceType == null) {
-            LOG.error("{}: Missing OSGi service property '{}' for service interface {} in bundle {}", logName(),
-                    OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName,  reference.getBundle().getSymbolicName());
-            return service;
-        }
-
-        if (!expectedServiceTypes.contains(serviceType)) {
-            LOG.error("{}: OSGi service property '{}' for service interface {} in bundle {} was not found in the "
-                    + "expected service types {} obtained via {} bundle resources. Is the bundle resource missing or "
-                    + "the service type misspelled?", logName(), OpendaylightNamespaceHandler.TYPE_ATTR, interfaceName,
-                    reference.getBundle().getSymbolicName(), expectedServiceTypes, serviceResourcePath);
-            return service;
-        }
-
-        // If already satisfied, meaning we got all initial services, then a new bundle must've been
-        // dynamically installed or a prior service's blueprint container was restarted, in which case we
-        // restart our container.
-        if (isSatisfied()) {
-            restartContainer();
-        } else {
-            retrievedServiceTypes.add(serviceType);
-            retrievedServices.add(service);
-
-            if (retrievedServiceTypes.equals(expectedServiceTypes)) {
-                LOG.debug("{}: Got all expected service types", logName());
-                setSatisfied();
-            } else {
-                Set<String> remaining = new HashSet<>(expectedServiceTypes);
-                remaining.removeAll(retrievedServiceTypes);
-                setDependencyDesc(interfaceName + " services with types " + remaining);
-            }
-        }
-
-        return service;
-    }
-
-    @Override
-    public Object create() throws ComponentDefinitionException {
-        LOG.debug("{}: In create: interfaceName: {}", logName(), interfaceName);
-
-        super.onCreate();
-
-        LOG.debug("{}: create returning service list {}", logName(), retrievedServices);
-
-        synchronized (retrievedServices) {
-            return ImmutableList.copyOf(retrievedServices);
-        }
-    }
-
-    @Override
-    public void destroy(final Object instance) {
-        super.destroy(instance);
-
-        if (bundleTracker != null) {
-            bundleTracker.close();
-            bundleTracker = null;
-        }
-
-        if (serviceTracker != null) {
-            serviceTracker.close();
-            serviceTracker = null;
-        }
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder builder = new StringBuilder();
-        builder.append("SpecificReferenceListMetadata [interfaceName=").append(interfaceName)
-                .append(", serviceResourcePath=").append(serviceResourcePath).append("]");
-        return builder.toString();
-    }
-}
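
The expected-service-type discovery in bundleAdded() above amounts to reading a META-INF/services-style
resource line by line, dropping '#' comments and blank lines, and collecting what remains. A
self-contained sketch of just that parsing using only the JDK; ServiceTypeParser and readServiceTypes
are hypothetical names, not part of the class above.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;
    import java.util.LinkedHashSet;
    import java.util.Set;

    final class ServiceTypeParser {
        static Set<String> readServiceTypes(final InputStream in) throws IOException {
            final Set<String> types = new LinkedHashSet<>();
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    final int comment = line.indexOf('#');
                    if (comment >= 0) {
                        line = line.substring(0, comment);  // strip trailing comment
                    }
                    line = line.trim();
                    if (!line.isEmpty()) {
                        types.add(line);                    // one expected service type per line
                    }
                }
            }
            return types;
        }
    }
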
diff --git a/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java b/opendaylight/blueprint/src/main/java/org/opendaylight/controller/blueprint/ext/StaticReferenceMetadata.java
deleted file mode 100644 (file)
index 97c04af..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.blueprint.ext;
-
-import org.osgi.service.blueprint.container.ComponentDefinitionException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Factory metadata corresponding to the "static-reference" element that obtains an OSGi service and
- * returns the actual instance. This differs from the standard "reference" element that returns a dynamic
- * proxy whose underlying service instance can come and go.
- *
- * @author Thomas Pantelis
- */
-class StaticReferenceMetadata extends AbstractDependentComponentFactoryMetadata {
-    private static final Logger LOG = LoggerFactory.getLogger(StaticReferenceMetadata.class);
-
-    private final String interfaceName;
-    private volatile Object retrievedService;
-
-    StaticReferenceMetadata(final String id, final String interfaceName) {
-        super(id);
-        this.interfaceName = interfaceName;
-    }
-
-    @Override
-    protected void startTracking() {
-        retrieveService(interfaceName, interfaceName, service -> {
-            retrievedService = service;
-            setSatisfied();
-        });
-    }
-
-    @Override
-    public Object create() throws ComponentDefinitionException {
-        super.onCreate();
-
-        LOG.debug("{}: create returning service {}", logName(), retrievedService);
-
-        return retrievedService;
-    }
-
-    @Override
-    public String toString() {
-        StringBuilder builder = new StringBuilder();
-        builder.append("StaticReferenceMetadata [interfaceName=").append(interfaceName).append("]");
-        return builder.toString();
-    }
-}
index 43ad997ea42cdd50ec522fba6072c9fe2222c98e..26246f9d08876a1785a675a1bb108f230f398be3 100644 (file)
@@ -7,7 +7,9 @@
  */
 package org.opendaylight.controller.blueprint.ext;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collections;
 import java.util.function.Consumer;
 import org.apache.aries.blueprint.container.AbstractServiceReferenceRecipe;
@@ -27,10 +29,6 @@ import org.slf4j.LoggerFactory;
 class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe {
     private static final Logger LOG = LoggerFactory.getLogger(StaticServiceReferenceRecipe.class);
 
-    private static final SatisfactionListener NOOP_LISTENER = satisfiable -> {
-        // Intentional NOOP
-    };
-
     private volatile ServiceReference<?> trackedServiceReference;
     private volatile Object trackedService;
     private Consumer<Object> serviceSatisfiedCallback;
@@ -42,8 +40,10 @@ class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe {
     }
 
     void startTracking(final Consumer<Object> newServiceSatisfiedCallback) {
-        this.serviceSatisfiedCallback = newServiceSatisfiedCallback;
-        super.start(NOOP_LISTENER);
+        serviceSatisfiedCallback = newServiceSatisfiedCallback;
+        super.start(satisfiable -> {
+            // Intentional NOOP
+        });
     }
 
     @SuppressWarnings("rawtypes")
@@ -106,15 +106,12 @@ class StaticServiceReferenceRecipe extends AbstractServiceReferenceRecipe {
             return trackedService;
         }
 
-        Preconditions.checkNotNull(localTrackedServiceReference, "trackedServiceReference is null");
-
-        trackedService = getServiceSecurely(localTrackedServiceReference);
+        trackedService = getServiceSecurely(requireNonNull(localTrackedServiceReference,
+            "trackedServiceReference is null"));
 
         LOG.debug("{}: Returning service instance: {}", getName(), trackedService);
 
-        Preconditions.checkNotNull(trackedService, "getService() returned null for %s", localTrackedServiceReference);
-
-        return trackedService;
+        return checkNotNull(trackedService, "getService() returned null for %s", localTrackedServiceReference);
     }
 
     @Override
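
The hunk above trades Guava's Preconditions.checkNotNull for statically imported requireNonNull and
checkNotNull. A short sketch of the two styles it switches between, assuming Guava is on the classpath;
NullCheckStyles and lookup() are hypothetical names.

    import static com.google.common.base.Preconditions.checkNotNull;
    import static java.util.Objects.requireNonNull;

    final class NullCheckStyles {
        static Object lookup(final Object reference, final Object service) {
            // JDK variant: fixed message, no formatting.
            final Object ref = requireNonNull(reference, "trackedServiceReference is null");
            // Guava variant: %s template, formatted only when the check fails.
            return checkNotNull(service, "getService() returned null for %s", ref);
        }
    }
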
index 5bd8ed042cb32a69fd231a0ddf9b6d1142e05802..297bbb439404c903c0921b9a99747da4ceeedbc3 100644 (file)
@@ -3,6 +3,14 @@
     targetNamespace="http://opendaylight.org/xmlns/blueprint/v1.0.0" elementFormDefault="qualified"
     attributeFormDefault="unqualified" version="1.0.0">
 
+  <!--
+   Copyright © 2016, 2018 Brocade Communications Systems, Inc. and others.
+
+   This program and the accompanying materials are made available under the
+   terms of the Eclipse Public License v1.0 which accompanies this distribution,
+   and is available at http://www.eclipse.org/legal/epl-v10.html
+   -->
+
   <xsd:import namespace="http://www.osgi.org/xmlns/blueprint/v1.0.0"/>
 
   <xsd:attribute name="restart-dependents-on-updates" type="xsd:boolean"/>
   </xsd:complexType>
   <xsd:element name="rpc-implementation" type="TrpcImplementation"/>
 
-  <!--
-       To be deprecated. This interface contract is fulfilled by
-       action-implementation instead
-   -->
-  <xsd:complexType name="TroutedRpcImplementation">
-    <xsd:attribute name="interface" type="bp:Tclass" use="optional"/>
-    <xsd:attribute name="ref" type="bp:Tidref" use="required"/>
-    <xsd:attribute name="id" type="xsd:ID"/>
-  </xsd:complexType>
-  <xsd:element name="routed-rpc-implementation" type="TroutedRpcImplementation"/>
-
   <xsd:complexType name="TrpcService">
     <xsd:attribute name="interface" type="bp:Tclass" use="required"/>
     <xsd:attribute name="id" type="xsd:ID"/>
index a1cf3d729b6dc634d14ad58e95588d831ab87ff1..dedf8e1c59b2a3aaadd1eb7daa0971d461a7c396 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.blueprint.tests;
 
-import static com.google.common.truth.Truth.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
 
 import org.junit.Test;
 import org.opendaylight.controller.blueprint.ext.DataStoreAppConfigDefaultXMLReader;
@@ -21,26 +22,26 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
  * @author Michael Vorburger.ch
  */
 public class DataStoreAppConfigDefaultXMLReaderTest extends AbstractConcurrentDataBrokerTest {
-
     @Test
     public void testConfigXML() throws Exception {
-        Lists lists = new DataStoreAppConfigDefaultXMLReader<>(
-                getClass(), "/opendaylight-sal-test-store-config.xml",
-                getDataBrokerTestCustomizer().getSchemaService(),
-                getDataBrokerTestCustomizer().getBindingToNormalized(),
-                Lists.class).createDefaultInstance();
+        Lists lists = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/opendaylight-sal-test-store-config.xml",
+            getDataBrokerTestCustomizer().getSchemaService(),
+            getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class)
+            .createDefaultInstance();
 
-        UnorderedList element = lists.getUnorderedContainer().getUnorderedList().get(0);
-        assertThat(element.getName()).isEqualTo("someName");
-        assertThat(element.getValue()).isEqualTo("someValue");
+        UnorderedList element = lists.nonnullUnorderedContainer().nonnullUnorderedList().values().iterator().next();
+        assertEquals("someName", element.getName());
+        assertEquals("someValue", element.getValue());
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test
     public void testBadXMLName() throws Exception {
-        new DataStoreAppConfigDefaultXMLReader<>(
-                getClass(), "/badname.xml",
-                getDataBrokerTestCustomizer().getSchemaService(),
-                getDataBrokerTestCustomizer().getBindingToNormalized(),
-                Lists.class).createDefaultInstance();
+        final var reader = new DataStoreAppConfigDefaultXMLReader<>(getClass(), "/badname.xml",
+            getDataBrokerTestCustomizer().getSchemaService(),
+            getDataBrokerTestCustomizer().getAdapterContext().currentSerializer(), Lists.class);
+
+        final String message = assertThrows(IllegalArgumentException.class, reader::createDefaultInstance).getMessage();
+        assertEquals("resource /badname.xml relative to " + DataStoreAppConfigDefaultXMLReaderTest.class.getName()
+            + " not found.", message);
     }
 }
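
The test migration above replaces @Test(expected = ...) with org.junit.Assert.assertThrows, which
returns the caught exception so its message can be verified as well. A self-contained sketch of the
pattern, assuming JUnit 4.13+; ThrowingThing and boom() are hypothetical.

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertThrows;

    import org.junit.Test;

    public class AssertThrowsStyleTest {
        static final class ThrowingThing {
            Object boom() {
                throw new IllegalArgumentException("resource /badname.xml not found.");
            }
        }

        @Test
        public void exceptionMessageIsVerifiable() {
            final ThrowingThing thing = new ThrowingThing();
            // assertThrows returns the thrown exception, unlike @Test(expected = ...).
            final IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, thing::boom);
            assertEquals("resource /badname.xml not found.", ex.getMessage());
        }
    }
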
index b2744b2fea9178b69666abda9f5eeb5db3aaa18e..f2fa832663fd951dd76f56d1a05ede7ced8a1e61 100644 (file)
@@ -1,3 +1,10 @@
+<!--
+ Copyright © 2016, 2018 Brocade Communications Systems, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
 <lists xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:test:store">
   <unordered-container>
     <unordered-list>
diff --git a/opendaylight/config/config-artifacts/pom.xml b/opendaylight/config/config-artifacts/pom.xml
deleted file mode 100644 (file)
index b684ce3..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
-    Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
-
-    This program and the accompanying materials are made available under the
-    terms of the Eclipse Public License v1.0 which accompanies this distribution,
-    and is available at http://www.eclipse.org/legal/epl-v10.html
- -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>config-artifacts</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <packaging>pom</packaging>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                 <groupId>${project.groupId}</groupId>
-                 <artifactId>netty-config-api</artifactId>
-                 <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>netty-event-executor-config</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>netty-threadgroup-config</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>netty-timer-config</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>threadpool-config-api</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-            <dependency>
-                <groupId>${project.groupId}</groupId>
-                <artifactId>threadpool-config-impl</artifactId>
-                <version>${project.version}</version>
-            </dependency>
-        </dependencies>
-    </dependencyManagement>
-</project>
-
diff --git a/opendaylight/config/netty-config-api/pom.xml b/opendaylight/config/netty-config-api/pom.xml
deleted file mode 100644 (file)
index 2baa40d..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>config-subsystem</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <relativePath>../</relativePath>
-  </parent>
-  <artifactId>netty-config-api</artifactId>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-
-  <dependencies>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-transport</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/netty-event-executor-config/pom.xml b/opendaylight/config/netty-event-executor-config/pom.xml
deleted file mode 100644 (file)
index 9995f56..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>config-subsystem</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <relativePath>../</relativePath>
-  </parent>
-  <artifactId>netty-event-executor-config</artifactId>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-  <description>Configuration Wrapper around netty's event executor</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>netty-config-api</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java b/opendaylight/config/netty-event-executor-config/src/main/java/org/opendaylight/controller/config/yang/netty/eventexecutor/AutoCloseableEventExecutor.java
deleted file mode 100644 (file)
index 7162eb6..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.eventexecutor;
-
-import com.google.common.reflect.AbstractInvocationHandler;
-import com.google.common.reflect.Reflection;
-import io.netty.util.concurrent.EventExecutor;
-import io.netty.util.concurrent.GlobalEventExecutor;
-import io.netty.util.concurrent.ImmediateEventExecutor;
-import java.lang.reflect.Method;
-import java.util.concurrent.TimeUnit;
-
-public interface AutoCloseableEventExecutor extends EventExecutor, AutoCloseable {
-
-    static AutoCloseableEventExecutor globalEventExecutor() {
-        return CloseableEventExecutorMixin.createCloseableProxy(GlobalEventExecutor.INSTANCE);
-    }
-
-    static AutoCloseableEventExecutor immediateEventExecutor() {
-        return CloseableEventExecutorMixin.createCloseableProxy(ImmediateEventExecutor.INSTANCE);
-    }
-
-    class CloseableEventExecutorMixin implements AutoCloseable {
-        public static final int DEFAULT_SHUTDOWN_SECONDS = 1;
-        private final EventExecutor eventExecutor;
-
-        public CloseableEventExecutorMixin(final EventExecutor eventExecutor) {
-            this.eventExecutor = eventExecutor;
-        }
-
-        @Override
-        public void close() throws Exception {
-            eventExecutor.shutdownGracefully(0, DEFAULT_SHUTDOWN_SECONDS, TimeUnit.SECONDS);
-        }
-
-
-        private static AutoCloseableEventExecutor createCloseableProxy(final EventExecutor eventExecutor) {
-            final CloseableEventExecutorMixin closeableEventExecutor = new CloseableEventExecutorMixin(eventExecutor);
-            return Reflection.newProxy(AutoCloseableEventExecutor.class, new AbstractInvocationHandler() {
-                @Override
-                protected Object handleInvocation(final Object proxy, final Method method, final Object[] args)
-                        throws Throwable {
-                    if (method.getName().equals("close")) {
-                        closeableEventExecutor.close();
-                        return null;
-                    } else {
-                        return method.invoke(closeableEventExecutor.eventExecutor, args);
-                    }
-                }
-            });
-        }
-    }
-}
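
The removed interface builds an AutoCloseable view of a Netty executor by wrapping it in a dynamic
proxy that intercepts close() and delegates everything else. The same pattern can be sketched with only
java.lang.reflect.Proxy, no Guava; CloseableProxies, CloseableRunner and the Runnable delegate are
hypothetical simplifications, not the original API.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Proxy;

    final class CloseableProxies {
        interface CloseableRunner extends Runnable, AutoCloseable { }

        static CloseableRunner closeableProxy(final Runnable delegate, final Runnable onClose) {
            return (CloseableRunner) Proxy.newProxyInstance(
                CloseableRunner.class.getClassLoader(),
                new Class<?>[] { CloseableRunner.class },
                (proxy, method, args) -> {
                    if (method.getName().equals("close")) {
                        onClose.run();                        // intercept close() only
                        return null;
                    }
                    try {
                        return method.invoke(delegate, args); // delegate everything else
                    } catch (InvocationTargetException e) {
                        throw e.getCause();
                    }
                });
        }

        public static void main(final String[] args) throws Exception {
            try (CloseableRunner runner = closeableProxy(
                    () -> System.out.println("run"), () -> System.out.println("closed"))) {
                runner.run();
            }
        }
    }
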
diff --git a/opendaylight/config/netty-event-executor-config/src/main/resources/OSGI-INF/blueprint/netty-event-executor.xml b/opendaylight/config/netty-event-executor-config/src/main/resources/OSGI-INF/blueprint/netty-event-executor.xml
deleted file mode 100644 (file)
index 0e845df..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0">
-
-  <bean id="executor" class="org.opendaylight.controller.config.yang.netty.eventexecutor.AutoCloseableEventExecutor"
-          factory-method="globalEventExecutor" destroy-method="close"/>
-
-  <service ref="executor" interface="io.netty.util.concurrent.EventExecutor"
-        odl:type="global-event-executor"/>
-
-</blueprint>
diff --git a/opendaylight/config/netty-threadgroup-config/pom.xml b/opendaylight/config/netty-threadgroup-config/pom.xml
deleted file mode 100644 (file)
index 835d53f..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>config-subsystem</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <relativePath>../</relativePath>
-  </parent>
-  <artifactId>netty-threadgroup-config</artifactId>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-  <description>Configuration Wrapper around netty's event group</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>netty-config-api</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/NioEventLoopGroupCloseable.java b/opendaylight/config/netty-threadgroup-config/src/main/java/org/opendaylight/controller/config/yang/netty/threadgroup/NioEventLoopGroupCloseable.java
deleted file mode 100644 (file)
index 51bf1f0..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.threadgroup;
-
-import io.netty.channel.nio.NioEventLoopGroup;
-import java.util.concurrent.TimeUnit;
-
-public class NioEventLoopGroupCloseable extends NioEventLoopGroup implements AutoCloseable {
-    private NioEventLoopGroupCloseable(final int threadCount) {
-        super(threadCount);
-    }
-
-    private NioEventLoopGroupCloseable() {
-    }
-
-    @Override
-    public void close() {
-        shutdownGracefully(0, 1, TimeUnit.SECONDS);
-    }
-
-    public static NioEventLoopGroupCloseable newInstance(final Integer threadCount) {
-        if(threadCount == null || threadCount <= 0) {
-            return new NioEventLoopGroupCloseable();
-        }
-
-        return new NioEventLoopGroupCloseable(threadCount);
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/config/netty-threadgroup-config/src/main/resources/OSGI-INF/blueprint/netty-threadgroup.xml b/opendaylight/config/netty-threadgroup-config/src/main/resources/OSGI-INF/blueprint/netty-threadgroup.xml
deleted file mode 100644 (file)
index 77e2556..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:restart-dependents-on-updates="false">
-
-  <cm:property-placeholder persistent-id="org.opendaylight.netty.threadgroup" update-strategy="none">
-    <cm:default-properties>
-      <!-- 0 means use the default number of threads which is 2 * number of CPUs -->
-      <cm:property name="global-boss-group-thread-count" value="0"/>
-      <cm:property name="global-worker-group-thread-count" value="0"/>
-    </cm:default-properties>
-  </cm:property-placeholder>
-
-  <bean id="globalBossGroup" class="org.opendaylight.controller.config.yang.netty.threadgroup.NioEventLoopGroupCloseable"
-          factory-method="newInstance">
-    <argument value="${global-boss-group-thread-count}"/>
-  </bean>
-
-  <service ref="globalBossGroup" interface="io.netty.channel.EventLoopGroup" odl:type="global-boss-group">
-  <service-properties>
-      <entry key="config-module-namespace" value="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup"/>
-      <entry key="config-module-name" value="netty-threadgroup-fixed"/>
-      <entry key="config-instance-name" value="global-boss-group"/>
-    </service-properties>
-  </service>
-
-  <bean id="globalWorkerGroup" class="org.opendaylight.controller.config.yang.netty.threadgroup.NioEventLoopGroupCloseable"
-          factory-method="newInstance">
-    <argument value="${global-worker-group-thread-count}"/>
-  </bean>
-
-  <service ref="globalWorkerGroup" interface="io.netty.channel.EventLoopGroup" odl:type="global-worker-group">
-  <service-properties>
-      <entry key="config-module-namespace" value="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup"/>
-      <entry key="config-module-name" value="netty-threadgroup-fixed"/>
-      <entry key="config-instance-name" value="global-worker-group"/>
-    </service-properties>
-  </service>
-
-</blueprint>
diff --git a/opendaylight/config/netty-timer-config/pom.xml b/opendaylight/config/netty-timer-config/pom.xml
deleted file mode 100644 (file)
index c784c76..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>config-subsystem</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <relativePath>../</relativePath>
-  </parent>
-  <artifactId>netty-timer-config</artifactId>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-  <description>Configuration Wrapper around netty's timer</description>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>netty-config-api</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java b/opendaylight/config/netty-timer-config/src/main/java/org/opendaylight/controller/config/yang/netty/timer/HashedWheelTimerCloseable.java
deleted file mode 100644 (file)
index b519af1..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.netty.timer;
-
-import io.netty.util.HashedWheelTimer;
-import io.netty.util.Timeout;
-import io.netty.util.Timer;
-import io.netty.util.TimerTask;
-import java.util.Set;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import org.eclipse.jdt.annotation.Nullable;
-
-public final class HashedWheelTimerCloseable implements AutoCloseable, Timer {
-
-    private final Timer timer;
-
-    private HashedWheelTimerCloseable(final Timer timer) {
-        this.timer = timer;
-    }
-
-    @Override
-    public void close() {
-        stop();
-    }
-
-    @Override
-    public Timeout newTimeout(final TimerTask task, final long delay, final TimeUnit unit) {
-        return this.timer.newTimeout(task, delay, unit);
-    }
-
-    @Override
-    public Set<Timeout> stop() {
-        return this.timer.stop();
-    }
-
-    public static HashedWheelTimerCloseable newInstance(final @Nullable Long duration,
-            final @Nullable Integer ticksPerWheel) {
-        return newInstance(null, duration, ticksPerWheel);
-    }
-
-    public static HashedWheelTimerCloseable newInstance(final @Nullable ThreadFactory threadFactory,
-            final @Nullable Long duration, final @Nullable Integer ticksPerWheel) {
-        TimeUnit unit = TimeUnit.MILLISECONDS;
-        if(!nullOrNonPositive(duration) && threadFactory == null && nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit));
-        }
-
-        if(!nullOrNonPositive(duration) && threadFactory == null && !nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(new HashedWheelTimer(duration, unit, ticksPerWheel));
-        }
-
-        if(nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(new HashedWheelTimer(threadFactory));
-        }
-
-        if(!nullOrNonPositive(duration) && threadFactory != null && nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(
-                    new HashedWheelTimer(threadFactory, duration, unit));
-        }
-
-        if(!nullOrNonPositive(duration) && threadFactory != null && !nullOrNonPositive(ticksPerWheel)) {
-            return new HashedWheelTimerCloseable(
-                    new HashedWheelTimer(threadFactory, duration, unit, ticksPerWheel));
-        }
-
-        return new HashedWheelTimerCloseable(new HashedWheelTimer());
-    }
-
-    private static boolean nullOrNonPositive(final Number n) {
-        return n == null || n.longValue() <= 0;
-    }
-}
diff --git a/opendaylight/config/netty-timer-config/src/main/resources/OSGI-INF/blueprint/netty-timer.xml b/opendaylight/config/netty-timer-config/src/main/resources/OSGI-INF/blueprint/netty-timer.xml
deleted file mode 100644 (file)
index b198449..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:restart-dependents-on-updates="false">
-
-  <cm:property-placeholder persistent-id="org.opendaylight.netty.timer" update-strategy="none">
-    <cm:default-properties>
-      <cm:property name="tick-duration" value="0"/>
-      <cm:property name="ticks-per-wheel" value="0"/>
-    </cm:default-properties>
-  </cm:property-placeholder>
-
-  <bean id="timer" class="org.opendaylight.controller.config.yang.netty.timer.HashedWheelTimerCloseable"
-          factory-method="newInstance">
-    <argument value="${tick-duration}"/>
-    <argument value="${ticks-per-wheel}"/>
-  </bean>
-
-  <service ref="timer" interface="io.netty.util.Timer" odl:type="global-timer"/>
-
-</blueprint>
diff --git a/opendaylight/config/pom.xml b/opendaylight/config/pom.xml
deleted file mode 100644 (file)
index 629c8c0..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.mdsal</groupId>
-    <artifactId>binding-parent</artifactId>
-    <version>3.0.6</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>config-subsystem</artifactId>
-  <version>0.11.0-SNAPSHOT</version>
-  <packaging>pom</packaging>
-  <name>${project.artifactId}</name>
-
-  <modules>
-    <module>threadpool-config-api</module>
-    <module>netty-config-api</module>
-    <module>threadpool-config-impl</module>
-    <module>netty-threadgroup-config</module>
-    <module>netty-event-executor-config</module>
-    <module>netty-timer-config</module>
-    <module>config-artifacts</module>
-  </modules>
-
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>config-artifacts</artifactId>
-        <version>0.11.0-SNAPSHOT</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-</project>
diff --git a/opendaylight/config/threadpool-config-api/pom.xml b/opendaylight/config/threadpool-config-api/pom.xml
deleted file mode 100644 (file)
index 3db8512..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>config-subsystem</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <relativePath>../</relativePath>
-  </parent>
-  <artifactId>threadpool-config-api</artifactId>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-</project>
diff --git a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java b/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ScheduledThreadPool.java
deleted file mode 100644 (file)
index 79ed26b..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool;
-
-import java.util.concurrent.ScheduledExecutorService;
-
-/**
- * Interface representing a scheduled {@link ThreadPool}.
- */
-public interface ScheduledThreadPool extends ThreadPool {
-
-    @Override
-    ScheduledExecutorService getExecutor();
-}
\ No newline at end of file
diff --git a/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java b/opendaylight/config/threadpool-config-api/src/main/java/org/opendaylight/controller/config/threadpool/ThreadPool.java
deleted file mode 100644 (file)
index 68ecb80..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool;
-
-import java.util.concurrent.ExecutorService;
-
-/**
- * Interface representing a thread pool.
- */
-public interface ThreadPool {
-
-    ExecutorService getExecutor();
-
-    int getMaxThreadCount();
-}
\ No newline at end of file
diff --git a/opendaylight/config/threadpool-config-impl/pom.xml b/opendaylight/config/threadpool-config-impl/pom.xml
deleted file mode 100644 (file)
index 4ff0db1..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>config-subsystem</artifactId>
-    <version>0.11.0-SNAPSHOT</version>
-    <relativePath>../</relativePath>
-  </parent>
-  <artifactId>threadpool-config-impl</artifactId>
-  <packaging>bundle</packaging>
-  <name>${project.artifactId}</name>
-
-  <dependencies>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>threadpool-config-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-  </dependencies>
-</project>
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FixedThreadPoolWrapper.java
deleted file mode 100644 (file)
index 2dad264..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import org.opendaylight.controller.config.threadpool.ThreadPool;
-
-/**
- * Implementation of {@link ThreadPool} that wraps an {@link ExecutorService}
- * backed by a fixed number of threads.
- */
-public class FixedThreadPoolWrapper implements ThreadPool, Closeable {
-
-    private final ThreadPoolExecutor executor;
-
-    public FixedThreadPoolWrapper(int threadCount, ThreadFactory factory) {
-        this.executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(threadCount, factory);
-        executor.prestartAllCoreThreads();
-    }
-
-    @Override
-    public ExecutorService getExecutor() {
-        return Executors.unconfigurableExecutorService(executor);
-    }
-
-    @Override
-    public void close() {
-        executor.shutdown();
-    }
-
-    @Override
-    public int getMaxThreadCount() {
-        return executor.getMaximumPoolSize();
-    }
-
-    public void setMaxThreadCount(int maxThreadCount) {
-        executor.setCorePoolSize(maxThreadCount);
-        executor.setMaximumPoolSize(maxThreadCount);
-    }
-}
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/FlexibleThreadPoolWrapper.java
deleted file mode 100644 (file)
index b89657a..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import com.google.common.base.Optional;
-import java.io.Closeable;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.RejectedExecutionHandler;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.config.threadpool.ThreadPool;
-
-/**
- * Implementation of {@link ThreadPool} that wraps an {@link ExecutorService}
- * backed by a flexible number of threads.
- */
-public class FlexibleThreadPoolWrapper implements ThreadPool, Closeable {
-    private final ThreadPoolExecutor executor;
-
-    public FlexibleThreadPoolWrapper(int minThreadCount, int maxThreadCount, long keepAlive, TimeUnit timeUnit,
-            ThreadFactory threadFactory) {
-        this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(Optional.<Integer>absent()));
-    }
-
-    public FlexibleThreadPoolWrapper(int minThreadCount, int maxThreadCount, long keepAlive, TimeUnit timeUnit,
-            ThreadFactory threadFactory, Optional<Integer> queueCapacity) {
-        this(minThreadCount, maxThreadCount, keepAlive, timeUnit, threadFactory, getQueue(queueCapacity));
-    }
-
-    private FlexibleThreadPoolWrapper(int minThreadCount, int maxThreadCount, long keepAlive, TimeUnit timeUnit,
-            ThreadFactory threadFactory, BlockingQueue<Runnable> queue) {
-
-        executor = new ThreadPoolExecutor(minThreadCount, maxThreadCount, keepAlive, timeUnit,
-                queue, threadFactory, new FlexibleRejectionHandler());
-        executor.prestartAllCoreThreads();
-    }
-
-    /**
-     * Overriding the queue:
-     * ThreadPoolExecutor does not create new threads while the queue still accepts tasks, so enqueuing
-     * is done in the RejectedExecutionHandler instead.
-     * This implementation saturates the thread pool first, then the queue; when both are full, the caller blocks.
-     */
-    private static ForwardingBlockingQueue getQueue(Optional<Integer> capacity) {
-        final BlockingQueue<Runnable> delegate = capacity.isPresent() ? new LinkedBlockingQueue<>(capacity.get()) : new LinkedBlockingQueue<>();
-        return new ForwardingBlockingQueue(delegate);
-    }
-
-    @Override
-    public ExecutorService getExecutor() {
-        return Executors.unconfigurableExecutorService(executor);
-    }
-
-    public int getMinThreadCount() {
-        return executor.getCorePoolSize();
-    }
-
-    public void setMinThreadCount(int minThreadCount) {
-        executor.setCorePoolSize(minThreadCount);
-    }
-
-    @Override
-    public int getMaxThreadCount() {
-        return executor.getMaximumPoolSize();
-    }
-
-    public void setMaxThreadCount(int maxThreadCount) {
-        executor.setMaximumPoolSize(maxThreadCount);
-    }
-
-    public long getKeepAliveMillis() {
-        return executor.getKeepAliveTime(TimeUnit.MILLISECONDS);
-    }
-
-    public void setKeepAliveMillis(long keepAliveMillis) {
-        executor.setKeepAliveTime(keepAliveMillis, TimeUnit.MILLISECONDS);
-    }
-
-    public void setThreadFactory(ThreadFactory threadFactory) {
-        executor.setThreadFactory(threadFactory);
-    }
-
-    public void prestartAllCoreThreads() {
-        executor.prestartAllCoreThreads();
-    }
-
-    @Override
-    public void close() {
-        executor.shutdown();
-    }
-
-    /**
-     * If the maximum thread count has been reached, the executor raises a rejected execution; we then push the task onto the queue.
-     */
-    private static class FlexibleRejectionHandler implements RejectedExecutionHandler {
-        @Override
-        public void rejectedExecution(final Runnable r, final ThreadPoolExecutor executor) {
-            try {
-                executor.getQueue().put(r);
-            } catch (InterruptedException e) {
-                throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
-            }
-        }
-    }
-
-    private static class ForwardingBlockingQueue extends com.google.common.util.concurrent.ForwardingBlockingQueue<Runnable> {
-        private final BlockingQueue<Runnable> delegate;
-
-        public ForwardingBlockingQueue(BlockingQueue<Runnable> delegate) {
-            this.delegate = delegate;
-        }
-
-        @Override
-        protected BlockingQueue<Runnable> delegate() {
-            return delegate;
-        }
-
-        @Override
-        public boolean offer(final Runnable r) {
-            // ThreadPoolExecutor will spawn a new thread after core size is reached only
-            // if the queue.offer returns false.
-            return false;
-        }
-    }
-}
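
The queue override above is the key trick: offer() always fails, so ThreadPoolExecutor keeps creating
threads up to its maximum, and only then does the rejection handler park tasks on the queue. A
self-contained sketch of the same arrangement with plain JDK classes; ThreadsFirstExecutor is a
hypothetical name and the 30-second keep-alive is an arbitrary choice.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    final class ThreadsFirstExecutor {
        static ThreadPoolExecutor create(final int coreThreads, final int maxThreads) {
            final LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<Runnable>() {
                @Override
                public boolean offer(final Runnable task) {
                    // Refusing offer() makes ThreadPoolExecutor spawn a new thread while below max.
                    return false;
                }
            };
            return new ThreadPoolExecutor(coreThreads, maxThreads, 30, TimeUnit.SECONDS, queue,
                (task, executor) -> {
                    try {
                        // All threads busy: actually enqueue now, blocking if the queue is full.
                        executor.getQueue().put(task);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        throw new RejectedExecutionException("Interrupted while waiting on the queue", e);
                    }
                });
        }
    }
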
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/NamingThreadPoolFactory.java
deleted file mode 100644 (file)
index 3f5a6dd..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import com.google.common.base.Preconditions;
-import java.io.Closeable;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.concurrent.ThreadSafe;
-
-/**
- * Implementation of {@link ThreadFactory}.
- */
-@ThreadSafe
-public class NamingThreadPoolFactory implements ThreadFactory, Closeable {
-
-    private final ThreadGroup group;
-    private final String namePrefix;
-    private final AtomicLong threadName = new AtomicLong();
-
-    public NamingThreadPoolFactory(String namePrefix) {
-        Preconditions.checkNotNull(namePrefix);
-        this.group = new ThreadGroup(namePrefix);
-        this.namePrefix = namePrefix;
-    }
-
-    @Override
-    public Thread newThread(Runnable r) {
-        return new Thread(group, r, String.format("%s-%d", group.getName(), threadName.incrementAndGet()));
-    }
-
-    @Override
-    public void close() {
-    }
-
-    public String getNamePrefix() {
-        return namePrefix;
-    }
-
-}
diff --git a/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java b/opendaylight/config/threadpool-config-impl/src/main/java/org/opendaylight/controller/config/threadpool/util/ScheduledThreadPoolWrapper.java
deleted file mode 100644 (file)
index 648bd82..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.config.threadpool.util;
-
-import java.io.Closeable;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.ThreadFactory;
-import org.opendaylight.controller.config.threadpool.ScheduledThreadPool;
-
-/**
- * Implementation of {@link ScheduledThreadPool} wraps
- * {@link ScheduledExecutorService}.
- */
-public class ScheduledThreadPoolWrapper implements ScheduledThreadPool, Closeable {
-
-    private final ScheduledThreadPoolExecutor executor;
-    private final int threadCount;
-
-    public ScheduledThreadPoolWrapper(int threadCount, ThreadFactory factory) {
-        this.threadCount = threadCount;
-        this.executor = new ScheduledThreadPoolExecutor(threadCount, factory);
-        executor.prestartAllCoreThreads();
-    }
-
-    @Override
-    public ScheduledExecutorService getExecutor() {
-        return Executors.unconfigurableScheduledExecutorService(executor);
-    }
-
-    @Override
-    public void close() {
-        executor.shutdown();
-    }
-
-    @Override
-    public int getMaxThreadCount() {
-        return threadCount;
-    }
-
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/pom.xml b/opendaylight/md-sal/benchmark-data-store/pom.xml
deleted file mode 100644 (file)
index 520bc56..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>benchmark-data-store</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>jar</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-core</artifactId>
-      <version>1.17.5</version>
-    </dependency>
-    <dependency>
-      <groupId>org.openjdk.jmh</groupId>
-      <artifactId>jmh-generator-annprocess</artifactId>
-      <version>1.17.5</version>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-inmemory-datastore</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-broker-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
-      <scope>compile</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <configuration>
-          <classpathScope>test</classpathScope>
-          <executable>java</executable>
-          <arguments>
-            <argument>-classpath</argument>
-            <classpath />
-            <argument>org.openjdk.jmh.Main</argument>
-            <argument>.*</argument>
-          </arguments>
-        </configuration>
-        <executions>
-          <execution>
-            <id>run-benchmarks</id>
-            <phase>integration-test</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryBrokerWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryBrokerWriteTransactionBenchmark.java
deleted file mode 100644 (file)
index 04298a6..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2013, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.benchmark;
-
-import java.util.concurrent.TimeUnit;
-
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Warmup;
-
-/**
- * Abstract class to handle transaction benchmarks.
- *
- * @author Lukas Sedlak
- */
-public abstract class AbstractInMemoryBrokerWriteTransactionBenchmark
-        extends AbstractInMemoryWriteTransactionBenchmark {
-
-    protected SerializedDOMDataBroker domBroker;
-
-    protected void initTestNode() throws Exception {
-        final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH).build();
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, testPath, provideOuterListNode());
-
-        writeTx.submit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
-
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
-            writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_100K_PATHS[outerListKey],
-                    OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
-        }
-
-        writeTx.submit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
-            DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-            writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_100K_PATHS[outerListKey],
-                    OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
-
-            writeTx.submit().get();
-        }
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
-            writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_50K_PATHS[outerListKey],
-                    OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
-        }
-
-        writeTx.submit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
-            DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-            writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_50K_PATHS[outerListKey],
-                    OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
-            writeTx.submit().get();
-        }
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
-            writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_10K_PATHS[outerListKey],
-                    OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
-        }
-        writeTx.submit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
-            DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-            writeTx.put(LogicalDatastoreType.OPERATIONAL, OUTER_LIST_10K_PATHS[outerListKey],
-                    OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
-            writeTx.submit().get();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java
deleted file mode 100644 (file)
index 00925a5..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2013, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.benchmark;
-
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Measurement;
-import org.openjdk.jmh.annotations.Warmup;
-
-/**
- * Abstract class for in-memory Datastore transaction benchmarks.
- *
- * @author Lukas Sedlak
- */
-public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark
-        extends AbstractInMemoryWriteTransactionBenchmark {
-
-    protected InMemoryDOMDataStore domStore;
-
-    protected void initTestNode() throws Exception {
-        final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH).build();
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        writeTx.write(testPath, provideOuterListNode());
-
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        cohort.canCommit().get();
-        cohort.preCommit().get();
-        cohort.commit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
-            writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
-        }
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        cohort.canCommit().get();
-        cohort.preCommit().get();
-        cohort.commit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
-            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-            writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
-
-            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-            cohort.canCommit().get();
-            cohort.preCommit().get();
-            cohort.commit().get();
-        }
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
-            writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
-        }
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        cohort.canCommit().get();
-        cohort.preCommit().get();
-        cohort.commit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
-            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-            writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
-            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-            cohort.canCommit().get();
-            cohort.preCommit().get();
-            cohort.commit().get();
-        }
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
-            writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
-        }
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        cohort.canCommit().get();
-        cohort.preCommit().get();
-        cohort.commit().get();
-    }
-
-    @Benchmark
-    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
-    public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
-        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
-            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-            writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
-            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-            cohort.canCommit().get();
-            cohort.preCommit().get();
-            cohort.commit().get();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryWriteTransactionBenchmark.java
deleted file mode 100644 (file)
index a2414d7..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.benchmark;
-
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public abstract class AbstractInMemoryWriteTransactionBenchmark {
-    protected static final int OUTER_LIST_100K = 100000;
-    protected static final int OUTER_LIST_50K = 50000;
-    protected static final int OUTER_LIST_10K = 10000;
-
-    protected static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
-    protected static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
-    protected static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
-
-    private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
-        final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
-
-        for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
-            paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
-                    .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey).build();
-        }
-        return paths;
-    }
-
-    protected static final int WARMUP_ITERATIONS = 20;
-    protected static final int MEASUREMENT_ITERATIONS = 20;
-
-    protected static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
-    protected static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
-    protected static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
-
-    private static MapNode initInnerListItems(final int count) {
-        final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
-                .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
-
-        for (int i = 1; i <= count; ++i) {
-            mapEntryBuilder
-                    .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
-        }
-        return mapEntryBuilder.build();
-    }
-
-    protected static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K,
-            ONE_ITEM_INNER_LIST);
-    protected static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K,
-            TWO_ITEM_INNER_LIST);
-    protected static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K,
-            TEN_ITEM_INNER_LIST);
-
-    private static NormalizedNode<?, ?>[] initOuterListItems(final int outerListItemsCount, final MapNode innerList) {
-        final NormalizedNode<?, ?>[] outerListItems = new NormalizedNode[outerListItemsCount];
-
-        for (int i = 0; i < outerListItemsCount; ++i) {
-            int outerListKey = i;
-            outerListItems[i] = ImmutableNodes
-                    .mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
-                    .withChild(innerList).build();
-        }
-        return outerListItems;
-    }
-
-    protected SchemaContext schemaContext;
-
-    public abstract void setUp() throws Exception;
-
-    public abstract void tearDown();
-
-    protected static DataContainerChild<?, ?> provideOuterListNode() {
-        return ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
-                .withChild(ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME).build()).build();
-    }
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java
deleted file mode 100644 (file)
index d2d3b51..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2013, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.benchmark;
-
-import java.io.InputStream;
-import java.util.Collections;
-import java.util.List;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.parser.spi.meta.ReactorException;
-import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
-
-/**
- * Benchmark Model class loads the odl-datastore-test.yang model from resources.
- * <br>
- * This class serves as a facilitator which holds several references to the initialized YANG model as static final
- * members.
- *
- * @author Lukas Sedlak
- */
-public final class BenchmarkModel {
-
-    public static final QName TEST_QNAME = QName
-        .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13","test");
-    public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
-    public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
-    public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
-    public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
-    private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
-    public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
-    public static final YangInstanceIdentifier OUTER_LIST_PATH =
-            YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
-
-    private BenchmarkModel() {
-    }
-
-    private static InputStream getInputStream() {
-        return BenchmarkModel.class.getResourceAsStream(DATASTORE_TEST_YANG);
-    }
-
-    public static SchemaContext createTestContext() {
-        final SchemaContext schemaContext;
-        final List<InputStream> streams = Collections.singletonList(getInputStream());
-
-        try {
-            schemaContext = YangParserTestUtils.parseYangStreams(streams);
-        } catch (ReactorException e) {
-            throw new RuntimeException("Unable to build schema context from " + streams, e);
-        }
-        return schemaContext;
-    }
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryBrokerWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryBrokerWriteTransactionBenchmark.java
deleted file mode 100644 (file)
index 7c0b4b7..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2014, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.benchmark;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-
-@State(Scope.Thread)
-@BenchmarkMode(Mode.AverageTime)
-@OutputTimeUnit(TimeUnit.MILLISECONDS)
-@Fork(1)
-public class InMemoryBrokerWriteTransactionBenchmark extends AbstractInMemoryBrokerWriteTransactionBenchmark {
-    private ListeningExecutorService executor;
-
-    @Setup(Level.Trial)
-    @Override
-    public void setUp() throws Exception {
-        ListeningExecutorService dsExec = MoreExecutors.newDirectExecutorService();
-        executor = MoreExecutors.listeningDecorator(
-                MoreExecutors.getExitingExecutorService((ThreadPoolExecutor) Executors.newFixedThreadPool(1), 1L,
-                        TimeUnit.SECONDS));
-
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", dsExec);
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", dsExec);
-        Map<LogicalDatastoreType, DOMStore> datastores = ImmutableMap.of(
-            LogicalDatastoreType.OPERATIONAL, (DOMStore)operStore,
-            LogicalDatastoreType.CONFIGURATION, configStore);
-
-        domBroker = new SerializedDOMDataBroker(datastores, executor);
-        schemaContext = BenchmarkModel.createTestContext();
-        configStore.onGlobalContextUpdated(schemaContext);
-        operStore.onGlobalContextUpdated(schemaContext);
-        initTestNode();
-    }
-
-    @Override
-    public void tearDown() {
-        domBroker.close();
-        executor.shutdown();
-    }
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java
deleted file mode 100644 (file)
index 0370588..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2013, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.benchmark;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
-
-/**
- * Benchmark for testing of performance of write operations for
- * InMemoryDataStore. The instance of benchmark creates InMemoryDataStore with
- * Data Change Listener Executor Service as Blocking Bounded Fast Thread Pool
- * and DOM Store Executor Service as Same Thread Executor.
- *
- * @author Lukas Sedlak
- */
-@State(Scope.Thread)
-@BenchmarkMode(Mode.AverageTime)
-@OutputTimeUnit(TimeUnit.MILLISECONDS)
-@Fork(1)
-public class InMemoryDataStoreWithSameThreadedExecutorBenchmark
-        extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
-
-    private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
-    private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
-
-    @Override
-    @Setup(Level.Trial)
-    public void setUp() throws Exception {
-        final String name = "DS_BENCHMARK";
-        final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
-                MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
-
-        domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", dataChangeListenerExecutor);
-        schemaContext = BenchmarkModel.createTestContext();
-        domStore.onGlobalContextUpdated(schemaContext);
-        initTestNode();
-    }
-
-    @Override
-    @TearDown
-    public void tearDown() {
-        schemaContext = null;
-        domStore = null;
-    }
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java
deleted file mode 100644 (file)
index fee7eca..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.benchmark;
-
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
-
-/**
- * Benchmark for testing of performance of write operations for
- * InMemoryDataStore. The instance of benchmark creates InMemoryDataStore with
- * Data Change Listener Executor Service as Same Thread Executor and DOM Store
- * Executor Service as Same Thread Executor.
- *
- * @author Lukas Sedlak
- */
-@State(Scope.Thread)
-@BenchmarkMode(Mode.AverageTime)
-@OutputTimeUnit(TimeUnit.MILLISECONDS)
-@Fork(1)
-public class InMemoryDataStoreWriteTransactionBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
-
-    @Override
-    @Setup(Level.Trial)
-    public void setUp() throws Exception {
-        domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", Executors.newSingleThreadExecutor());
-        schemaContext = BenchmarkModel.createTestContext();
-        domStore.onGlobalContextUpdated(schemaContext);
-        initTestNode();
-    }
-
-    @Override
-    @TearDown
-    public void tearDown() {
-        schemaContext = null;
-        domStore = null;
-    }
-}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang b/opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang
deleted file mode 100644 (file)
index 730ca17..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-module odl-datastore-test {
-    yang-version 1;
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
-    prefix "store-test";
-    
-    revision "2014-03-13" {
-        description "Initial revision.";
-    }
-
-    container test {
-        list outer-list {
-            key id;
-            leaf id {
-                type int32;
-            }
-            choice outer-choice {
-                case one {
-                    leaf one {
-                        type string;
-                    }
-                }
-                case two-three {
-                    leaf two {
-                        type string;
-                    }
-                    leaf three {
-                        type string;
-                    }
-               }
-           }
-           list inner-list {
-                key name;
-                leaf name {
-                    type int32;
-                }
-                leaf value {
-                    type string;
-                }
-            }
-        }
-    }
-}
\ No newline at end of file
index 15cf802e039279985b772a87db606a2ece49b396..fb0bc37268dda8a899fb8fcef769e65e6bd282e3 100644 (file)
 <?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>cds-access-api</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
     <packaging>bundle</packaging>
 
     <dependencies>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-actor_2.12</artifactId>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
         </dependency>
-
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>concepts</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>yang-data-api</artifactId>
         </dependency>
-
-        <!-- Needed for serialization of yang-data-api objects -->
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-impl</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-spi</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-codec-binfmt</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-tree-api</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-clustering-commons</artifactId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
         </dependency>
 
         <!-- Testing dependencies -->
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>mockito-configuration</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-testkit_2.12</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-tree-ri</artifactId>
             <scope>test</scope>
         </dependency>
         <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <scope>test</scope>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-testkit_2.13</artifactId>
         </dependency>
     </dependencies>
 
+    <build>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <artifactId>maven-javadoc-plugin</artifactId>
+                    <version>3.1.1</version>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <configuration>
+                    <instructions>
+                        <Automatic-Module-Name>org.opendaylight.controller.cluster.access.api</Automatic-Module-Name>
+                    </instructions>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
     <scm>
         <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
         <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
index 3eddf3eb564f29c7b20b7ae06cec560793dc6144..4658f3b754f8008f0b3c905bfc9056fd55371734 100644 (file)
@@ -9,22 +9,19 @@ package org.opendaylight.controller.cluster.access;
 
 import static com.google.common.base.Preconditions.checkArgument;
 
-import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableObject;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Enumeration of all ABI versions supported by this implementation of the client access API.
- *
- * @author Robert Varga
  */
-@Beta
 public enum ABIVersion implements WritableObject {
     // NOTE: enumeration values need to be sorted in ascending order of their version to keep Comparable working
 
@@ -32,19 +29,40 @@ public enum ABIVersion implements WritableObject {
      * Version which is older than any other version. This version exists purely for testing purposes.
      */
     @VisibleForTesting
-    TEST_PAST_VERSION(0),
+    TEST_PAST_VERSION(0) {
+        @Override
+        public NormalizedNodeStreamVersion getStreamVersion() {
+            throw new UnsupportedOperationException();
+        }
+    },
+
+    // BORON was 5
+    // NEON_SR2 was 6
+    // SODIUM_SR1 was 7
+    // MAGNESIUM was 8
+    // CHLORINE_SR2 was 9
 
     /**
-     * Initial ABI version, as shipped with Boron Simultaneous release.
+     * Oldest ABI version we support. The messages remain the same as {@code CHLORINE_SR2}, while the serialization
+     * proxies in use are flat objects without any superclasses. Data encoding does not include augmentations as
+     * separate objects.
      */
-    // We seed the initial version to be the same as DataStoreVersions.BORON-VERSION for compatibility reasons.
-    BORON(5),
+    POTASSIUM(10) {
+        @Override
+        public NormalizedNodeStreamVersion getStreamVersion() {
+            return NormalizedNodeStreamVersion.POTASSIUM;
+        }
+    },
 
     /**
      * Version which is newer than any other version. This version exists purely for testing purposes.
      */
     @VisibleForTesting
-    TEST_FUTURE_VERSION(65535);
+    TEST_FUTURE_VERSION(65535) {
+        @Override
+        public NormalizedNodeStreamVersion getStreamVersion() {
+            throw new UnsupportedOperationException();
+        }
+    };
 
     private static final Logger LOG = LoggerFactory.getLogger(ABIVersion.class);
 
@@ -71,7 +89,7 @@ public enum ABIVersion implements WritableObject {
      * @return Current {@link ABIVersion}
      */
     public static @NonNull ABIVersion current() {
-        return BORON;
+        return POTASSIUM;
     }
 
     /**
@@ -84,18 +102,22 @@ public enum ABIVersion implements WritableObject {
      * @throws PastVersionException if the specified integer identifies a past version which is no longer supported
      */
     public static @NonNull ABIVersion valueOf(final short value) throws FutureVersionException, PastVersionException {
-        switch (Short.toUnsignedInt(value)) {
-            case 0:
-            case 1:
-            case 2:
-            case 3:
-            case 4:
-                throw new PastVersionException(value, BORON);
-            case 5:
-                return BORON;
-            default:
-                throw new FutureVersionException(value, BORON);
-        }
+        return switch (Short.toUnsignedInt(value)) {
+            case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 -> throw new PastVersionException(value, POTASSIUM);
+            case 10 -> POTASSIUM;
+            default -> throw new FutureVersionException(value, POTASSIUM);
+        };
+    }
+
+    /**
+     * Return {@code true} if this version is earlier than some {@code other} version.
+     *
+     * @param other Other {@link ABIVersion}
+     * @return {@code true} if {@code other} is later
+     * @throws NullPointerException if {@code other} is null
+     */
+    public boolean lt(final @NonNull ABIVersion other) {
+        return compareTo(other) < 0;
     }
 
     @Override
@@ -103,6 +125,13 @@ public enum ABIVersion implements WritableObject {
         out.writeShort(value);
     }
 
+    /**
+     * Return the NormalizedNode stream version corresponding to this particular ABI.
+     *
+     * @return Stream Version to use for this ABI version
+     */
+    public abstract @NonNull NormalizedNodeStreamVersion getStreamVersion();
+
     /**
      * Read an {@link ABIVersion} from a {@link DataInput}. This method is provided for callers which do not have
      * a recovery strategy for dealing with unsupported versions.
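
As the hunks above show, an ABIVersion is serialized as a single unsigned short via writeTo(DataOutput) and decoded with valueOf(short), which now treats everything below POTASSIUM (10) as a past version. Below is a hedged round-trip sketch with a lenient fallback to closestVersion(), the renamed accessor on AbstractVersionException shown in the next hunks; the helper class itself is illustrative and not part of cds-access-api.

    // Illustrative helper, not part of the controller codebase.
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.controller.cluster.access.ABIVersion;
    import org.opendaylight.controller.cluster.access.FutureVersionException;
    import org.opendaylight.controller.cluster.access.PastVersionException;

    final class AbiVersionRoundTrip {
        private AbiVersionRoundTrip() {
            // Utility class
        }

        static byte[] encodeCurrent() throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                // POTASSIUM(10) after this change, written as a single short
                ABIVersion.current().writeTo(out);
            }
            return bytes.toByteArray();
        }

        static ABIVersion decodeLenient(final byte[] bytes) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
                final short raw = in.readShort();
                try {
                    return ABIVersion.valueOf(raw);
                } catch (PastVersionException | FutureVersionException e) {
                    // Versions 0-9 and anything above 10 are rejected; fall back to the closest supported one
                    return e.closestVersion();
                }
            }
        }
    }
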
index 0567ef119110f2841ebf6882edeb2a2a47578bb6..1555b59501b586f4ab812b8a595001d4c4f33e1e 100644 (file)
@@ -9,17 +9,15 @@ package org.opendaylight.controller.cluster.access;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Abstract base exception used for reporting version mismatches from {@link ABIVersion}.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class AbstractVersionException extends Exception {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final @NonNull ABIVersion closestVersion;
     private final int version;
 
@@ -34,7 +32,7 @@ public abstract class AbstractVersionException extends Exception {
      *
      * @return Numeric version
      */
-    public final int getVersion() {
+    public final int version() {
         return version;
     }
 
@@ -43,8 +41,7 @@ public abstract class AbstractVersionException extends Exception {
      *
      * @return Closest supported {@link ABIVersion}
      */
-    public final @NonNull ABIVersion getClosestVersion() {
+    public final @NonNull ABIVersion closestVersion() {
         return closestVersion;
     }
-
 }
index d5f132a7b8e73d74e5616d9bd9bf3d61a29eeb45..f0ceaa4890b61dbc38493075be9cbf7e3fc74213 100644 (file)
@@ -7,19 +7,15 @@
  */
 package org.opendaylight.controller.cluster.access;
 
-import com.google.common.annotations.Beta;
-
 /**
  * Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too new to be supported
  * by the codebase.
- *
- * @author Robert Varga
  */
-@Beta
 public final class FutureVersionException extends AbstractVersionException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    FutureVersionException(final short version, ABIVersion closest) {
+    FutureVersionException(final short version, final ABIVersion closest) {
         super("Version " + Short.toUnsignedInt(version) + " is too new", version, closest);
     }
 }
index c8cbe54b3d0ae9bf725dee6114f349fd00b671d2..c333d3495e163e494c4daa28480e058c0c3a38af 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access;
 
-import com.google.common.annotations.Beta;
-
 /**
  * Exception thrown from {@link ABIVersion#valueOf(short)} when the specified version is too old and no longer
  * supported by the codebase.
- *
- * @author Robert Varga
  */
-@Beta
 public final class PastVersionException extends AbstractVersionException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     PastVersionException(final short version, final ABIVersion closest) {
index 3898ee22b3e2d5b3972fd78a5fdc52b9278c49b2..9e2998c5b2536a8ef3d406de7e5c5f90c203dab2 100644 (file)
@@ -8,7 +8,6 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -16,11 +15,9 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * Request to abort a local transaction. Since local transactions do not introduce state on the backend until they
  * are ready, the purpose of this message is to inform the backend that a message identifier has been used. This is
  * not important for single transactions, but is critical to ensure transaction ordering within local histories.
- *
- * @author Robert Varga
  */
-@Beta
 public final class AbortLocalTransactionRequest extends AbstractLocalTransactionRequest<AbortLocalTransactionRequest> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public AbortLocalTransactionRequest(final @NonNull TransactionIdentifier identifier,
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractLocalHistoryRequestProxy.java
deleted file mode 100644 (file)
index f6ba2e7..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link LocalHistoryRequest}s.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractLocalHistoryRequestProxy<T extends LocalHistoryRequest<T>>
-        extends AbstractRequestProxy<LocalHistoryIdentifier, T> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractLocalHistoryRequestProxy() {
-        // For Externalizable
-    }
-
-    AbstractLocalHistoryRequestProxy(final T request) {
-        super(request);
-    }
-
-    @Override
-    protected final LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
-        return LocalHistoryIdentifier.readFrom(in);
-    }
-}
index efc0e856b20c4d7ed8ebeb7f49dc9460bfa28767..5831e65c11a88aabea03d78ee595635a4490097e 100644 (file)
@@ -22,6 +22,7 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  */
 public abstract class AbstractLocalTransactionRequest<T extends AbstractLocalTransactionRequest<T>>
         extends TransactionRequest<T> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     AbstractLocalTransactionRequest(final TransactionIdentifier identifier, final long sequence,
@@ -30,7 +31,7 @@ public abstract class AbstractLocalTransactionRequest<T extends AbstractLocalTra
     }
 
     @Override
-    protected final AbstractTransactionRequestProxy<T> externalizableProxy(final ABIVersion version) {
+    protected final SerialForm<T> externalizableProxy(final ABIVersion version) {
         throw new UnsupportedOperationException("Local transaction request " + this + " should never be serialized");
     }
 
index fa53a599aa1cbfa32dbdaac87b1a54339ee6cff9..2b4ee0e7e8dd2ef8ffaa7a2e9b69a1783668b257 100644 (file)
@@ -10,12 +10,15 @@ package org.opendaylight.controller.cluster.access.commands;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
 
 /**
  * Abstract base class for {@link TransactionRequest}s accessing data as visible in the isolated context of a particular
@@ -25,13 +28,33 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  * This class is visible outside of this package for the purpose of allowing common instanceof checks
  * and simplified codepaths.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class AbstractReadPathTransactionRequest<T extends AbstractReadPathTransactionRequest<T>>
         extends AbstractReadTransactionRequest<T> {
+    interface SerialForm<T extends AbstractReadPathTransactionRequest<T>>
+            extends AbstractReadTransactionRequest.SerialForm<T> {
+
+        @Override
+        default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence,
+                final ActorRef replyTo, final boolean snapshotOnly) throws IOException {
+            return readExternal(in, target, sequence, replyTo, snapshotOnly,
+                NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier());
+        }
+
+        @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence,
+            @NonNull ActorRef replyTo, boolean snapshotOnly, @NonNull YangInstanceIdentifier path) throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
+            AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg);
+            try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+                nnout.writeYangInstanceIdentifier(msg.getPath());
+            }
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull YangInstanceIdentifier path;
@@ -57,5 +80,5 @@ public abstract class AbstractReadPathTransactionRequest<T extends AbstractReadP
     }
 
     @Override
-    protected abstract AbstractReadTransactionRequestProxyV1<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
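
The new SerialForm above streams the request path through the NormalizedNode codec selected by the message's ABI version and reads it back via NormalizedNodeDataInput. The stand-alone sketch below exercises the same write/read pairing; its class name, method names and sample QName are illustrative only.

    // Illustrative helper mirroring the SerialForm path handling; not part of cds-access-api.
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.controller.cluster.access.ABIVersion;
    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;

    final class PathStreamingSketch {
        private PathStreamingSketch() {
            // Utility class
        }

        static byte[] writePath(final ABIVersion version, final YangInstanceIdentifier path) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes);
                 var nnout = version.getStreamVersion().newDataOutput(out)) {
                nnout.writeYangInstanceIdentifier(path);
            }
            return bytes.toByteArray();
        }

        static YangInstanceIdentifier readPath(final byte[] bytes) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
                return NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier();
            }
        }

        public static void main(final String[] args) throws IOException {
            // Hypothetical sample path, reusing the test model QName seen elsewhere in this change
            final var path = YangInstanceIdentifier.of(QName.create(
                "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13", "test"));
            System.out.println(readPath(writePath(ABIVersion.current(), path)));
        }
    }
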
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadPathTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index bafa3d9..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputOutput;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements
- * the initial (Boron) serialization format.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractReadPathTransactionRequestProxyV1<T extends AbstractReadPathTransactionRequest<T>>
-        extends AbstractReadTransactionRequestProxyV1<T> {
-    private static final long serialVersionUID = 1L;
-    private YangInstanceIdentifier path;
-
-    protected AbstractReadPathTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    AbstractReadPathTransactionRequestProxyV1(final T request) {
-        super(request);
-        path = request.getPath();
-    }
-
-    @Override
-    public final void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        try (NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(out)) {
-            nnout.writeYangInstanceIdentifier(path);
-        }
-    }
-
-    @Override
-    public final void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
-        super.readExternal(in);
-        path = NormalizedNodeInputOutput.newDataInput(in).readYangInstanceIdentifier();
-    }
-
-    @Override
-    protected final T createReadRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo, final boolean snapshotOnly) {
-        return createReadPathRequest(target, sequence, replyTo, path, snapshotOnly);
-    }
-
-    abstract T createReadPathRequest(TransactionIdentifier target, long sequence, ActorRef replyTo,
-            YangInstanceIdentifier requestPath, boolean snapshotOnly);
-}
index 3fc4821edf99e64eed0feec7b983b1015fc86329..23fdd85140db711919672496df7cb10745af7c4a 100644 (file)
@@ -8,8 +8,11 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -20,13 +23,28 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * This class is visible outside of this package for the purpose of allowing common instanceof checks
  * and simplified codepaths.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class AbstractReadTransactionRequest<T extends AbstractReadTransactionRequest<T>>
         extends TransactionRequest<T> {
+    interface SerialForm<T extends AbstractReadTransactionRequest<T>> extends TransactionRequest.SerialForm<T> {
+        @Override
+        default T readExternal(final ObjectInput in, final TransactionIdentifier target, final long sequence,
+                final ActorRef replyTo) throws IOException {
+            return readExternal(in, target, sequence, replyTo, in.readBoolean());
+        }
+
+        @NonNull T readExternal(@NonNull ObjectInput in, @NonNull TransactionIdentifier target, long sequence,
+            @NonNull ActorRef replyTo, boolean snapshotOnly) throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final T msg) throws IOException {
+            TransactionRequest.SerialForm.super.writeExternal(out, msg);
+            out.writeBoolean(msg.isSnapshotOnly());
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final boolean snapshotOnly;
@@ -52,5 +70,5 @@ public abstract class AbstractReadTransactionRequest<T extends AbstractReadTrans
     }
 
     @Override
-    protected abstract AbstractReadTransactionRequestProxyV1<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
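
For reference, a minimal self-contained sketch of the SerialForm layering introduced above, using toy names (ReadRequest, RequestForm and ReadRequestForm are illustrative stand-ins, not types from this change): each level's default writeExternal appends its own field after the base fields, and its default readExternal peels that field back off and delegates to a more specific overload, just as AbstractReadTransactionRequest.SerialForm layers snapshotOnly on top of TransactionRequest.SerialForm.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;

final class SerialFormLayeringSketch {
    // Toy message carrying a sequence number and a read-only flag.
    record ReadRequest(long sequence, boolean snapshotOnly) {
    }

    // Base form: owns the sequence field.
    interface RequestForm<T> {
        T readExternal(ObjectInput in, long sequence) throws IOException;

        default T readExternal(final ObjectInput in) throws IOException {
            // Read the field this level owns, then hand off to the more specific overload
            return readExternal(in, in.readLong());
        }

        default void writeExternal(final ObjectOutput out, final long sequence) throws IOException {
            out.writeLong(sequence);
        }
    }

    // Derived form: appends its own flag after the base fields, in the same order it reads them back.
    interface ReadRequestForm extends RequestForm<ReadRequest> {
        @Override
        default ReadRequest readExternal(final ObjectInput in, final long sequence) throws IOException {
            return new ReadRequest(sequence, in.readBoolean());
        }

        default void writeExternal(final ObjectOutput out, final ReadRequest msg) throws IOException {
            writeExternal(out, msg.sequence());
            out.writeBoolean(msg.snapshotOnly());
        }
    }

    public static void main(final String[] args) throws IOException {
        final ReadRequestForm form = new ReadRequestForm() { };
        final var bytes = new ByteArrayOutputStream();
        try (var out = new ObjectOutputStream(bytes)) {
            form.writeExternal(out, new ReadRequest(42, true));
        }
        try (var in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            // Prints ReadRequest[sequence=42, snapshotOnly=true]
            System.out.println(form.readExternal(in));
        }
    }
}
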
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractReadTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index 88820bd..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link AbstractReadTransactionRequest}s. It implements
- * the initial (Boron) serialization format.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractReadTransactionRequestProxyV1<T extends AbstractReadTransactionRequest<T>>
-        extends AbstractTransactionRequestProxy<T> {
-    private static final long serialVersionUID = 1L;
-    private boolean snapshotOnly;
-
-    protected AbstractReadTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    AbstractReadTransactionRequestProxyV1(final T request) {
-        super(request);
-        snapshotOnly = request.isSnapshotOnly();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeBoolean(snapshotOnly);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
-        super.readExternal(in);
-        snapshotOnly = in.readBoolean();
-    }
-
-    @Override
-    protected final T createRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
-        return createReadRequest(target, sequence, replyTo, snapshotOnly);
-    }
-
-    @SuppressWarnings("checkstyle:hiddenField")
-    abstract T createReadRequest(TransactionIdentifier target, long sequence, ActorRef replyTo, boolean snapshotOnly);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionRequestProxy.java
deleted file mode 100644 (file)
index 437d281..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link TransactionRequest}s.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractTransactionRequestProxy<T extends TransactionRequest<T>>
-        extends AbstractRequestProxy<TransactionIdentifier, T> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractTransactionRequestProxy() {
-        // For Externalizable
-    }
-
-    AbstractTransactionRequestProxy(final T request) {
-        super(request);
-    }
-
-    @Override
-    protected final TransactionIdentifier readTarget(final DataInput in) throws IOException {
-        return TransactionIdentifier.readFrom(in);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/AbstractTransactionSuccessProxy.java
deleted file mode 100644 (file)
index a1284b7..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Abstract base class for serialization proxies associated with {@link TransactionSuccess}es.
- *
- * @author Robert Varga
- *
- * @param <T> Message type
- */
-abstract class AbstractTransactionSuccessProxy<T extends TransactionSuccess<T>>
-        extends AbstractSuccessProxy<TransactionIdentifier, T> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractTransactionSuccessProxy() {
-        // For Externalizable
-    }
-
-    AbstractTransactionSuccessProxy(final T request) {
-        super(request);
-    }
-
-    @Override
-    protected final TransactionIdentifier readTarget(final DataInput in) throws IOException {
-        return TransactionIdentifier.readFrom(in);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCF.java
new file mode 100644 (file)
index 0000000..ea9c37e
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCF implements ConnectClientFailure.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ConnectClientFailure message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CCF() {
+        // for Externalizable
+    }
+
+    CCF(final ConnectClientFailure request) {
+        message = requireNonNull(request);
+    }
+
+    @Override
+    public ConnectClientFailure message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ConnectClientFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
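
CCF and its siblings below are instances of the classic serialization-proxy pattern: as the removed FindBugs justifications note, the messages implement writeReplace to delegate serialization to a proxy, and the proxy's readResolve hands the rebuilt message back, so only the compact wire form ever reaches the stream. A minimal sketch of that round trip under toy names (Greeting and GreetingProxy are illustrative, not part of this change):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Toy message: never serialized directly, always replaced by its proxy.
final class Greeting implements Serializable {
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    private final String text;

    Greeting(final String text) {
        this.text = text;
    }

    String text() {
        return text;
    }

    @java.io.Serial
    private Object writeReplace() {
        return new GreetingProxy(this);
    }
}

// Compact proxy in the spirit of CCF: holds the message, rebuilds it on read, never escapes deserialization.
final class GreetingProxy implements Externalizable {
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    private Greeting message;

    public GreetingProxy() {
        // for Externalizable
    }

    GreetingProxy(final Greeting message) {
        this.message = message;
    }

    @Override
    public void writeExternal(final ObjectOutput out) throws IOException {
        out.writeUTF(message.text());
    }

    @Override
    public void readExternal(final ObjectInput in) throws IOException {
        message = new Greeting(in.readUTF());
    }

    @java.io.Serial
    private Object readResolve() {
        return message;
    }
}

final class ProxyRoundTrip {
    public static void main(final String[] args) throws IOException, ClassNotFoundException {
        final var bytes = new ByteArrayOutputStream();
        try (var out = new ObjectOutputStream(bytes)) {
            out.writeObject(new Greeting("hello"));      // writeReplace() swaps in GreetingProxy
        }
        try (var in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(((Greeting) in.readObject()).text());   // readResolve() yields the Greeting
        }
    }
}
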
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCR.java
new file mode 100644 (file)
index 0000000..ace94d5
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCR implements ConnectClientRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ConnectClientRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CCR() {
+        // for Externalizable
+    }
+
+    CCR(final ConnectClientRequest request) {
+        message = requireNonNull(request);
+    }
+
+    @Override
+    public ConnectClientRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ConnectClientRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CCS.java
new file mode 100644 (file)
index 0000000..ea425e5
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ConnectClientSuccess}. It implements the Chlorine SR2 serialization format.
+ */
+final class CCS implements ConnectClientSuccess.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ConnectClientSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CCS() {
+        // for Externalizable
+    }
+
+    CCS(final ConnectClientSuccess request) {
+        message = requireNonNull(request);
+    }
+
+    @Override
+    public ConnectClientSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ConnectClientSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CHR.java
new file mode 100644 (file)
index 0000000..da3fd13
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class CHR implements CreateLocalHistoryRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private CreateLocalHistoryRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CHR() {
+        // For Externalizable
+    }
+
+    CHR(final CreateLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public CreateLocalHistoryRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final CreateLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index ece472056473df653610cb9c51fe1e9918c28c55..67b1a40408f39067e6ec42b6bc99695d1177ee05 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
@@ -15,11 +14,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
  * been closed, either via a successful commit or abort (which is indicated via {@link #isSuccessful()}). This can
  * happen if the corresponding journal record is replicated, but the message to the frontend gets lost and the backend
  * leader moved before the frontend retried the corresponding request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ClosedTransactionException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final boolean successful;
index adef0c31bc022775889ad6d123954f6be955758d..7a3f771b474789bfe576a9a7465a9eacd4e55ad6 100644 (file)
@@ -10,29 +10,26 @@ package org.opendaylight.controller.cluster.access.commands;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
 import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Request to commit a local transaction. Since local transactions do not introduce state on the backend until they
  * are ready, this message carries a complete set of modifications.
- *
- * @author Robert Varga
  */
-@Beta
 public final class CommitLocalTransactionRequest
         extends AbstractLocalTransactionRequest<CommitLocalTransactionRequest> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
     private final DataTreeModification mod;
     private final Exception delayedFailure;
     private final boolean coordinated;
@@ -77,4 +74,19 @@ public final class CommitLocalTransactionRequest
         return super.addToStringAttributes(toStringHelper).add("coordinated", coordinated)
                 .add("delayedError", delayedFailure);
     }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
+    }
 }
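
The readObject/readObjectNoData/writeObject additions above close off the default Java serialization paths for a message whose state (here a DataTreeModification) cannot be serialized; throwNSE() is presumably a helper supplied by the message base class, so this sketch inlines an equivalent under a toy name:

import java.io.IOException;
import java.io.NotSerializableException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamException;
import java.io.Serializable;

// Toy class that is nominally Serializable (inherited from its hierarchy in the real code)
// but must never travel through default Java serialization.
final class LocalOnlyMessage implements Serializable {
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    private void throwNSE() throws NotSerializableException {
        throw new NotSerializableException(getClass().getName());
    }

    @java.io.Serial
    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
        throwNSE();
    }

    @java.io.Serial
    private void readObjectNoData() throws ObjectStreamException {
        throwNSE();
    }

    @java.io.Serial
    private void writeObject(final ObjectOutputStream stream) throws IOException {
        throwNSE();
    }
}
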
index 46b460ac0d364ec137113fcb65cea599ac724058..1157d1b6f888fc596e1cd42dcd2ca9796a889348 100644 (file)
@@ -7,20 +7,31 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 
 /**
  * A {@link RequestFailure} reported when {@link ConnectClientRequest} fails.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ConnectClientFailure extends RequestFailure<ClientIdentifier, ConnectClientFailure> {
+    interface SerialForm extends RequestFailure.SerialForm<ClientIdentifier, ConnectClientFailure> {
+        @Override
+        default ClientIdentifier readTarget(final DataInput in) throws IOException {
+            return ClientIdentifier.readFrom(in);
+        }
+
+        @Override
+        default ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence,
+                final RequestException cause) {
+            return new ConnectClientFailure(target, sequence, cause);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     ConnectClientFailure(final ClientIdentifier target, final long sequence, final RequestException cause) {
@@ -32,9 +43,8 @@ public final class ConnectClientFailure extends RequestFailure<ClientIdentifier,
     }
 
     @Override
-    protected AbstractRequestFailureProxy<ClientIdentifier, ConnectClientFailure> externalizableProxy(
-            final ABIVersion version) {
-        return new ConnectClientFailureProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CCF(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientFailureProxyV1.java
deleted file mode 100644 (file)
index 55efb28..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-
-/**
- * Serialization proxy for use with {@link ConnectClientFailure}. This class implements initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientFailureProxyV1 extends AbstractRequestFailureProxy<ClientIdentifier, ConnectClientFailure> {
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ConnectClientFailureProxyV1() {
-        // For Externalizable
-    }
-
-    ConnectClientFailureProxyV1(final ConnectClientFailure failure) {
-        super(failure);
-    }
-
-    @Override
-    protected ConnectClientFailure createFailure(final ClientIdentifier target, final long sequence,
-            final RequestException cause) {
-        return new ConnectClientFailure(target, sequence, cause);
-    }
-
-    @Override
-    protected ClientIdentifier readTarget(final DataInput in) throws IOException {
-        return ClientIdentifier.readFrom(in);
-    }
-}
index ba86035e920a2356424bbdc37050d0eb2771907c..953fafefa89078871c7c207100199f5779af6f22 100644 (file)
@@ -10,10 +10,12 @@ package org.opendaylight.controller.cluster.access.commands;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -26,11 +28,30 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
  *
  * <p>
  * It also includes request stream sequencing information.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ConnectClientRequest extends Request<ClientIdentifier, ConnectClientRequest> {
+    interface SerialForm extends Request.SerialForm<ClientIdentifier, ConnectClientRequest> {
+        @Override
+        default ConnectClientRequest readExternal(final ObjectInput in, final ClientIdentifier target,
+                final long sequence, final ActorRef replyTo) throws IOException {
+            return new ConnectClientRequest(target, sequence, replyTo, ABIVersion.inexactReadFrom(in),
+                ABIVersion.inexactReadFrom(in));
+        }
+
+        @Override
+        default ClientIdentifier readTarget(final DataInput in) throws IOException {
+            return ClientIdentifier.readFrom(in);
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final ConnectClientRequest msg) throws IOException {
+            Request.SerialForm.super.writeExternal(out, msg);
+            msg.getMinVersion().writeTo(out);
+            msg.getMaxVersion().writeTo(out);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final ABIVersion minVersion;
@@ -50,8 +71,8 @@ public final class ConnectClientRequest extends Request<ClientIdentifier, Connec
 
     private ConnectClientRequest(final ConnectClientRequest request, final ABIVersion version) {
         super(request, version);
-        this.minVersion = request.minVersion;
-        this.maxVersion = request.maxVersion;
+        minVersion = request.minVersion;
+        maxVersion = request.maxVersion;
     }
 
     public ABIVersion getMinVersion() {
@@ -68,9 +89,8 @@ public final class ConnectClientRequest extends Request<ClientIdentifier, Connec
     }
 
     @Override
-    protected AbstractRequestProxy<ClientIdentifier, ConnectClientRequest> externalizableProxy(
-            final ABIVersion version) {
-        return new ConnectClientRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CCR(this);
     }
 
     @Override
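
ConnectClientRequest's serial form above reads both ABIVersion bounds inline in the constructor call; that is safe because Java evaluates argument expressions left to right, so the values come off the stream in exactly the order writeExternal put them there. A small sketch of the same idiom, with a toy VersionRange standing in for the ABIVersion pair:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class VersionRangeSketch {
    // Toy stand-in for a min/max version pair such as the one carried by ConnectClientRequest.
    record VersionRange(short min, short max) {
        static VersionRange readFrom(final DataInput in) throws IOException {
            // Arguments are evaluated left to right, so the first readShort() yields the field
            // that was written first, mirroring the two inline ABIVersion.inexactReadFrom(in) calls.
            return new VersionRange(in.readShort(), in.readShort());
        }
    }

    public static void main(final String[] args) throws IOException {
        final var bytes = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bytes)) {
            out.writeShort(5);   // min, written first
            out.writeShort(13);  // max, written second
        }
        try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            System.out.println(VersionRange.readFrom(in)); // VersionRange[min=5, max=13]
        }
    }
}
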
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientRequestProxyV1.java
deleted file mode 100644 (file)
index da3a601..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ConnectClientRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientRequestProxyV1 extends AbstractRequestProxy<ClientIdentifier, ConnectClientRequest> {
-    private ABIVersion minVersion;
-    private ABIVersion maxVersion;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ConnectClientRequestProxyV1() {
-        // for Externalizable
-    }
-
-    ConnectClientRequestProxyV1(final ConnectClientRequest request) {
-        super(request);
-        this.minVersion = request.getMinVersion();
-        this.maxVersion = request.getMaxVersion();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        minVersion.writeTo(out);
-        maxVersion.writeTo(out);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        minVersion = ABIVersion.inexactReadFrom(in);
-        maxVersion = ABIVersion.inexactReadFrom(in);
-    }
-
-    @Override
-    protected ConnectClientRequest createRequest(final ClientIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new ConnectClientRequest(target, sequence, replyTo, minVersion, maxVersion);
-    }
-
-    @Override
-    protected ClientIdentifier readTarget(final DataInput in) throws IOException {
-        return ClientIdentifier.readFrom(in);
-    }
-}
index 1ec81c8b5bd34fab0f26cee41928fff8fc1a2b9d..ad0e3624e1f1aa05969fa850c8b5b4f897316458 100644 (file)
@@ -12,53 +12,98 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import com.google.common.annotations.Beta;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.collect.ImmutableList;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Successful reply to a {@link ConnectClientRequest}. The client actor which initiated this connection should use
  * the version reported via {@link #getVersion()} of this message to communicate with this backend. Should this backend
  * fail, the client can try accessing the provided alternates.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier, ConnectClientSuccess> {
-    private static final long serialVersionUID = 1L;
+    interface SerialForm extends RequestSuccess.SerialForm<ClientIdentifier, ConnectClientSuccess> {
+        @Override
+        default ClientIdentifier readTarget(final DataInput in) throws IOException {
+            return ClientIdentifier.readFrom(in);
+        }
+
+        @Override
+        default ConnectClientSuccess readExternal(final ObjectInput in, final ClientIdentifier target,
+                final long sequence) throws IOException, ClassNotFoundException {
+            final var backend = JavaSerializer.currentSystem().value().provider()
+                .resolveActorRef((String) in.readObject());
+            final var maxMessages = in.readInt();
+
+            final int alternatesSize = in.readInt();
+            final var alternates = new ArrayList<ActorSelection>(alternatesSize);
+            for (int i = 0; i < alternatesSize; ++i) {
+                alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject()));
+            }
+
+            return new ConnectClientSuccess(target, sequence, backend, alternates, maxMessages, null);
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final ConnectClientSuccess msg) throws IOException {
+            out.writeObject(Serialization.serializedActorPath(msg.backend));
+            out.writeInt(msg.maxMessages);
+
+            out.writeInt(msg.alternates.size());
+            for (ActorSelection b : msg.alternates) {
+                out.writeObject(b.toSerializationFormat());
+            }
+
+            // We are ignoring the DataTree, as it is not serializable anyway
+        }
+    }
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
-    private final @NonNull List<ActorSelection> alternates;
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "See justification above.")
-    private final DataTree dataTree;
+    private final @NonNull ImmutableList<ActorSelection> alternates;
+    private final ReadOnlyDataTree dataTree;
     private final @NonNull ActorRef backend;
     private final int maxMessages;
 
+    private ConnectClientSuccess(final ConnectClientSuccess success, final ABIVersion version) {
+        super(success, version);
+        alternates = success.alternates;
+        dataTree = success.dataTree;
+        backend = success.backend;
+        maxMessages = success.maxMessages;
+    }
+
     ConnectClientSuccess(final ClientIdentifier target, final long sequence, final ActorRef backend,
-        final List<ActorSelection> alternates, final Optional<DataTree> dataTree, final int maxMessages) {
+        final List<ActorSelection> alternates, final int maxMessages, final ReadOnlyDataTree dataTree) {
         super(target, sequence);
         this.backend = requireNonNull(backend);
         this.alternates = ImmutableList.copyOf(alternates);
-        this.dataTree = dataTree.orElse(null);
+        this.dataTree = dataTree;
         checkArgument(maxMessages > 0, "Maximum messages has to be positive, not %s", maxMessages);
         this.maxMessages = maxMessages;
     }
 
     public ConnectClientSuccess(final @NonNull ClientIdentifier target, final long sequence,
             final @NonNull ActorRef backend, final @NonNull List<ActorSelection> alternates,
-            final @NonNull DataTree dataTree, final int maxMessages) {
-        this(target, sequence, backend, alternates, Optional.of(dataTree), maxMessages);
+            final @NonNull ReadOnlyDataTree dataTree, final int maxMessages) {
+        this(target, sequence, backend, alternates, maxMessages, requireNonNull(dataTree));
     }
 
     /**
@@ -74,7 +119,7 @@ public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier,
         return backend;
     }
 
-    public Optional<DataTree> getDataTree() {
+    public Optional<ReadOnlyDataTree> getDataTree() {
         return Optional.ofNullable(dataTree);
     }
 
@@ -83,13 +128,13 @@ public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier,
     }
 
     @Override
-    protected ConnectClientSuccessProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ConnectClientSuccessProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CCS(this);
     }
 
     @Override
     protected ConnectClientSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ConnectClientSuccess(this, version);
     }
 
     @Override
@@ -97,4 +142,19 @@ public final class ConnectClientSuccess extends RequestSuccess<ClientIdentifier,
         return super.addToStringAttributes(toStringHelper).add("alternates", alternates)
                 .add("dataTree present", getDataTree().isPresent()).add("maxMessages", maxMessages);
     }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
+    }
 }
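
ConnectClientSuccess's serial form above ships the backend ActorRef and each alternate ActorSelection as path strings rather than Java-serialized actor objects. A minimal sketch of that round trip against a plain local ActorSystem (the real code resolves the reference through JavaSerializer.currentSystem() inside Akka's serialization scope; the cast below is an assumption that holds for a directly created system):

import akka.actor.ActorRef;
import akka.actor.ActorSelection;
import akka.actor.ActorSystem;
import akka.actor.ExtendedActorSystem;
import akka.serialization.Serialization;

final class ActorPathRoundTrip {
    public static void main(final String[] args) {
        final ActorSystem system = ActorSystem.create("sketch");
        try {
            // Writing side: turn the reference into its stable path string.
            final ActorRef backend = system.deadLetters();
            final String path = Serialization.serializedActorPath(backend);

            // Reading side: resolve the string back into a reference and a selection.
            final ActorRef resolved = ((ExtendedActorSystem) system).provider().resolveActorRef(path);
            final ActorSelection alternate = ActorSelection.apply(ActorRef.noSender(), path);

            System.out.println(path + " -> " + resolved + " / " + alternate.toSerializationFormat());
        } finally {
            system.terminate();
        }
    }
}
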
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ConnectClientSuccessProxyV1.java
deleted file mode 100644 (file)
index fb44e07..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.serialization.JavaSerializer;
-import akka.serialization.Serialization;
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ConnectClientSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ConnectClientSuccessProxyV1 extends AbstractSuccessProxy<ClientIdentifier, ConnectClientSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    private List<ActorSelection> alternates;
-    private ActorRef backend;
-    private int maxMessages;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ConnectClientSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ConnectClientSuccessProxyV1(final ConnectClientSuccess success) {
-        super(success);
-        this.alternates = success.getAlternates();
-        this.backend = success.getBackend();
-        this.maxMessages = success.getMaxMessages();
-        // We are ignoring the DataTree, it is not serializable anyway
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        out.writeObject(Serialization.serializedActorPath(backend));
-        out.writeInt(maxMessages);
-
-        out.writeInt(alternates.size());
-        for (ActorSelection b : alternates) {
-            out.writeObject(b.toSerializationFormat());
-        }
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        backend = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
-        maxMessages = in.readInt();
-
-        final int alternatesSize = in.readInt();
-        alternates = new ArrayList<>(alternatesSize);
-        for (int i = 0; i < alternatesSize; ++i) {
-            alternates.add(ActorSelection.apply(ActorRef.noSender(), (String)in.readObject()));
-        }
-    }
-
-    @Override
-    protected ConnectClientSuccess createSuccess(final ClientIdentifier target, final long sequence) {
-        return new ConnectClientSuccess(target, sequence, backend, alternates, Optional.empty(), maxMessages);
-    }
-
-    @Override
-    protected ClientIdentifier readTarget(final DataInput in) throws IOException {
-        return ClientIdentifier.readFrom(in);
-    }
-}
index 01a110d046e16aed03d68ff355efec133a9d7453..b627bafa438cf005f723146fb316c91a3a0b1e90 100644 (file)
@@ -8,17 +8,23 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 
 /**
  * Request to create a new local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class CreateLocalHistoryRequest extends LocalHistoryRequest<CreateLocalHistoryRequest> {
+    interface SerialForm extends LocalHistoryRequest.SerialForm<CreateLocalHistoryRequest> {
+        @Override
+        default CreateLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new CreateLocalHistoryRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CreateLocalHistoryRequest(final LocalHistoryIdentifier target, final ActorRef replyTo) {
@@ -34,9 +40,8 @@ public final class CreateLocalHistoryRequest extends LocalHistoryRequest<CreateL
     }
 
     @Override
-    protected AbstractLocalHistoryRequestProxy<CreateLocalHistoryRequest> externalizableProxy(
-            final ABIVersion version) {
-        return new CreateLocalHistoryRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new CHR(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/CreateLocalHistoryRequestProxyV1.java
deleted file mode 100644 (file)
index b61c9f5..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link CreateLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class CreateLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<CreateLocalHistoryRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public CreateLocalHistoryRequestProxyV1() {
-        // For Externalizable
-    }
-
-    CreateLocalHistoryRequestProxyV1(final CreateLocalHistoryRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected CreateLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new CreateLocalHistoryRequest(target, sequence, replyTo);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DHR.java
new file mode 100644 (file)
index 0000000..ebd0f02
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class DHR implements DestroyLocalHistoryRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private DestroyLocalHistoryRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DHR() {
+        // for Externalizable
+    }
+
+    DHR(final DestroyLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public DestroyLocalHistoryRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final DestroyLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index 7df84763c5ed4fa491efe6dc62c5fca072c9b344..a91eb6971c3c4d2f173122e0dfb03b294dfe443e 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import com.google.common.collect.RangeSet;
 import com.google.common.primitives.UnsignedLong;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -15,11 +14,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 /**
  * A {@link RequestException} indicating that the backend has received a request to create a history which has already
  * been retired.
- *
- * @author Robert Varga
  */
-@Beta
 public final class DeadHistoryException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public DeadHistoryException(final RangeSet<UnsignedLong> purgedHistories) {
index fee439984ac53c15fbdc37c09c618ba410bda150..0f259c1a947ba32d1b8bb1fe3f2dd02677e08357 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import com.google.common.collect.ImmutableRangeSet;
 import com.google.common.collect.RangeSet;
 import com.google.common.primitives.UnsignedLong;
@@ -16,11 +15,9 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 /**
  * A {@link RequestException} indicating that the backend has received a request to create a transaction which has
  * already been purged.
- *
- * @author Robert Varga
  */
-@Beta
 public final class DeadTransactionException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final RangeSet<UnsignedLong> purgedIdentifiers;
index 375128318c43cf2bdf11395fd20ec95f0c42285a..5add5eb05451e9df6002b82772576780b3476abe 100644 (file)
@@ -8,17 +8,23 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 
 /**
  * Request to destroy a local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class DestroyLocalHistoryRequest extends LocalHistoryRequest<DestroyLocalHistoryRequest> {
+    interface SerialForm extends LocalHistoryRequest.SerialForm<DestroyLocalHistoryRequest> {
+        @Override
+        default DestroyLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new DestroyLocalHistoryRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public DestroyLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence,
@@ -31,9 +37,8 @@ public final class DestroyLocalHistoryRequest extends LocalHistoryRequest<Destro
     }
 
     @Override
-    protected AbstractLocalHistoryRequestProxy<DestroyLocalHistoryRequest> externalizableProxy(
-            final ABIVersion version) {
-        return new DestroyLocalHistoryRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new DHR(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/DestroyLocalHistoryRequestProxyV1.java
deleted file mode 100644 (file)
index 0ebd690..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link DestroyLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class DestroyLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<DestroyLocalHistoryRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public DestroyLocalHistoryRequestProxyV1() {
-        // For Externalizable
-    }
-
-    DestroyLocalHistoryRequestProxyV1(final DestroyLocalHistoryRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected DestroyLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new DestroyLocalHistoryRequest(target, sequence, replyTo);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETR.java
new file mode 100644 (file)
index 0000000..26964e4
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class ETR implements ExistsTransactionRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ExistsTransactionRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ETR() {
+        // for Externalizable
+    }
+
+    ETR(final ExistsTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ExistsTransactionRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ExistsTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ETS.java
new file mode 100644 (file)
index 0000000..ad8564b
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link ExistsTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class ETS implements TransactionSuccess.SerialForm<ExistsTransactionSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ExistsTransactionSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ETS() {
+        // for Externalizable
+    }
+
+    ETS(final ExistsTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ExistsTransactionSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ExistsTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out, final ExistsTransactionSuccess msg) throws IOException {
+        out.writeBoolean(msg.getExists());
+    }
+
+    @Override
+    public ExistsTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        return new ExistsTransactionSuccess(target, sequence, in.readBoolean());
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index ab2316c28edb63eb65b9f3d48b9cc8a9b99d869f..06c2797ca41d8d7bd6f4e745902aa7e553859e55 100644 (file)
@@ -8,7 +8,8 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectInput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -16,15 +17,22 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
  * A transaction request to query if a particular path exists in the current view of a particular transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ExistsTransactionRequest extends AbstractReadPathTransactionRequest<ExistsTransactionRequest> {
+    interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm<ExistsTransactionRequest> {
+        @Override
+        default ExistsTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path)
+                throws IOException {
+            return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public ExistsTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence,
-            final @NonNull  ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) {
+            final @NonNull ActorRef replyTo, final @NonNull YangInstanceIdentifier path, final boolean snapshotOnly) {
         super(identifier, sequence, replyTo, path, snapshotOnly);
     }
 
@@ -38,7 +46,7 @@ public final class ExistsTransactionRequest extends AbstractReadPathTransactionR
     }
 
     @Override
-    protected ExistsTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ExistsTransactionRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new ETR(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index 2429947..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ExistsTransactionRequestProxyV1 extends
-        AbstractReadPathTransactionRequestProxyV1<ExistsTransactionRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ExistsTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    ExistsTransactionRequestProxyV1(final ExistsTransactionRequest request) {
-        super(request);
-    }
-
-    @Override
-    ExistsTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) {
-        return new ExistsTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
-    }
-}
index 8a1704de763725d3e2aaa0a2aad8baba43c00f36..72dee3aefa365fd4a4fbee999e786574e4db6a31 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -15,14 +14,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 /**
  * Successful reply to an {@link ExistsTransactionRequest}. It indicates presence of requested data via
  * {@link #getExists()}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ExistsTransactionSuccess extends TransactionSuccess<ExistsTransactionSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final boolean exists;
 
+    private ExistsTransactionSuccess(final ExistsTransactionSuccess success, final ABIVersion version) {
+        super(success, version);
+        exists = success.exists;
+    }
+
     public ExistsTransactionSuccess(final TransactionIdentifier target, final long sequence, final boolean exists) {
         super(target, sequence);
         this.exists = exists;
@@ -33,13 +36,13 @@ public final class ExistsTransactionSuccess extends TransactionSuccess<ExistsTra
     }
 
     @Override
-    protected ExistsTransactionSuccessProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ExistsTransactionSuccessProxyV1(this);
+    protected ETS externalizableProxy(final ABIVersion version) {
+        return new ETS(this);
     }
 
     @Override
     protected ExistsTransactionSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ExistsTransactionSuccess(this, version);
     }
 
     @Override
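
The cloneAsVersion() change above is part of the same reshuffle: instead of returning this, the message now goes through a private copy constructor that re-binds it to the requested ABI version. A rough sketch of that shape, with illustrative names rather than the real ABIVersion/TransactionSuccess hierarchy:

    // Illustrative version-retargeting copy, not the actual cds-access-api classes.
    enum WireVersion { OLDER, NEWER }

    final class VersionedReply {
        private final WireVersion version;
        private final boolean exists;

        VersionedReply(final WireVersion version, final boolean exists) {
            this.version = version;
            this.exists = exists;
        }

        // Copy constructor: same payload, new target version.
        private VersionedReply(final VersionedReply original, final WireVersion newVersion) {
            this(newVersion, original.exists);
        }

        VersionedReply cloneAsVersion(final WireVersion newVersion) {
            // Never "return this": the returned instance should carry the version it
            // will actually be serialized in, even if the payload is unchanged.
            return new VersionedReply(this, newVersion);
        }
    }
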
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ExistsTransactionSuccessProxyV1.java
deleted file mode 100644 (file)
index 24de176..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ExistsTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ExistsTransactionSuccess> {
-    private static final long serialVersionUID = 1L;
-    private boolean exists;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ExistsTransactionSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ExistsTransactionSuccessProxyV1(final ExistsTransactionSuccess request) {
-        super(request);
-        this.exists = request.getExists();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeBoolean(exists);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        exists = in.readBoolean();
-    }
-
-    @Override
-    protected ExistsTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new ExistsTransactionSuccess(target, sequence, exists);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HF.java
new file mode 100644 (file)
index 0000000..68e9b09
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class HF implements LocalHistoryFailure.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryFailure message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public HF() {
+        // for Externalizable
+    }
+
+    HF(final LocalHistoryFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public LocalHistoryFailure message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final LocalHistoryFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/HS.java
new file mode 100644 (file)
index 0000000..4ab0ff5
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link LocalHistorySuccess}. It implements the Chlorine SR2 serialization format.
+ */
+final class HS implements LocalHistorySuccess.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistorySuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public HS() {
+        // for Externalizable
+    }
+
+    HS(final LocalHistorySuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public LocalHistorySuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final LocalHistorySuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSR.java
new file mode 100644 (file)
index 0000000..ef76f5e
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link IncrementTransactionSequenceRequest}. It implements the Chlorine SR2
+ * serialization format.
+ */
+final class ITSR implements IncrementTransactionSequenceRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private IncrementTransactionSequenceRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ITSR() {
+        // for Externalizable
+    }
+
+    ITSR(final IncrementTransactionSequenceRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public IncrementTransactionSequenceRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final IncrementTransactionSequenceRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ITSS.java
new file mode 100644 (file)
index 0000000..7252d58
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the Chlorine SR2
+ * serialization format.
+ */
+final class ITSS implements TransactionSuccess.SerialForm<IncrementTransactionSequenceSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private IncrementTransactionSequenceSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ITSS() {
+        // for Externalizable
+    }
+
+    ITSS(final IncrementTransactionSequenceSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public IncrementTransactionSequenceSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final IncrementTransactionSequenceSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public IncrementTransactionSequenceSuccess readExternal(final ObjectInput it, final TransactionIdentifier target,
+            final long sequence) {
+        return new IncrementTransactionSequenceSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index ffc0a68b8912481bfeca882545ff3626469025e0..5695860e17cd7b0ba48d3dada038baf63c7620d8 100644 (file)
@@ -7,28 +7,55 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static com.google.common.base.Preconditions.checkArgument;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 /**
  * A blank transaction request. This is used to provide backfill requests in converted retransmit scenarios, such as
  * when an initial request to a transaction (such as a {@link ReadTransactionRequest}) is satisfied by the backend
  * before the need to replay the transaction to a different remote backend.
- *
- * @author Robert Varga
  */
 public final class IncrementTransactionSequenceRequest extends
         AbstractReadTransactionRequest<IncrementTransactionSequenceRequest> {
+    interface SerialForm extends AbstractReadTransactionRequest.SerialForm<IncrementTransactionSequenceRequest> {
+        @Override
+        default void writeExternal(final ObjectOutput out, final IncrementTransactionSequenceRequest msg)
+                throws IOException {
+            AbstractReadTransactionRequest.SerialForm.super.writeExternal(out, msg);
+            WritableObjects.writeLong(out, msg.getIncrement());
+        }
+
+        @Override
+        default IncrementTransactionSequenceRequest readExternal(final ObjectInput in,
+                final TransactionIdentifier target, final long sequence, final ActorRef replyTo,
+                final boolean snapshotOnly) throws IOException {
+            return new IncrementTransactionSequenceRequest(target, sequence, replyTo, snapshotOnly,
+                WritableObjects.readLong(in));
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long increment;
 
+    public IncrementTransactionSequenceRequest(final IncrementTransactionSequenceRequest request,
+            final ABIVersion version) {
+        super(request, version);
+        increment = request.increment;
+    }
+
     public IncrementTransactionSequenceRequest(final TransactionIdentifier identifier, final long sequence,
             final ActorRef replyTo, final boolean snapshotOnly, final long increment) {
         super(identifier, sequence, replyTo, snapshotOnly);
-        Preconditions.checkArgument(increment >= 0);
+        checkArgument(increment >= 0, "Unexpected increment %s", increment);
         this.increment = increment;
     }
 
@@ -42,12 +69,12 @@ public final class IncrementTransactionSequenceRequest extends
     }
 
     @Override
-    protected IncrementTransactionSequenceRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new IncrementTransactionSequenceRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new ITSR(this);
     }
 
     @Override
     protected IncrementTransactionSequenceRequest cloneAsVersion(final ABIVersion targetVersion) {
-        return this;
+        return new IncrementTransactionSequenceRequest(this, targetVersion);
     }
 }
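
The SerialForm above streams the increment through WritableObjects.writeLong() and reads it back with WritableObjects.readLong(); those yangtools helpers use a compact, variable-length encoding rather than a fixed eight bytes. A small round-trip sketch of how that pair is used (the surrounding test scaffolding is illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    public final class WritableLongRoundTrip {
        public static void main(final String[] args) throws IOException {
            final long increment = 42;

            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                // Writes a small header plus only the value bytes actually needed.
                WritableObjects.writeLong(out, increment);
            }

            try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                // Symmetric read; must be paired with writeLong() on the writing side.
                System.out.println(WritableObjects.readLong(in) == increment);
            }
        }
    }
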
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceRequestProxyV1.java
deleted file mode 100644 (file)
index da1659e..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-final class IncrementTransactionSequenceRequestProxyV1
-        extends AbstractReadTransactionRequestProxyV1<IncrementTransactionSequenceRequest> {
-    private long increment;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public IncrementTransactionSequenceRequestProxyV1() {
-        // For Externalizable
-    }
-
-    IncrementTransactionSequenceRequestProxyV1(final IncrementTransactionSequenceRequest request) {
-        super(request);
-        this.increment = request.getIncrement();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        WritableObjects.writeLong(out, increment);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws ClassNotFoundException, IOException {
-        super.readExternal(in);
-        increment = WritableObjects.readLong(in);
-    }
-
-    @Override
-    IncrementTransactionSequenceRequest createReadRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyToActor, final boolean snapshotOnly) {
-        return new IncrementTransactionSequenceRequest(target, sequence, replyToActor, snapshotOnly, increment);
-    }
-}
index 80f4a0d5aab941bf64c8d6a868c13cc70cdeda15..4f27f76563f7649594e067799580f2c1ea60d0bc 100644 (file)
@@ -7,30 +7,32 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * Successful reply to an {@link IncrementTransactionSequenceRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class IncrementTransactionSequenceSuccess extends TransactionSuccess<IncrementTransactionSequenceSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private IncrementTransactionSequenceSuccess(final IncrementTransactionSequenceSuccess success,
+            final ABIVersion version) {
+        super(success, version);
+    }
+
     public IncrementTransactionSequenceSuccess(final TransactionIdentifier target, final long sequence) {
         super(target, sequence);
     }
 
     @Override
-    protected IncrementTransactionSequenceSuccessProxyV1 externalizableProxy(final ABIVersion version) {
-        return new IncrementTransactionSequenceSuccessProxyV1(this);
+    protected ITSS externalizableProxy(final ABIVersion version) {
+        return new ITSS(this);
     }
 
     @Override
     protected IncrementTransactionSequenceSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new IncrementTransactionSequenceSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/IncrementTransactionSequenceSuccessProxyV1.java
deleted file mode 100644 (file)
index a99faab..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link IncrementTransactionSequenceSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class IncrementTransactionSequenceSuccessProxyV1
-        extends AbstractTransactionSuccessProxy<IncrementTransactionSequenceSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public IncrementTransactionSequenceSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    IncrementTransactionSequenceSuccessProxyV1(final IncrementTransactionSequenceSuccess request) {
-        super(request);
-    }
-
-    @Override
-    protected IncrementTransactionSequenceSuccess createSuccess(final TransactionIdentifier target,
-            final long sequence) {
-        return new IncrementTransactionSequenceSuccess(target, sequence);
-    }
-}
index 4fd69c24cee0e2ec2948f768975683a683038faa..fc24d8aedacbf3bf5105d7ecb6b9b7f836f6dcc2 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -15,24 +16,39 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 
 /**
  * Generic {@link RequestFailure} involving a {@link LocalHistoryRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class LocalHistoryFailure extends RequestFailure<LocalHistoryIdentifier, LocalHistoryFailure> {
+    interface SerialForm extends RequestFailure.SerialForm<LocalHistoryIdentifier, LocalHistoryFailure> {
+        @Override
+        default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+            return LocalHistoryIdentifier.readFrom(in);
+        }
+
+        @Override
+        default LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence,
+                final RequestException cause) {
+            return new LocalHistoryFailure(target, sequence, cause);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private LocalHistoryFailure(final LocalHistoryFailure failure, final ABIVersion version) {
+        super(failure, version);
+    }
+
     LocalHistoryFailure(final LocalHistoryIdentifier target, final long sequence, final RequestException cause) {
         super(target, sequence, cause);
     }
 
     @Override
-    protected LocalHistoryFailure cloneAsVersion(final ABIVersion version) {
-        return this;
+    protected LocalHistoryFailure cloneAsVersion(final ABIVersion targetVersion) {
+        return new LocalHistoryFailure(this, targetVersion);
     }
 
     @Override
-    protected LocalHistoryFailureProxyV1 externalizableProxy(final ABIVersion version) {
-        return new LocalHistoryFailureProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new HF(this);
     }
 }
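
LocalHistoryFailure.SerialForm above shows the wider pattern in this change: the shared decode/construct steps that used to live in abstract *ProxyV1 base classes now sit in interface default methods, so each concrete proxy (here HF) collapses to a message holder plus readResolve(). A generic sketch of that shape, with names invented for illustration:

    import java.io.DataInput;
    import java.io.IOException;
    import java.io.Serializable;
    import java.util.Objects;

    // Invented names; shows "shared proxy logic as interface default methods".
    interface FailureFormSketch<T> {
        T message();

        void setMessage(T message);

        // Shared step: how the target identifier is decoded is written once here,
        // instead of being inherited from an abstract proxy base class.
        default String readTarget(final DataInput in) throws IOException {
            return in.readUTF();
        }
    }

    // A concrete proxy now only stores its message and hands it back on readResolve().
    final class FailureProxySketch implements FailureFormSketch<String>, Serializable {
        private static final long serialVersionUID = 1L;
        private String message;

        @Override
        public String message() {
            return Objects.requireNonNull(message);
        }

        @Override
        public void setMessage(final String message) {
            this.message = Objects.requireNonNull(message);
        }

        private Object readResolve() {
            return message();
        }
    }
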
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistoryFailureProxyV1.java
deleted file mode 100644 (file)
index 0d3a687..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-
-/**
- * Externalizable proxy for use with {@link LocalHistoryFailure}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class LocalHistoryFailureProxyV1 extends
-        AbstractRequestFailureProxy<LocalHistoryIdentifier, LocalHistoryFailure> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public LocalHistoryFailureProxyV1() {
-        // For Externalizable
-    }
-
-    LocalHistoryFailureProxyV1(final LocalHistoryFailure failure) {
-        super(failure);
-    }
-
-    @Override
-    protected LocalHistoryFailure createFailure(final LocalHistoryIdentifier target, final long sequence,
-            final RequestException cause) {
-        return new LocalHistoryFailure(target, sequence, cause);
-    }
-
-    @Override
-    protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
-        return LocalHistoryIdentifier.readFrom(in);
-    }
-}
index 33d04850fb91d01a524a7f17ffc12820445e392a..c304384fd8b7089729c03ba8108aee1ba86c67f3 100644 (file)
@@ -8,8 +8,9 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.Preconditions;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.Request;
@@ -19,12 +20,17 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
  * Abstract base class for {@link Request}s involving specific local history. This class is visible outside of this
  * package solely for the ability to perform a unified instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class LocalHistoryRequest<T extends LocalHistoryRequest<T>> extends Request<LocalHistoryIdentifier, T> {
+    interface SerialForm<T extends LocalHistoryRequest<T>> extends Request.SerialForm<LocalHistoryIdentifier, T> {
+        @Override
+        default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+            return LocalHistoryIdentifier.readFrom(in);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     LocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) {
@@ -42,5 +48,5 @@ public abstract class LocalHistoryRequest<T extends LocalHistoryRequest<T>> exte
     }
 
     @Override
-    protected abstract AbstractLocalHistoryRequestProxy<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
index 3b8ed35816ede5bb36a0e41ddaa57f7d74ae5971..7c0e1865c12585f75736d42dc6e54b8ffdd14e40 100644 (file)
@@ -7,37 +7,48 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 
 /**
  * Success class for {@link RequestSuccess}es involving a specific local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class LocalHistorySuccess extends RequestSuccess<LocalHistoryIdentifier, LocalHistorySuccess> {
-    private static final long serialVersionUID = 1L;
+    interface SerialForm extends RequestSuccess.SerialForm<LocalHistoryIdentifier, LocalHistorySuccess> {
+        @Override
+        default LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
+            return LocalHistoryIdentifier.readFrom(in);
+        }
 
-    public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) {
-        super(target, sequence);
+        @Override
+        default LocalHistorySuccess readExternal(final ObjectInput it, final LocalHistoryIdentifier target,
+                final long sequence) {
+            return new LocalHistorySuccess(target, sequence);
+        }
     }
 
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
     private LocalHistorySuccess(final LocalHistorySuccess success, final ABIVersion version) {
         super(success, version);
     }
 
+    public LocalHistorySuccess(final LocalHistoryIdentifier target, final long sequence) {
+        super(target, sequence);
+    }
+
     @Override
     protected LocalHistorySuccess cloneAsVersion(final ABIVersion version) {
         return new LocalHistorySuccess(this, version);
     }
 
     @Override
-    protected AbstractSuccessProxy<LocalHistoryIdentifier, LocalHistorySuccess> externalizableProxy(
-            final ABIVersion version) {
-        return new LocalHistorySuccessProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new HS(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/LocalHistorySuccessProxyV1.java
deleted file mode 100644 (file)
index 97a7a1c..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractSuccessProxy;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Serialization proxy associated with {@link LocalHistorySuccess}.
- *
- * @author Robert Varga
- */
-final class LocalHistorySuccessProxyV1 extends AbstractSuccessProxy<LocalHistoryIdentifier, LocalHistorySuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public LocalHistorySuccessProxyV1() {
-        // For Externalizable
-    }
-
-    LocalHistorySuccessProxyV1(final LocalHistorySuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected LocalHistoryIdentifier readTarget(final DataInput in) throws IOException {
-        return LocalHistoryIdentifier.readFrom(in);
-    }
-
-    @Override
-    protected LocalHistorySuccess createSuccess(final LocalHistoryIdentifier target, final long sequence) {
-        return new LocalHistorySuccess(target, sequence);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTR.java
new file mode 100644 (file)
index 0000000..a0c5acf
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ModifyTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class MTR implements ModifyTransactionRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ModifyTransactionRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MTR() {
+        // for Externalizable
+    }
+
+    MTR(final ModifyTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ModifyTransactionRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ModifyTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/MTS.java
new file mode 100644 (file)
index 0000000..ee7e876
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class MTS implements TransactionSuccess.SerialForm<ModifyTransactionSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ModifyTransactionSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MTS() {
+        // for Externalizable
+    }
+
+    MTS(final ModifyTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ModifyTransactionSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ModifyTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ModifyTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        return new ModifyTransactionSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index 39b577cef26b67d9739fb32fe32b5d739a1c1908..b1ddd389306ac9ad5e38f8abbcd155dd91a1dec7 100644 (file)
@@ -8,33 +8,83 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.collect.ImmutableList;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
 
 /**
  * A transaction request to apply a particular set of operations on top of the current transaction. This message is
  * used to also finish a transaction by specifying a {@link PersistenceProtocol}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ModifyTransactionRequest extends TransactionRequest<ModifyTransactionRequest>
         implements SliceableMessage {
+    interface SerialForm extends TransactionRequest.SerialForm<ModifyTransactionRequest> {
+
+
+        @Override
+        default ModifyTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) throws IOException {
+
+            final var protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in));
+            final int size = in.readInt();
+            final List<TransactionModification> modifications;
+            if (size != 0) {
+                modifications = new ArrayList<>(size);
+                final var nnin = NormalizedNodeDataInput.newDataInput(in);
+                final var writer = ReusableImmutableNormalizedNodeStreamWriter.create();
+                for (int i = 0; i < size; ++i) {
+                    modifications.add(TransactionModification.readFrom(nnin, writer));
+                }
+            } else {
+                modifications = ImmutableList.of();
+            }
+
+            return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null));
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final ModifyTransactionRequest msg) throws IOException {
+            TransactionRequest.SerialForm.super.writeExternal(out, msg);
+
+            out.writeByte(PersistenceProtocol.byteValue(msg.getPersistenceProtocol().orElse(null)));
+
+            final var modifications = msg.getModifications();
+            out.writeInt(modifications.size());
+            if (!modifications.isEmpty()) {
+                try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+                    for (var op : modifications) {
+                        op.writeTo(nnout);
+                    }
+                }
+            }
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
     private final List<TransactionModification> modifications;
     private final PersistenceProtocol protocol;
 
+    private ModifyTransactionRequest(final ModifyTransactionRequest request, final ABIVersion version) {
+        super(request, version);
+        modifications = request.modifications;
+        protocol = request.protocol;
+    }
+
     ModifyTransactionRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo,
         final List<TransactionModification> modifications, final PersistenceProtocol protocol) {
         super(target, sequence, replyTo);
@@ -57,12 +107,27 @@ public final class ModifyTransactionRequest extends TransactionRequest<ModifyTra
     }
 
     @Override
-    protected ModifyTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ModifyTransactionRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new MTR(this);
     }
 
     @Override
     protected ModifyTransactionRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ModifyTransactionRequest(this, version);
+    }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
     }
 }
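
The readObject/readObjectNoData/writeObject trio added above closes the back door around the MTR proxy: ModifyTransactionRequest should only ever travel through its externalizable proxy, and any attempt to push the class itself through an object stream fails (throwNSE() in the base class presumably raises a NotSerializableException). A self-contained sketch of that guard, with illustrative names:

    import java.io.IOException;
    import java.io.NotSerializableException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.ObjectStreamException;
    import java.io.Serializable;

    // Illustrative only: serialization must go through the proxy, never the class itself.
    final class ProxyOnlyRequest implements Serializable {
        private static final long serialVersionUID = 1L;

        // Normal path: writeReplace() hands a compact proxy to the stream.
        private Object writeReplace() {
            return new RequestProxy();
        }

        private void throwNSE() throws NotSerializableException {
            throw new NotSerializableException(getClass().getName());
        }

        // Guard hooks: reject any direct (de)serialization of this class.
        private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
            throwNSE();
        }

        private void readObjectNoData() throws ObjectStreamException {
            throwNSE();
        }

        private void writeObject(final ObjectOutputStream stream) throws IOException {
            throwNSE();
        }
    }

    final class RequestProxy implements Serializable {
        private static final long serialVersionUID = 1L;

        private Object readResolve() {
            return new ProxyOnlyRequest();
        }
    }
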
index 9312e4c7ac69105690de54a8b471b452bf703133..0e99942c1ee9655bb32ac996d9462ec346cefa8d 100644 (file)
@@ -7,28 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
 import java.util.ArrayList;
 import java.util.List;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Builder;
 import org.opendaylight.yangtools.concepts.Identifiable;
 
 /**
- * A reusable {@link Builder} for creating {@link ModifyTransactionRequest} message instances. Its internal state is
- * reset when {@link #build()} is invoked, hence it can be used to create a sequence of messages.
- *
- * @author Robert Varga
+ * A reusable builder for creating {@link ModifyTransactionRequest} message instances. Its internal state is reset when
+ * {@link #build()} is invoked, hence it can be used to create a sequence of messages. This class is NOT thread-safe.
  */
-@Beta
-@NotThreadSafe
-public final class ModifyTransactionRequestBuilder implements Builder<ModifyTransactionRequest>,
-        Identifiable<TransactionIdentifier> {
+public final class ModifyTransactionRequestBuilder implements Identifiable<TransactionIdentifier> {
     private final List<TransactionModification> modifications = new ArrayList<>(1);
-    private final TransactionIdentifier identifier;
+    private final @NonNull TransactionIdentifier identifier;
     private final ActorRef replyTo;
 
     private PersistenceProtocol protocol;
@@ -36,8 +31,8 @@ public final class ModifyTransactionRequestBuilder implements Builder<ModifyTran
     private long sequence;
 
     public ModifyTransactionRequestBuilder(final TransactionIdentifier identifier, final ActorRef replyTo) {
-        this.identifier = Preconditions.checkNotNull(identifier);
-        this.replyTo = Preconditions.checkNotNull(replyTo);
+        this.identifier = requireNonNull(identifier);
+        this.replyTo = requireNonNull(replyTo);
     }
 
     @Override
@@ -46,16 +41,16 @@ public final class ModifyTransactionRequestBuilder implements Builder<ModifyTran
     }
 
     private void checkNotFinished() {
-        Preconditions.checkState(protocol == null, "Batch has already been finished");
+        checkState(protocol == null, "Batch has already been finished");
     }
 
     public void addModification(final TransactionModification modification) {
         checkNotFinished();
-        modifications.add(Preconditions.checkNotNull(modification));
+        modifications.add(requireNonNull(modification));
     }
 
     public void setSequence(final long sequence) {
-        Preconditions.checkState(!haveSequence, "Sequence has already been set");
+        checkState(!haveSequence, "Sequence has already been set");
         this.sequence = sequence;
         haveSequence = true;
     }
@@ -81,9 +76,8 @@ public final class ModifyTransactionRequestBuilder implements Builder<ModifyTran
         return modifications.size();
     }
 
-    @Override
-    public ModifyTransactionRequest build() {
-        Preconditions.checkState(haveSequence, "Request sequence has not been set");
+    public @NonNull ModifyTransactionRequest build() {
+        checkState(haveSequence, "Request sequence has not been set");
 
         final ModifyTransactionRequest ret = new ModifyTransactionRequest(identifier, sequence, replyTo, modifications,
             protocol);
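
Typical use of the builder above, per its contract in this file: queue modifications, set the sequence exactly once, then build(), which also resets the builder for the next message. A rough usage sketch, assumed to live in the same package so package-private types resolve; the txId, replyTo and modification arguments are placeholders supplied by the caller:

    package org.opendaylight.controller.cluster.access.commands;

    import akka.actor.ActorRef;
    import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;

    // Hypothetical helper, not part of the codebase.
    final class ModifyRequestBuilderUsage {
        static ModifyTransactionRequest newRequest(final TransactionIdentifier txId, final ActorRef replyTo,
                final TransactionModification modification, final long sequence) {
            final var builder = new ModifyTransactionRequestBuilder(txId, replyTo);
            builder.addModification(modification); // may be repeated to batch several operations
            builder.setSequence(sequence);         // must be set exactly once before build()
            return builder.build();                // resets internal state for the next message
        }
    }
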
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index 846756d..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataInput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputOutput;
-
-/**
- * Externalizable proxy for use with {@link ExistsTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ModifyTransactionRequestProxyV1 extends AbstractTransactionRequestProxy<ModifyTransactionRequest> {
-    private static final long serialVersionUID = 1L;
-    private List<TransactionModification> modifications;
-    private Optional<PersistenceProtocol> protocol;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ModifyTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    ModifyTransactionRequestProxyV1(final ModifyTransactionRequest request) {
-        super(request);
-        this.modifications = Preconditions.checkNotNull(request.getModifications());
-        this.protocol = request.getPersistenceProtocol();
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        protocol = Optional.ofNullable(PersistenceProtocol.readFrom(in));
-
-        final int size = in.readInt();
-        if (size != 0) {
-            modifications = new ArrayList<>(size);
-            final NormalizedNodeDataInput nnin = NormalizedNodeInputOutput.newDataInput(in);
-            for (int i = 0; i < size; ++i) {
-                modifications.add(TransactionModification.readFrom(nnin));
-            }
-        } else {
-            modifications = ImmutableList.of();
-        }
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        out.writeByte(PersistenceProtocol.byteValue(protocol.orElse(null)));
-        out.writeInt(modifications.size());
-        if (!modifications.isEmpty()) {
-            try (NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(out)) {
-                for (TransactionModification op : modifications) {
-                    op.writeTo(nnout);
-                }
-            }
-        }
-    }
-
-    @Override
-    protected ModifyTransactionRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new ModifyTransactionRequest(target, sequence, replyTo, modifications, protocol.orElse(null));
-    }
-}
index c4dd20d6c9a0bf660c648fa11069cae82b0a09fa..38adf787b96c64116414b8cc290ed67ecd38a2c8 100644 (file)
@@ -7,17 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * Response to a {@link ModifyTransactionRequest} which does not have a {@link PersistenceProtocol}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ModifyTransactionSuccess extends TransactionSuccess<ModifyTransactionSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public ModifyTransactionSuccess(final TransactionIdentifier identifier, final long sequence) {
@@ -29,8 +26,8 @@ public final class ModifyTransactionSuccess extends TransactionSuccess<ModifyTra
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<ModifyTransactionSuccess> externalizableProxy(final ABIVersion version) {
-        return new ModifyTransactionSuccessProxyV1(this);
+    protected MTS externalizableProxy(final ABIVersion version) {
+        return new MTS(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ModifyTransactionSuccessProxyV1.java
deleted file mode 100644 (file)
index 0efff09..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ModifyTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ModifyTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ModifyTransactionSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ModifyTransactionSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ModifyTransactionSuccessProxyV1(final ModifyTransactionSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected ModifyTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new ModifyTransactionSuccess(target, sequence);
-    }
-}
index 0864cd0cf0c2fc7495fda9ed9aadf3314dbf780a..c4353c37fd6bf74a74e4186b9968bac5b6972cc7 100644 (file)
@@ -8,17 +8,14 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * General error raised when the recipient of a Request is not the correct backend to talk to. This typically
  * means that the backend processing has moved and the frontend needs to run rediscovery and retry the request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class NotLeaderException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public NotLeaderException(final ActorRef me) {
index cd110d66b6aa708623fcca696a9daaec17872d44..0c908078ebbee07789f35dbeb335179c51837518 100644 (file)
@@ -7,17 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * A {@link RequestException} indicating that the backend has received a Request whose sequence does not match the
  * next expected sequence for the target. This is a hard error, as it indicates a Request is missing in the stream.
- *
- * @author Robert Varga
  */
-@Beta
 public final class OutOfOrderRequestException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public OutOfOrderRequestException(final long expectedRequest) {
index ad3dd8d700966a56effac9c225498dec38b1fe57..b39e09a6a45ac570148b824d67da02c7f984f31c 100644 (file)
@@ -7,18 +7,15 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * A {@link RequestException} indicating that the backend has received a RequestEnvelope whose sequence does not match
  * the next expected sequence. This can happen during leader transitions, when a part of the stream is rejected because
  * the backend is not the leader and it transitions to being a leader with old stream messages still being present.
- *
- * @author Robert Varga
  */
-@Beta
 public final class OutOfSequenceEnvelopeException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public OutOfSequenceEnvelopeException(final long expectedEnvelope) {
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PHR.java
new file mode 100644 (file)
index 0000000..e2b3959
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class PHR implements PurgeLocalHistoryRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private PurgeLocalHistoryRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public PHR() {
+        // for Externalizable
+    }
+
+    PHR(final PurgeLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public PurgeLocalHistoryRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final PurgeLocalHistoryRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
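
The PHR proxy above, like the other two-letter proxies introduced by this commit, follows the Java serialization-proxy idiom: the enclosing message hands a compact proxy to the stream on write, and the proxy's readResolve() swaps itself back for a fully built message on read. A minimal, self-contained sketch of that idiom follows; Message and Proxy are hypothetical stand-ins, not the actual cds-access-api types, and the real proxies additionally implement Externalizable through the SerialForm interfaces.

    import java.io.Serializable;

    // Hypothetical sketch of the serialization-proxy idiom; not the actual Request/SerialForm types.
    final class Message implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        final String payload;

        Message(final String payload) {
            this.payload = payload;
        }

        // Writing always substitutes the proxy for the message ...
        @java.io.Serial
        private Object writeReplace() {
            return new Proxy(payload);
        }

        private static final class Proxy implements Serializable {
            @java.io.Serial
            private static final long serialVersionUID = 1L;

            private final String payload;

            Proxy(final String payload) {
                this.payload = payload;
            }

            // ... and reading resolves the proxy back into a real message.
            @java.io.Serial
            private Object readResolve() {
                return new Message(payload);
            }
        }
    }
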
index be58b05b1fc87f3f3cdceb6f9ef6271dd210fbd8..82fca03087c6ac794caa188cd2928c989015af71 100644 (file)
@@ -7,19 +7,16 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.yangtools.concepts.WritableObject;
 
 /**
  * Enumeration of transaction persistence protocols. These govern which protocol is executed between the frontend
  * and backend to drive persistence of a particular transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public enum PersistenceProtocol implements WritableObject {
     /**
      * Abort protocol. The transaction has been aborted on the frontend and its effects should not be visible
@@ -77,20 +74,14 @@ public enum PersistenceProtocol implements WritableObject {
         return finish == null ? 0 : finish.byteValue();
     }
 
-    static PersistenceProtocol valueOf(final byte value) {
-        switch (value) {
-            case 0:
-                return null;
-            case 1:
-                return ABORT;
-            case 2:
-                return SIMPLE;
-            case 3:
-                return THREE_PHASE;
-            case 4:
-                return READY;
-            default:
-                throw new IllegalArgumentException("Unhandled byte value " + value);
-        }
+    static @Nullable PersistenceProtocol valueOf(final byte value) {
+        return switch (value) {
+            case 0 -> null;
+            case 1 -> ABORT;
+            case 2 -> SIMPLE;
+            case 3 -> THREE_PHASE;
+            case 4 -> READY;
+            default -> throw new IllegalArgumentException("Unhandled byte value " + value);
+        };
     }
 }
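
The rewritten valueOf() keeps the existing single-byte wire encoding and only restates it as a switch expression, with @Nullable documenting that 0 decodes to "no protocol". The mapping it implements is illustrated below; the demo class is a sketch only, placed in the same package because valueOf() is package-private, and is not part of the commit.

    package org.opendaylight.controller.cluster.access.commands;

    // Sketch of the byte-to-protocol mapping implemented by the switch expression above.
    final class PersistenceProtocolDecodeDemo {
        private PersistenceProtocolDecodeDemo() {
            // utility class
        }

        static void demo() {
            assert PersistenceProtocol.valueOf((byte) 0) == null;                         // no protocol
            assert PersistenceProtocol.valueOf((byte) 1) == PersistenceProtocol.ABORT;
            assert PersistenceProtocol.valueOf((byte) 2) == PersistenceProtocol.SIMPLE;
            assert PersistenceProtocol.valueOf((byte) 3) == PersistenceProtocol.THREE_PHASE;
            assert PersistenceProtocol.valueOf((byte) 4) == PersistenceProtocol.READY;
            // Any other byte value throws IllegalArgumentException.
        }
    }
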
index ecbd749dd1f8ada03f0f8a1b83beabc0ba0ef7a8..c9dc5dc1e7c3a8343c99eae7918b76b9e80670e9 100644 (file)
@@ -8,18 +8,24 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 
 /**
  * Request to purge a local history. This request is sent by the client once it receives a successful reply to
  * {@link DestroyLocalHistoryRequest} and indicates it has removed all state attached to a particular local history.
- *
- * @author Robert Varga
  */
-@Beta
 public final class PurgeLocalHistoryRequest extends LocalHistoryRequest<PurgeLocalHistoryRequest> {
+    interface SerialForm extends LocalHistoryRequest.SerialForm<PurgeLocalHistoryRequest> {
+        @Override
+        default PurgeLocalHistoryRequest readExternal(final ObjectInput in, final LocalHistoryIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new PurgeLocalHistoryRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public PurgeLocalHistoryRequest(final LocalHistoryIdentifier target, final long sequence, final ActorRef replyTo) {
@@ -31,8 +37,8 @@ public final class PurgeLocalHistoryRequest extends LocalHistoryRequest<PurgeLoc
     }
 
     @Override
-    protected AbstractLocalHistoryRequestProxy<PurgeLocalHistoryRequest> externalizableProxy(final ABIVersion version) {
-        return new PurgeLocalHistoryRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new PHR(this);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/PurgeLocalHistoryRequestProxyV1.java
deleted file mode 100644 (file)
index 11c344f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-
-/**
- * Externalizable proxy for use with {@link PurgeLocalHistoryRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class PurgeLocalHistoryRequestProxyV1 extends AbstractLocalHistoryRequestProxy<PurgeLocalHistoryRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public PurgeLocalHistoryRequestProxyV1() {
-        // For Externalizable
-    }
-
-    PurgeLocalHistoryRequestProxyV1(final PurgeLocalHistoryRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected PurgeLocalHistoryRequest createRequest(final LocalHistoryIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new PurgeLocalHistoryRequest(target, sequence, replyTo);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTR.java
new file mode 100644 (file)
index 0000000..e342c18
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class RTR implements ReadTransactionRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ReadTransactionRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RTR() {
+        // for Externalizable
+    }
+
+    RTR(final ReadTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ReadTransactionRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ReadTransactionRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/RTS.java
new file mode 100644 (file)
index 0000000..2c80834
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Optional;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+
+/**
+ * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class RTS implements TransactionSuccess.SerialForm<ReadTransactionSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ReadTransactionSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RTS() {
+        // for Externalizable
+    }
+
+    RTS(final ReadTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ReadTransactionSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final ReadTransactionSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public ReadTransactionSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        final Optional<NormalizedNode> data;
+        if (in.readBoolean()) {
+            data = Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode());
+        } else {
+            data = Optional.empty();
+        }
+        return new ReadTransactionSuccess(target, sequence, data);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out, final ReadTransactionSuccess msg) throws IOException {
+        TransactionSuccess.SerialForm.super.writeExternal(out, msg);
+
+        final var data = msg.getData();
+        if (data.isPresent()) {
+            out.writeBoolean(true);
+            try (var nnout = msg.getVersion().getStreamVersion().newDataOutput(out)) {
+                nnout.writeNormalizedNode(data.orElseThrow());
+            }
+        } else {
+            out.writeBoolean(false);
+        }
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
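
RTS serializes the optional read result as a presence flag: a single boolean, followed by a NormalizedNode stream only when data is present. The generic shape of that pattern is sketched below with a String payload standing in for NormalizedNode, since wiring up the yangtools binfmt codecs is outside the scope of this diff.

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.Optional;

    // Generic sketch of the "presence flag + payload" pattern used by RTS above.
    final class OptionalPayloadCodec {
        private OptionalPayloadCodec() {
            // utility class
        }

        static void write(final DataOutput out, final Optional<String> data) throws IOException {
            if (data.isPresent()) {
                out.writeBoolean(true);
                out.writeUTF(data.orElseThrow());
            } else {
                out.writeBoolean(false);
            }
        }

        static Optional<String> read(final DataInput in) throws IOException {
            return in.readBoolean() ? Optional.of(in.readUTF()) : Optional.empty();
        }
    }
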
index 92caa59b7acdeb641f4f655463e20876ba87ec2d..292496b7b43667f217177f4aa253f16206c039a9 100644 (file)
@@ -8,7 +8,8 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectInput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -16,11 +17,18 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
  * A transaction request to read the data at a particular path in the current view of a particular transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ReadTransactionRequest extends AbstractReadPathTransactionRequest<ReadTransactionRequest> {
+    interface SerialForm extends AbstractReadPathTransactionRequest.SerialForm<ReadTransactionRequest> {
+        @Override
+        default ReadTransactionRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence, final ActorRef replyTo, final boolean snapshotOnly, final YangInstanceIdentifier path)
+                throws IOException {
+            return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public ReadTransactionRequest(final @NonNull TransactionIdentifier identifier, final long sequence,
@@ -38,7 +46,7 @@ public final class ReadTransactionRequest extends AbstractReadPathTransactionReq
     }
 
     @Override
-    protected ReadTransactionRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new ReadTransactionRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new RTR(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionRequestProxyV1.java
deleted file mode 100644 (file)
index a83b6bc..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Externalizable proxy for use with {@link ReadTransactionRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ReadTransactionRequestProxyV1 extends AbstractReadPathTransactionRequestProxyV1<ReadTransactionRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ReadTransactionRequestProxyV1() {
-        // For Externalizable
-    }
-
-    ReadTransactionRequestProxyV1(final ReadTransactionRequest request) {
-        super(request);
-    }
-
-    @Override
-    ReadTransactionRequest createReadPathRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo, final YangInstanceIdentifier path, final boolean snapshotOnly) {
-        return new ReadTransactionRequest(target, sequence, replyTo, path, snapshotOnly);
-    }
-}
index 8fe4b0bb0ab8ffe1152401522c558ee8908de691..a03766e9da32d88c7ad3950449ebe3d37b991d6b 100644 (file)
@@ -7,9 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.SliceableMessage;
@@ -19,33 +22,51 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 /**
  * Successful reply to a {@link ReadTransactionRequest}. It indicates presence of requested data via
  * {@link #getData()}.
- *
- * @author Robert Varga
  */
-@Beta
-@SuppressFBWarnings("SE_BAD_FIELD")
 public final class ReadTransactionSuccess extends TransactionSuccess<ReadTransactionSuccess>
         implements SliceableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private final Optional<NormalizedNode<?, ?>> data;
+
+    private final Optional<NormalizedNode> data;
+
+    private ReadTransactionSuccess(final ReadTransactionSuccess request, final ABIVersion version) {
+        super(request, version);
+        data = request.data;
+    }
 
     public ReadTransactionSuccess(final TransactionIdentifier identifier, final long sequence,
-            final Optional<NormalizedNode<?, ?>> data) {
+            final Optional<NormalizedNode> data) {
         super(identifier, sequence);
-        this.data = Preconditions.checkNotNull(data);
+        this.data = requireNonNull(data);
     }
 
-    public Optional<NormalizedNode<?, ?>> getData() {
+    public Optional<NormalizedNode> getData() {
         return data;
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<ReadTransactionSuccess> externalizableProxy(final ABIVersion version) {
-        return new ReadTransactionSuccessProxyV1(this);
+    protected RTS externalizableProxy(final ABIVersion version) {
+        return new RTS(this);
     }
 
     @Override
     protected ReadTransactionSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new ReadTransactionSuccess(this, version);
+    }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
     }
 }
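
The private readObject/readObjectNoData/writeObject methods added above exist solely to reject any serialization attempt that bypasses the proxy; throwNSE() is provided by the message base hierarchy (not shown in this diff) and raises NotSerializableException. A self-contained sketch of the same guard, with that helper inlined for illustration:

    import java.io.IOException;
    import java.io.NotSerializableException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.ObjectStreamException;
    import java.io.Serializable;

    // Sketch of the "proxy only" guard: direct (de)serialization is refused, forcing all
    // traffic through a writeReplace()-provided proxy (omitted here for brevity).
    final class GuardedMessage implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        private void throwNSE() throws NotSerializableException {
            throw new NotSerializableException(getClass().getName());
        }

        @java.io.Serial
        private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
            throwNSE();
        }

        @java.io.Serial
        private void readObjectNoData() throws ObjectStreamException {
            throwNSE();
        }

        @java.io.Serial
        private void writeObject(final ObjectOutputStream stream) throws IOException {
            throwNSE();
        }
    }
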
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/ReadTransactionSuccessProxyV1.java
deleted file mode 100644 (file)
index 24a305b..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputOutput;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Externalizable proxy for use with {@link ReadTransactionSuccess}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class ReadTransactionSuccessProxyV1 extends AbstractTransactionSuccessProxy<ReadTransactionSuccess> {
-    private static final long serialVersionUID = 1L;
-    private Optional<NormalizedNode<?, ?>> data;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public ReadTransactionSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    ReadTransactionSuccessProxyV1(final ReadTransactionSuccess request) {
-        super(request);
-        this.data = request.getData();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-
-        if (data.isPresent()) {
-            out.writeBoolean(true);
-            try (NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(out)) {
-                nnout.writeNormalizedNode(data.get());
-            }
-        } else {
-            out.writeBoolean(false);
-        }
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-
-        if (in.readBoolean()) {
-            data = Optional.of(NormalizedNodeInputOutput.newDataInput(in).readNormalizedNode());
-        } else {
-            data = Optional.empty();
-        }
-    }
-
-    @Override
-    protected ReadTransactionSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new ReadTransactionSuccess(target, sequence, data);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STR.java
new file mode 100644 (file)
index 0000000..aa529ea
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link SkipTransactionsRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class STR implements SkipTransactionsRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private SkipTransactionsRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public STR() {
+        // for Externalizable
+    }
+
+    STR(final SkipTransactionsRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public SkipTransactionsRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final SkipTransactionsRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/STS.java
new file mode 100644 (file)
index 0000000..5489709
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link SkipTransactionsResponse}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class STS implements TransactionSuccess.SerialForm<SkipTransactionsResponse> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private SkipTransactionsResponse message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public STS() {
+        // for Externalizable
+    }
+
+    STS(final SkipTransactionsResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public SkipTransactionsResponse message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final SkipTransactionsResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public SkipTransactionsResponse readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new SkipTransactionsResponse(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequest.java
new file mode 100644 (file)
index 0000000..a2c037f
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import akka.actor.ActorRef;
+import com.google.common.base.MoreObjects.ToStringHelper;
+import com.google.common.collect.ImmutableList;
+import com.google.common.primitives.UnsignedLong;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Collection;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.access.ABIVersion;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Request to skip a number of {@link TransactionIdentifier}s within a {@code local history}. This request is essentially
+ * equivalent to {@link TransactionPurgeRequest} for {@link #getTarget()}, but also carries additional sibling
+ * {@link TransactionIdentifier}s in {@link #getOthers()}.
+ *
+ * <p>
+ * This request is sent by the frontend to inform the backend that a set of {@link TransactionIdentifier}s are
+ * explicitly retired and are guaranteed to never be used by the frontend.
+ */
+public final class SkipTransactionsRequest extends TransactionRequest<SkipTransactionsRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<SkipTransactionsRequest> {
+        @Override
+        default SkipTransactionsRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) throws IOException {
+            final int size = in.readInt();
+            final var builder = ImmutableList.<UnsignedLong>builderWithExpectedSize(size);
+            int idx;
+            if (size % 2 != 0) {
+                builder.add(UnsignedLong.fromLongBits(WritableObjects.readLong(in)));
+                idx = 1;
+            } else {
+                idx = 0;
+            }
+            for (; idx < size; idx += 2) {
+                final byte hdr = WritableObjects.readLongHeader(in);
+                builder.add(UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, hdr)));
+                builder.add(UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, hdr)));
+            }
+
+            return new SkipTransactionsRequest(target, sequence, replyTo, builder.build());
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final SkipTransactionsRequest msg) throws IOException {
+            TransactionRequest.SerialForm.super.writeExternal(out, msg);
+
+            final var others = msg.others;
+            final int size = others.size();
+            out.writeInt(size);
+
+            int idx;
+            if (size % 2 != 0) {
+                WritableObjects.writeLong(out, others.get(0).longValue());
+                idx = 1;
+            } else {
+                idx = 0;
+            }
+            for (; idx < size; idx += 2) {
+                WritableObjects.writeLongs(out, others.get(idx).longValue(), others.get(idx + 1).longValue());
+            }
+        }
+    }
+
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Note: UnsignedLong is arbitrary; yang.common.Uint64 would work just as well. What we really want,
+    //       though, is an immutable List<long>.
+    private final @NonNull ImmutableList<UnsignedLong> others;
+
+    public SkipTransactionsRequest(final TransactionIdentifier target, final long sequence,
+            final ActorRef replyTo, final Collection<UnsignedLong> others) {
+        super(target, sequence, replyTo);
+        this.others = ImmutableList.copyOf(others);
+    }
+
+    private SkipTransactionsRequest(final SkipTransactionsRequest request, final ABIVersion version) {
+        super(request, version);
+        others = request.others;
+    }
+
+    /**
+     * Return the sibling {@link TransactionIdentifier}s of {@link #getTarget()}.
+     *
+     * @return Sibling values of {@link TransactionIdentifier#getTransactionId()}
+     */
+    public List<UnsignedLong> getOthers() {
+        return others;
+    }
+
+    @Override
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new STR(this);
+    }
+
+    @Override
+    protected SkipTransactionsRequest cloneAsVersion(final ABIVersion version) {
+        return new SkipTransactionsRequest(this, version);
+    }
+
+    @Override
+    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
+        final var helper = super.addToStringAttributes(toStringHelper);
+        if (!others.isEmpty()) {
+            helper.add("others", others);
+        }
+        return helper;
+    }
+}
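
The SerialForm above packs the sibling transaction ids tightly: an int count, then one standalone long when the count is odd, then the remainder as WritableObjects.writeLongs() pairs that share a single header byte. A simplified sketch of that even/odd split is shown below using plain fixed-width longs, so it stays independent of the yangtools WritableObjects helpers; with the real variable-width encoding the reader must mirror the pairing in order to consume the shared headers, exactly as readExternal() above does.

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Simplified sketch of the even/odd packing used by SkipTransactionsRequest.SerialForm above.
    // Plain 8-byte longs stand in for the variable-width WritableObjects encoding.
    final class LongListCodec {
        private LongListCodec() {
            // utility class
        }

        static void write(final DataOutput out, final List<Long> values) throws IOException {
            final int size = values.size();
            out.writeInt(size);

            int idx;
            if (size % 2 != 0) {
                // Odd count: emit the first value on its own ...
                out.writeLong(values.get(0));
                idx = 1;
            } else {
                idx = 0;
            }
            // ... and the rest as pairs, mirroring WritableObjects.writeLongs().
            for (; idx < size; idx += 2) {
                out.writeLong(values.get(idx));
                out.writeLong(values.get(idx + 1));
            }
        }

        static List<Long> read(final DataInput in) throws IOException {
            final int size = in.readInt();
            final List<Long> result = new ArrayList<>(size);
            // With fixed-width longs the reader needs no pairing; it simply consumes size values.
            for (int i = 0; i < size; ++i) {
                result.add(in.readLong());
            }
            return result;
        }
    }
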
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponse.java
new file mode 100644 (file)
index 0000000..9f3d54d
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import org.opendaylight.controller.cluster.access.ABIVersion;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Successful reply to a {@link SkipTransactionsRequest}.
+ */
+// FIXME: rename to SkipTransactionsSuccess
+public final class SkipTransactionsResponse extends TransactionSuccess<SkipTransactionsResponse> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private SkipTransactionsResponse(final SkipTransactionsResponse success, final ABIVersion version) {
+        super(success, version);
+    }
+
+    public SkipTransactionsResponse(final TransactionIdentifier identifier, final long sequence) {
+        super(identifier, sequence);
+    }
+
+    @Override
+    protected STS externalizableProxy(final ABIVersion version) {
+        return new STS(this);
+    }
+
+    @Override
+    protected SkipTransactionsResponse cloneAsVersion(final ABIVersion version) {
+        return new SkipTransactionsResponse(this, version);
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAR.java
new file mode 100644 (file)
index 0000000..98f63b7
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TAR implements TransactionAbortRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionAbortRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TAR() {
+        // for Externalizable
+    }
+
+    TAR(final TransactionAbortRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionAbortRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionAbortRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TAS.java
new file mode 100644 (file)
index 0000000..daeee07
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TAS implements TransactionSuccess.SerialForm<TransactionAbortSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionAbortSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TAS() {
+        // for Externalizable
+    }
+
+    TAS(final TransactionAbortSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionAbortSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionAbortSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionAbortSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionAbortSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCCS.java
new file mode 100644 (file)
index 0000000..22a8a84
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TCCS implements TransactionSuccess.SerialForm<TransactionCanCommitSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionCanCommitSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TCCS() {
+        // for Externalizable
+    }
+
+    TCCS(final TransactionCanCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCanCommitSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionCanCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCanCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionCanCommitSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TCS.java
new file mode 100644 (file)
index 0000000..7f897d8
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TCS implements TransactionSuccess.SerialForm<TransactionCommitSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionCommitSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TCS() {
+        // for Externalizable
+    }
+
+    TCS(final TransactionCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCommitSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionCommitSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TDCR.java
new file mode 100644 (file)
index 0000000..01c2733
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionDoCommitRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TDCR implements TransactionDoCommitRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionDoCommitRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TDCR() {
+        // for Externalizable
+    }
+
+    TDCR(final TransactionDoCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionDoCommitRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionDoCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TF.java
new file mode 100644 (file)
index 0000000..6e26fc3
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionFailure}. It implements the Chlorine SR2 serialization format.
+ */
+final class TF implements TransactionFailure.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionFailure message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TF() {
+        // for Externalizable
+    }
+
+    TF(final TransactionFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionFailure message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionFailure message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCR.java
new file mode 100644 (file)
index 0000000..0bf4ae5
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPCR implements TransactionPreCommitRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPreCommitRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPCR() {
+        // for Externalizable
+    }
+
+    TPCR(final TransactionPreCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPreCommitRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPreCommitRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPCS.java
new file mode 100644 (file)
index 0000000..a64efa1
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPCS implements TransactionSuccess.SerialForm<TransactionPreCommitSuccess> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPreCommitSuccess message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPCS() {
+        // for Externalizable
+    }
+
+    TPCS(final TransactionPreCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPreCommitSuccess message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPreCommitSuccess message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPreCommitSuccess readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) throws IOException {
+        return new TransactionPreCommitSuccess(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPR.java
new file mode 100644 (file)
index 0000000..a80e1f6
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPR implements TransactionPurgeRequest.SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPurgeRequest message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPR() {
+        // for Externalizable
+    }
+
+    TPR(final TransactionPurgeRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPurgeRequest message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPurgeRequest message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TPS.java
new file mode 100644 (file)
index 0000000..1b2f94a
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+
+/**
+ * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the Chlorine SR2 serialization
+ * format.
+ */
+final class TPS implements TransactionSuccess.SerialForm<TransactionPurgeResponse> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionPurgeResponse message;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TPS() {
+        // for Externalizable
+    }
+
+    TPS(final TransactionPurgeResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPurgeResponse message() {
+        return verifyNotNull(message);
+    }
+
+    @Override
+    public void setMessage(final TransactionPurgeResponse message) {
+        this.message = requireNonNull(message);
+    }
+
+    @Override
+    public TransactionPurgeResponse readExternal(final ObjectInput in, final TransactionIdentifier target,
+            final long sequence) {
+        return new TransactionPurgeResponse(target, sequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return message();
+    }
+}
index b8499cc2a25dacaccd126c568d37ef2aedfbfcee..c9238ab9af80bcac87185aab1082e33bd8078290 100644 (file)
@@ -8,30 +8,40 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * A transaction request to perform the abort step of the three-phase commit protocol.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionAbortRequest extends TransactionRequest<TransactionAbortRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionAbortRequest> {
+        @Override
+        default TransactionAbortRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionAbortRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionAbortRequest(final TransactionAbortRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionAbortRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionAbortRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionAbortRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TAR(this);
     }
 
     @Override
     protected TransactionAbortRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionAbortRequest(this, version);
     }
 }
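
A related change repeated across this commit: cloneAsVersion() no longer returns this, but rebuilds the message against the requested ABIVersion through a private copy constructor. The shape of that pattern is sketched below with hypothetical Version/VersionedMessage stand-ins rather than the actual ABIVersion and message hierarchy.

    // Hypothetical stand-ins illustrating the copy-constructor shape of cloneAsVersion() above.
    enum Version { V1, V2 }

    class VersionedMessage {
        private final String target;
        private final Version version;

        VersionedMessage(final String target, final Version version) {
            this.target = target;
            this.version = version;
        }

        // Same logical content, re-bound to the requested version.
        private VersionedMessage(final VersionedMessage original, final Version version) {
            this(original.target, version);
        }

        VersionedMessage cloneAsVersion(final Version newVersion) {
            return new VersionedMessage(this, newVersion);
        }

        Version version() {
            return version;
        }
    }
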
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortRequestProxyV1.java
deleted file mode 100644 (file)
index 3e67dfe..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionAbortRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionAbortRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionAbortRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionAbortRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionAbortRequestProxyV1(final TransactionAbortRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionAbortRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionAbortRequest(target, sequence, replyTo);
-    }
-}
index 69c6dddd8f9c49f84801cbb0f938c71b256ec8d3..db92890b1b94df332a36dcd70e1a44d1ded39b16 100644 (file)
@@ -17,19 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionAbortSuccess extends TransactionSuccess<TransactionAbortSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionAbortSuccess(final TransactionAbortSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionAbortSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionAbortSuccess> externalizableProxy(final ABIVersion version) {
-        return new TransactionAbortSuccessProxyV1(this);
+    protected TAS externalizableProxy(final ABIVersion version) {
+        return new TAS(this);
     }
 
     @Override
     protected TransactionAbortSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionAbortSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionAbortSuccessProxyV1.java
deleted file mode 100644 (file)
index c9de3b9..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionAbortSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionAbortSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionAbortSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionAbortSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionAbortSuccessProxyV1(final TransactionAbortSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionAbortSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionAbortSuccess(target, sequence);
-    }
-}
index e6149bd5da226529e746a07e09314fde37759346..55c5cdb2d61641d3cc2b3224c04a88feccde7708 100644 (file)
@@ -17,20 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionCanCommitSuccess extends TransactionSuccess<TransactionCanCommitSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionCanCommitSuccess(final TransactionCanCommitSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionCanCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionCanCommitSuccess> externalizableProxy(
-            final ABIVersion version) {
-        return new TransactionCanCommitSuccessProxyV1(this);
+    protected TCCS externalizableProxy(final ABIVersion version) {
+        return new TCCS(this);
     }
 
     @Override
     protected TransactionCanCommitSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionCanCommitSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCanCommitSuccessProxyV1.java
deleted file mode 100644 (file)
index b41ec29..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionCanCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionCanCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionCanCommitSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionCanCommitSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionCanCommitSuccessProxyV1(final TransactionCanCommitSuccess success) {
-        super(success);
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-    }
-
-    @Override
-    protected TransactionCanCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionCanCommitSuccess(target, sequence);
-    }
-}
index 6b28244484e3eb739881696429e9119db14dc47e..1fc06da9c988685027ffb7ae18515e805f4a2b6d 100644 (file)
@@ -17,19 +17,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionCommitSuccess extends TransactionSuccess<TransactionCommitSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionCommitSuccess(final TransactionCommitSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionCommitSuccess> externalizableProxy(final ABIVersion version) {
-        return new TransactionCommitSuccessProxyV1(this);
+    protected TCS externalizableProxy(final ABIVersion version) {
+        return new TCS(this);
     }
 
     @Override
     protected TransactionCommitSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionCommitSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionCommitSuccessProxyV1.java
deleted file mode 100644 (file)
index f23da7b..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionCommitSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionCommitSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionCommitSuccessProxyV1(final TransactionCommitSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionCommitSuccess(target, sequence);
-    }
-}
index 72520c0aeca0b0c5c5f3f1972767f07b8863384e..d6eb297bb043b55d4764b5a30e81eb68ac280609 100644 (file)
@@ -7,28 +7,25 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.IOException;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
 
 /**
  * A {@link TransactionModification} which has a data component.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class TransactionDataModification extends TransactionModification {
-    private final NormalizedNode<?, ?> data;
+    private final NormalizedNode data;
 
-    TransactionDataModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    TransactionDataModification(final YangInstanceIdentifier path, final NormalizedNode data) {
         super(path);
-        this.data = Preconditions.checkNotNull(data);
+        this.data = requireNonNull(data);
     }
 
-    public final NormalizedNode<?, ?> getData() {
+    public final NormalizedNode getData() {
         return data;
     }
 
index 62acdbbb76ba627a2fd970b0cdeb72214a72d2db..00c9bd8259fb3eaf22467becd632f990d1883f88 100644 (file)
@@ -7,15 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
  * Delete a particular path.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionDelete extends TransactionModification {
     public TransactionDelete(final YangInstanceIdentifier path) {
         super(path);
index 955c2680086db7553969857e0656238e37c179f8..ad7ffad146a0f5a05215cd21a3af0b9655acd1f8 100644 (file)
@@ -8,30 +8,40 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * A transaction request to perform the final, doCommit, step of the three-phase commit protocol.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionDoCommitRequest extends TransactionRequest<TransactionDoCommitRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionDoCommitRequest> {
+        @Override
+        default TransactionDoCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionDoCommitRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionDoCommitRequest(final TransactionDoCommitRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionDoCommitRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionDoCommitRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionDoCommitRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TDCR(this);
     }
 
     @Override
     protected TransactionDoCommitRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionDoCommitRequest(this, version);
     }
 }
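
As with the abort request earlier, the dedicated TransactionDoCommitRequestProxyV1 class gives way to a nested SerialForm interface whose default method rebuilds the message once the shared framing (target, sequence, replyTo) has been decoded. A stripped-down sketch of that arrangement, with simplified types standing in for the real identifier and actor classes:

    // Sketch: String replaces TransactionIdentifier, and replyTo is omitted for brevity.
    import java.io.ObjectInput;

    interface RequestSerialForm<T> {
        // Shared framing is decoded by common code; implementations only instantiate the message.
        T readExternal(ObjectInput in, String target, long sequence);
    }

    final class DoCommitLikeRequest {
        interface SerialForm extends RequestSerialForm<DoCommitLikeRequest> {
            @Override
            default DoCommitLikeRequest readExternal(final ObjectInput in, final String target,
                    final long sequence) {
                return new DoCommitLikeRequest(target, sequence);
            }
        }

        private final String target;
        private final long sequence;

        DoCommitLikeRequest(final String target, final long sequence) {
            this.target = target;
            this.sequence = sequence;
        }
    }

The short-named proxies referenced elsewhere in this commit (TAR, TDCR, TPCR and friends) are the classes implementing these SerialForm interfaces; they are not shown in this excerpt.
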
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionDoCommitRequestProxyV1.java
deleted file mode 100644 (file)
index fcb63fd..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionDoCommitRequest}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionDoCommitRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionDoCommitRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionDoCommitRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionDoCommitRequestProxyV1(final TransactionDoCommitRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionDoCommitRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionDoCommitRequest(target, sequence, replyTo);
-    }
-}
index e0b6a5998795c271453d761edb158700a8e9a0b6..288a90ee3c3d89e8e75c1db17b4285d6f2e7655f 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
@@ -15,24 +16,39 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 
 /**
  * Generic {@link RequestFailure} involving a {@link TransactionRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionFailure extends RequestFailure<TransactionIdentifier, TransactionFailure> {
+    interface SerialForm extends RequestFailure.SerialForm<TransactionIdentifier, TransactionFailure> {
+        @Override
+        default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+            return TransactionIdentifier.readFrom(in);
+        }
+
+        @Override
+        default TransactionFailure createFailure(final TransactionIdentifier target, final long sequence,
+                final RequestException cause) {
+            return new TransactionFailure(target, sequence, cause);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionFailure(final TransactionFailure failure, final ABIVersion version) {
+        super(failure, version);
+    }
+
     TransactionFailure(final TransactionIdentifier target, final long sequence, final RequestException cause) {
         super(target, sequence, cause);
     }
 
     @Override
     protected TransactionFailure cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionFailure(this, version);
     }
 
     @Override
-    protected TransactionFailureProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionFailureProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TF(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionFailureProxyV1.java
deleted file mode 100644 (file)
index d3b1dd7..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionFailure}. It implements the initial (Boron) serialization
- * format.
- *
- * @author Robert Varga
- */
-final class TransactionFailureProxyV1 extends AbstractRequestFailureProxy<TransactionIdentifier, TransactionFailure> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionFailureProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionFailureProxyV1(final TransactionFailure failure) {
-        super(failure);
-    }
-
-    @Override
-    protected TransactionFailure createFailure(final TransactionIdentifier target, final long sequence,
-            final RequestException cause) {
-        return new TransactionFailure(target, sequence, cause);
-    }
-
-    @Override
-    protected TransactionIdentifier readTarget(final DataInput in) throws IOException {
-        return TransactionIdentifier.readFrom(in);
-    }
-}
index 1d9bffd346c95bcb0deee73ee72f32e88c96ed8a..2784687d288e19f1e4a74ddc3e01a0c5f907a954 100644 (file)
@@ -7,18 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 /**
  * Merge a {@link NormalizedNode} tree onto a specific path.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionMerge extends TransactionDataModification {
-    public TransactionMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    public TransactionMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
         super(path, data);
     }
 
index d71142201c2c25807e73fcd1e22128d16f5e7ca3..96bea87d46764c808d5a4cf50fd2dced686f66f6 100644 (file)
@@ -7,23 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import java.io.IOException;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataInput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
 
 /**
  * An individual modification of a transaction's state. This class and its subclasses are not serializable, but rather
- * expose {@link #writeTo(NormalizedNodeDataOutput)} and {@link #readFrom(NormalizedNodeDataInput)} methods for explicit
- * serialization. The reason for this is that they are usually transmitted in bulk, hence it is advantageous to reuse
+ * expose {@link #writeTo(NormalizedNodeDataOutput)} and
+ * {@link #readFrom(NormalizedNodeDataInput, ReusableStreamReceiver)} methods for explicit serialization. The reason for
+ * this is that they are usually transmitted in bulk, hence it is advantageous to reuse
  * a {@link NormalizedNodeDataOutput} instance to achieve better compression.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class TransactionModification {
     static final byte TYPE_DELETE = 1;
     static final byte TYPE_MERGE = 2;
@@ -32,7 +31,7 @@ public abstract class TransactionModification {
     private final YangInstanceIdentifier path;
 
     TransactionModification(final YangInstanceIdentifier path) {
-        this.path = Preconditions.checkNotNull(path);
+        this.path = requireNonNull(path);
     }
 
     public final YangInstanceIdentifier getPath() {
@@ -51,17 +50,14 @@ public abstract class TransactionModification {
         out.writeYangInstanceIdentifier(path);
     }
 
-    static TransactionModification readFrom(final NormalizedNodeDataInput in) throws IOException {
+    static TransactionModification readFrom(final NormalizedNodeDataInput in, final ReusableStreamReceiver writer)
+            throws IOException {
         final byte type = in.readByte();
-        switch (type) {
-            case TYPE_DELETE:
-                return new TransactionDelete(in.readYangInstanceIdentifier());
-            case TYPE_MERGE:
-                return new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode());
-            case TYPE_WRITE:
-                return new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode());
-            default:
-                throw new IllegalArgumentException("Unhandled type " + type);
-        }
+        return switch (type) {
+            case TYPE_DELETE -> new TransactionDelete(in.readYangInstanceIdentifier());
+            case TYPE_MERGE -> new TransactionMerge(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
+            case TYPE_WRITE -> new TransactionWrite(in.readYangInstanceIdentifier(), in.readNormalizedNode(writer));
+            default -> throw new IllegalArgumentException("Unhandled type " + type);
+        };
     }
 }
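
readFrom() above also moves from a fall-through switch statement to an arrow-form switch expression. A self-contained illustration of that rewrite, with a simplified record instead of the real YangInstanceIdentifier/NormalizedNode payloads:

    import java.io.DataInput;
    import java.io.IOException;

    final class ModificationCodec {
        static final byte TYPE_DELETE = 1;
        static final byte TYPE_MERGE = 2;
        static final byte TYPE_WRITE = 3;

        record Modification(byte type, String path) {
            // placeholder payload; the real classes carry YANG paths and data trees
        }

        static Modification readFrom(final DataInput in) throws IOException {
            final byte type = in.readByte();
            // Arrow arms cannot fall through, and the default arm keeps the expression exhaustive
            return switch (type) {
                case TYPE_DELETE, TYPE_MERGE, TYPE_WRITE -> new Modification(type, in.readUTF());
                default -> throw new IllegalArgumentException("Unhandled type " + type);
            };
        }
    }
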
index 226557d12436fca9b8cce808f2b82907559741b8..3172842f7667551502f8248ca30374081895a562 100644 (file)
@@ -8,31 +8,41 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
 /**
  * A transaction request to perform the second, preCommit, step of the three-phase commit protocol.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionPreCommitRequest extends TransactionRequest<TransactionPreCommitRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionPreCommitRequest> {
+        @Override
+        default TransactionPreCommitRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionPreCommitRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPreCommitRequest(final TransactionPreCommitRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionPreCommitRequest(final TransactionIdentifier target, final long sequence,
             final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionPreCommitRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionPreCommitRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TPCR(this);
     }
 
     @Override
     protected TransactionPreCommitRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPreCommitRequest(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitRequestProxyV1.java
deleted file mode 100644 (file)
index bf044e1..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPreCommitRequest}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPreCommitRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionPreCommitRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPreCommitRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPreCommitRequestProxyV1(final TransactionPreCommitRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionPreCommitRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionPreCommitRequest(target, sequence, replyTo);
-    }
-}
index 716f37804cc0b88f7fd0efdba345fc8de381e086..695d2615120a3612eb3161575828bd053c1b941f 100644 (file)
@@ -16,20 +16,24 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * @author Robert Varga
  */
 public final class TransactionPreCommitSuccess extends TransactionSuccess<TransactionPreCommitSuccess> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPreCommitSuccess(final TransactionPreCommitSuccess success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionPreCommitSuccess(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionPreCommitSuccess> externalizableProxy(
-            final ABIVersion version) {
-        return new TransactionPreCommitSuccessProxyV1(this);
+    protected TPCS externalizableProxy(final ABIVersion version) {
+        return new TPCS(this);
     }
 
     @Override
     protected TransactionPreCommitSuccess cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPreCommitSuccess(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPreCommitSuccessProxyV1.java
deleted file mode 100644 (file)
index 17b1b8d..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPreCommitSuccess}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPreCommitSuccessProxyV1 extends AbstractTransactionSuccessProxy<TransactionPreCommitSuccess> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPreCommitSuccessProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPreCommitSuccessProxyV1(final TransactionPreCommitSuccess success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionPreCommitSuccess createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionPreCommitSuccess(target, sequence);
-    }
-}
index a0fab70188f1a29f0f8e019da85e8ac0a61d1d51..757c8134a24d2dda1c8cfccc07f7b6cc119290b1 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.ObjectInput;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -16,24 +16,34 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * A transaction request to perform the final transaction transition, which is purging it from the protocol view,
  * meaning the frontend has no further knowledge of the transaction. The backend is free to purge any state related
  * to the transaction and responds with a {@link TransactionPurgeResponse}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionPurgeRequest extends TransactionRequest<TransactionPurgeRequest> {
+    interface SerialForm extends TransactionRequest.SerialForm<TransactionPurgeRequest> {
+        @Override
+        default TransactionPurgeRequest readExternal(final ObjectInput in, final TransactionIdentifier target,
+                final long sequence, final ActorRef replyTo) {
+            return new TransactionPurgeRequest(target, sequence, replyTo);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPurgeRequest(final TransactionPurgeRequest request, final ABIVersion version) {
+        super(request, version);
+    }
+
     public TransactionPurgeRequest(final TransactionIdentifier target, final long sequence, final ActorRef replyTo) {
         super(target, sequence, replyTo);
     }
 
     @Override
-    protected TransactionPurgeRequestProxyV1 externalizableProxy(final ABIVersion version) {
-        return new TransactionPurgeRequestProxyV1(this);
+    protected SerialForm externalizableProxy(final ABIVersion version) {
+        return new TPR(this);
     }
 
     @Override
     protected TransactionPurgeRequest cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPurgeRequest(this, version);
     }
 }
\ No newline at end of file
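
For context on the purge step described in the class javadoc above: once a transaction has been committed or aborted, the frontend sends a TransactionPurgeRequest so the backend may drop any state it still holds for that transaction and answer with a TransactionPurgeResponse. A hedged usage sketch; in the real protocol the request travels inside a request envelope, and the identifier, sequence number and actor references below are assumed to already exist:

    void purgeCompletedTransaction(final TransactionIdentifier txId, final long nextSequence,
            final akka.actor.ActorRef self, final akka.actor.ActorRef backend) {
        // Bare message for illustration only; production code wraps requests in envelopes
        final TransactionPurgeRequest purge = new TransactionPurgeRequest(txId, nextSequence, self);
        backend.tell(purge, self);
        // The backend replies with a TransactionPurgeResponse carrying the same target and
        // sequence, after which the frontend forgets txId entirely.
    }
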
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeRequestProxyV1.java
deleted file mode 100644 (file)
index ee56b4c..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPurgeRequest}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPurgeRequestProxyV1 extends AbstractTransactionRequestProxy<TransactionPurgeRequest> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPurgeRequestProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPurgeRequestProxyV1(final TransactionPurgeRequest request) {
-        super(request);
-    }
-
-    @Override
-    protected TransactionPurgeRequest createRequest(final TransactionIdentifier target, final long sequence,
-            final ActorRef replyTo) {
-        return new TransactionPurgeRequest(target, sequence, replyTo);
-    }
-}
index 54710143bcdd8633fa030c9f1b0040abffcb3fdd..558e414d12991d5f31379193668d86de3f2d1da8 100644 (file)
@@ -12,24 +12,27 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 
 /**
  * Successful reply to a {@link TransactionPurgeRequest}.
- *
- * @author Robert Varga
  */
+// FIXME: rename to TransactionPurgeSuccess
 public final class TransactionPurgeResponse extends TransactionSuccess<TransactionPurgeResponse> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
+    private TransactionPurgeResponse(final TransactionPurgeResponse success, final ABIVersion version) {
+        super(success, version);
+    }
+
     public TransactionPurgeResponse(final TransactionIdentifier identifier, final long sequence) {
         super(identifier, sequence);
     }
 
     @Override
-    protected AbstractTransactionSuccessProxy<TransactionPurgeResponse> externalizableProxy(
-            final ABIVersion version) {
-        return new TransactionPurgeResponseProxyV1(this);
+    protected TPS externalizableProxy(final ABIVersion version) {
+        return new TPS(this);
     }
 
     @Override
     protected TransactionPurgeResponse cloneAsVersion(final ABIVersion version) {
-        return this;
+        return new TransactionPurgeResponse(this, version);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/commands/TransactionPurgeResponseProxyV1.java
deleted file mode 100644 (file)
index d15d729..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.commands;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * Externalizable proxy for use with {@link TransactionPurgeResponse}. It implements the initial (Boron)
- * serialization format.
- *
- * @author Robert Varga
- */
-final class TransactionPurgeResponseProxyV1 extends AbstractTransactionSuccessProxy<TransactionPurgeResponse> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public TransactionPurgeResponseProxyV1() {
-        // For Externalizable
-    }
-
-    TransactionPurgeResponseProxyV1(final TransactionPurgeResponse success) {
-        super(success);
-    }
-
-    @Override
-    protected TransactionPurgeResponse createSuccess(final TransactionIdentifier target, final long sequence) {
-        return new TransactionPurgeResponse(target, sequence);
-    }
-}
index 4dcf6ea93c6451d5b0b96c3e3d24a29b9bbfe82f..15d98f91507990ddbcd499d28934a4330f93fb9d 100644 (file)
@@ -8,7 +8,8 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -18,12 +19,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * Abstract base class for {@link Request}s involving specific transaction. This class is visible outside of this
  * package solely for the ability to perform a unified instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class TransactionRequest<T extends TransactionRequest<T>> extends Request<TransactionIdentifier, T> {
+    protected interface SerialForm<T extends TransactionRequest<T>>
+            extends Request.SerialForm<TransactionIdentifier, T> {
+        @Override
+        default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+            return TransactionIdentifier.readFrom(in);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     TransactionRequest(final TransactionIdentifier identifier, final long sequence, final ActorRef replyTo) {
@@ -40,5 +47,5 @@ public abstract class TransactionRequest<T extends TransactionRequest<T>> extend
     }
 
     @Override
-    protected abstract AbstractTransactionRequestProxy<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
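
The same reorganisation reaches the abstract base: AbstractTransactionRequestProxy disappears and TransactionRequest gains a SerialForm interface that supplies readTarget() as a default method, so every transaction request decodes its TransactionIdentifier the same way. A simplified sketch of pushing that shared decoding into the interface (Long stands in for the identifier type):

    import java.io.DataInput;
    import java.io.IOException;

    interface MessageSerialForm<I> {
        I readTarget(DataInput in) throws IOException;
    }

    interface TxRequestSerialForm extends MessageSerialForm<Long> {
        @Override
        default Long readTarget(final DataInput in) throws IOException {
            return in.readLong();   // stands in for TransactionIdentifier.readFrom(in)
        }
    }
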
index 636a2e741bd42f8fcd5471c92a794a21ae3b2528..689b4d5ee1443faadc57d93fe1f3d3f8ad8585fc 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
+import java.io.DataInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
@@ -16,13 +17,18 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  * Abstract base class for {@link RequestSuccess}es involving specific transaction. This class is visible outside of
  * this package solely for the ability to perform a unified instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Message type
  */
-@Beta
 public abstract class TransactionSuccess<T extends TransactionSuccess<T>>
         extends RequestSuccess<TransactionIdentifier, T> {
+    interface SerialForm<T extends TransactionSuccess<T>> extends RequestSuccess.SerialForm<TransactionIdentifier, T> {
+        @Override
+        default TransactionIdentifier readTarget(final DataInput in) throws IOException {
+            return TransactionIdentifier.readFrom(in);
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     TransactionSuccess(final TransactionIdentifier identifier, final long sequence) {
@@ -34,5 +40,5 @@ public abstract class TransactionSuccess<T extends TransactionSuccess<T>>
     }
 
     @Override
-    protected abstract AbstractTransactionSuccessProxy<T> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T> externalizableProxy(ABIVersion version);
 }
index b46cf38882b78548babf569e03afc184c268c6f7..af1acbe57ed25ea2dfbe36067bf14aad5145f7b9 100644 (file)
@@ -7,18 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 /**
  * Modification to write (and replace) a subtree at specified path with another subtree.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionWrite extends TransactionDataModification {
-    public TransactionWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    public TransactionWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
         super(path, data);
     }
 
index 196c60c0d82951068c46a9d3edc225a6ac622002..c688df3c90f5ede0d33791a7686706bdccb5efb2 100644 (file)
@@ -7,17 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 /**
  * A {@link RequestException} indicating that the backend has received a request referencing an unknown history. This
  * typically happens when the linear history ID is newer than the highest observed {@link CreateLocalHistoryRequest}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class UnknownHistoryException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public UnknownHistoryException(final Long lastSeenHistory) {
@@ -25,7 +22,7 @@ public final class UnknownHistoryException extends RequestException {
     }
 
     private static String historyToString(final Long history) {
-        return history == null ? "null" : Long.toUnsignedString(history.longValue());
+        return history == null ? "null" : Long.toUnsignedString(history);
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractEnvelopeProxy.java
deleted file mode 100644 (file)
index 71a731a..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-abstract class AbstractEnvelopeProxy<T extends Message<?, ?>> implements Externalizable {
-    private static final long serialVersionUID = 1L;
-
-    private T message;
-    private long sessionId;
-    private long txSequence;
-
-    AbstractEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    AbstractEnvelopeProxy(final Envelope<T> envelope) {
-        message = envelope.getMessage();
-        txSequence = envelope.getTxSequence();
-        sessionId = envelope.getSessionId();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        WritableObjects.writeLongs(out, sessionId, txSequence);
-        out.writeObject(message);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        final byte header = WritableObjects.readLongHeader(in);
-        sessionId = WritableObjects.readFirstLong(in, header);
-        txSequence = WritableObjects.readSecondLong(in, header);
-        message = (T) in.readObject();
-    }
-
-    @SuppressWarnings("checkstyle:hiddenField")
-    abstract Envelope<T> createEnvelope(T wrappedNessage, long sessionId, long txSequence);
-
-    final Object readResolve() {
-        return createEnvelope(message, sessionId, txSequence);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractMessageProxy.java
deleted file mode 100644 (file)
index 0367527..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import static com.google.common.base.Verify.verifyNotNull;
-
-import java.io.DataInput;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-/**
- * Abstract Externalizable proxy for use with {@link Message} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- * @param <C> Message class
- */
-abstract class AbstractMessageProxy<T extends WritableIdentifier, C extends Message<T, C>> implements Externalizable {
-    private static final long serialVersionUID = 1L;
-    private T target;
-    private long sequence;
-
-    protected AbstractMessageProxy() {
-        // For Externalizable
-    }
-
-    AbstractMessageProxy(final @NonNull C message) {
-        this.target = message.getTarget();
-        this.sequence = message.getSequence();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        target.writeTo(out);
-        WritableObjects.writeLong(out, sequence);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        target = verifyNotNull(readTarget(in));
-        sequence = WritableObjects.readLong(in);
-    }
-
-    protected final Object readResolve() {
-        return verifyNotNull(createMessage(target, sequence));
-    }
-
-    protected abstract @NonNull T readTarget(@NonNull DataInput in) throws IOException;
-
-    abstract @NonNull C createMessage(@NonNull T msgTarget, long msgSequence);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestFailureProxy.java
deleted file mode 100644 (file)
index e35936d..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import com.google.common.annotations.Beta;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link RequestFailure} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractRequestFailureProxy<T extends WritableIdentifier, C extends RequestFailure<T, C>>
-        extends AbstractResponseProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-    private RequestException cause;
-
-    protected AbstractRequestFailureProxy() {
-        // For Externalizable
-    }
-
-    protected AbstractRequestFailureProxy(final @NonNull C failure) {
-        super(failure);
-        this.cause = failure.getCause();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeObject(cause);
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        cause = (RequestException) in.readObject();
-    }
-
-    @Override
-    final C createResponse(final T target, final long sequence) {
-        return createFailure(target, sequence, cause);
-    }
-
-    protected abstract @NonNull C createFailure(@NonNull T target, long sequence,
-            @NonNull RequestException failureCause);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractRequestProxy.java
deleted file mode 100644 (file)
index 183766f..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import akka.actor.ActorRef;
-import akka.serialization.JavaSerializer;
-import akka.serialization.Serialization;
-import com.google.common.annotations.Beta;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link Request} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractRequestProxy<T extends WritableIdentifier, C extends Request<T, C>>
-        extends AbstractMessageProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-    private ActorRef replyTo;
-
-    protected AbstractRequestProxy() {
-        // For Externalizable
-    }
-
-    protected AbstractRequestProxy(final @NonNull C request) {
-        super(request);
-        this.replyTo = request.getReplyTo();
-    }
-
-    @Override
-    public void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        out.writeObject(Serialization.serializedActorPath(replyTo));
-    }
-
-    @Override
-    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        replyTo = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
-    }
-
-    @Override
-    final C createMessage(final T target, final long sequence) {
-        return createRequest(target, sequence, replyTo);
-    }
-
-    protected abstract @NonNull C createRequest(@NonNull T target, long sequence, @NonNull ActorRef replyToActor);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseEnvelopeProxy.java
deleted file mode 100644 (file)
index 1e873b4..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import org.opendaylight.yangtools.concepts.WritableObjects;
-
-abstract class AbstractResponseEnvelopeProxy<T extends Response<?, ?>> extends AbstractEnvelopeProxy<T> {
-    private static final long serialVersionUID = 1L;
-
-    private long executionTimeNanos;
-
-    AbstractResponseEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    AbstractResponseEnvelopeProxy(final ResponseEnvelope<T> envelope) {
-        super(envelope);
-        this.executionTimeNanos = envelope.getExecutionTimeNanos();
-    }
-
-    @Override
-    public final void writeExternal(final ObjectOutput out) throws IOException {
-        super.writeExternal(out);
-        WritableObjects.writeLong(out, executionTimeNanos);
-    }
-
-    @Override
-    public final void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-        super.readExternal(in);
-        executionTimeNanos = WritableObjects.readLong(in);
-    }
-
-    @Override
-    final ResponseEnvelope<T> createEnvelope(final T message, final long sessionId, final long txSequence) {
-        return createEnvelope(message, sessionId, txSequence, executionTimeNanos);
-    }
-
-    @SuppressWarnings("checkstyle:hiddenField")
-    abstract ResponseEnvelope<T> createEnvelope(T message, long sessionId, long txSequence, long executionTimeNanos);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractResponseProxy.java
deleted file mode 100644 (file)
index c9edfdb..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy class to use with {@link Response} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- * @param <C> Message class
- */
-abstract class AbstractResponseProxy<T extends WritableIdentifier, C extends Response<T, C>>
-        extends AbstractMessageProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractResponseProxy() {
-        // for Externalizable
-    }
-
-    AbstractResponseProxy(final @NonNull C response) {
-        super(response);
-    }
-
-    @Override
-    final C createMessage(final T target, final long sequence) {
-        return createResponse(target, sequence);
-    }
-
-    abstract @NonNull C createResponse(@NonNull T target, long sequence);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/AbstractSuccessProxy.java
deleted file mode 100644 (file)
index ecf792e..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.WritableIdentifier;
-
-/**
- * Abstract Externalizable proxy for use with {@link RequestSuccess} subclasses.
- *
- * @author Robert Varga
- *
- * @param <T> Target identifier type
- */
-@Beta
-public abstract class AbstractSuccessProxy<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
-        extends AbstractResponseProxy<T, C> {
-    private static final long serialVersionUID = 1L;
-
-    protected AbstractSuccessProxy() {
-        // For Externalizable
-    }
-
-    protected AbstractSuccessProxy(final @NonNull C success) {
-        super(success);
-    }
-
-    @Override
-    final C createResponse(final T target, final long sequence) {
-        return createSuccess(target, sequence);
-    }
-
-    protected abstract @NonNull C createSuccess(@NonNull T target, long sequence);
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/CI.java
new file mode 100644 (file)
index 0000000..e88764d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link ClientIdentifier}.
+ */
+final class CI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ClientIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CI() {
+        // for Externalizable
+    }
+
+    CI(final ClientIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        identifier = new ClientIdentifier(FrontendIdentifier.readFrom(in), WritableObjects.readLong(in));
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getFrontendId().writeTo(out);
+        WritableObjects.writeLong(out, identifier.getGeneration());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
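
The CI proxy above follows the standard Java serialization-proxy idiom: writeReplace() on the identifier substitutes the proxy into the stream, and readResolve() on the proxy substitutes the rebuilt identifier on the way back out. A minimal standalone sketch of that idiom, using hypothetical ExampleId/ExampleIdProxy names that are not part of this tree:

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    final class ExampleId implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        final String name;

        ExampleId(final String name) {
            this.name = name;
        }

        @java.io.Serial
        private Object writeReplace() {
            // The proxy, not ExampleId itself, is what ends up in the stream
            return new ExampleIdProxy(this);
        }
    }

    final class ExampleIdProxy implements Externalizable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        private ExampleId id;

        public ExampleIdProxy() {
            // for Externalizable
        }

        ExampleIdProxy(final ExampleId id) {
            this.id = id;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeUTF(id.name);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            id = new ExampleId(in.readUTF());
        }

        @java.io.Serial
        private Object readResolve() {
            // Deserialization hands callers the rebuilt ExampleId, never the proxy
            return id;
        }
    }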
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/ClientIdentifier.java
index fe178f26e9d25cc0aa29202ce951b5096fd6a3c1..42701539a6170a0f48942fa9e635ba29db0d464a 100644 (file)
@@ -7,75 +7,40 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024.ClientGeneration;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 import org.opendaylight.yangtools.concepts.WritableObjects;
+import org.opendaylight.yangtools.yang.common.Uint64;
 
 /**
  * A cluster-wide unique identifier of a frontend instance. This identifier discerns between individual incarnations
  * of a particular frontend.
- *
- * @author Robert Varga
  */
-@Beta
 public final class ClientIdentifier implements WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private FrontendIdentifier frontendId;
-        private long generation;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // Needed for Externalizable
-        }
-
-        Proxy(final FrontendIdentifier frontendId, final long generation) {
-            this.frontendId = Preconditions.checkNotNull(frontendId);
-            this.generation = generation;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            frontendId.writeTo(out);
-            WritableObjects.writeLong(out, generation);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            frontendId = FrontendIdentifier.readFrom(in);
-            generation = WritableObjects.readLong(in);
-        }
-
-        private Object readResolve() {
-            return new ClientIdentifier(frontendId, generation);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private final FrontendIdentifier frontendId;
+
+    private final @NonNull FrontendIdentifier frontendId;
     private final long generation;
 
     ClientIdentifier(final FrontendIdentifier frontendId, final long generation) {
-        this.frontendId = Preconditions.checkNotNull(frontendId);
+        this.frontendId = requireNonNull(frontendId);
         this.generation = generation;
     }
 
-    public static ClientIdentifier create(final FrontendIdentifier frontendId,
+    public static @NonNull ClientIdentifier create(final FrontendIdentifier frontendId,
             final long generation) {
         return new ClientIdentifier(frontendId, generation);
     }
 
-    public static ClientIdentifier readFrom(final DataInput in) throws IOException {
+    public static @NonNull ClientIdentifier readFrom(final DataInput in) throws IOException {
         final FrontendIdentifier frontendId = FrontendIdentifier.readFrom(in);
         return new ClientIdentifier(frontendId, WritableObjects.readLong(in));
     }
@@ -86,7 +51,7 @@ public final class ClientIdentifier implements WritableIdentifier {
         WritableObjects.writeLong(out, generation);
     }
 
-    public FrontendIdentifier getFrontendId() {
+    public @NonNull FrontendIdentifier getFrontendId() {
         return frontendId;
     }
 
@@ -94,6 +59,10 @@ public final class ClientIdentifier implements WritableIdentifier {
         return generation;
     }
 
+    public @NonNull ClientGeneration getYangGeneration() {
+        return new ClientGeneration(Uint64.fromLongBits(generation));
+    }
+
     @Override
     public int hashCode() {
         return frontendId.hashCode() * 31 + Long.hashCode(generation);
@@ -101,24 +70,20 @@ public final class ClientIdentifier implements WritableIdentifier {
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof ClientIdentifier)) {
-            return false;
-        }
-
-        final ClientIdentifier other = (ClientIdentifier) obj;
-        return generation == other.generation && frontendId.equals(other.frontendId);
+        return this == obj || obj instanceof ClientIdentifier other && generation == other.generation
+            && frontendId.equals(other.frontendId);
     }
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(ClientIdentifier.class).add("frontend", frontendId)
-                .add("generation", Long.toUnsignedString(generation)).toString();
+        return MoreObjects.toStringHelper(ClientIdentifier.class)
+            .add("frontend", frontendId)
+            .add("generation", Long.toUnsignedString(generation))
+            .toString();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(frontendId, generation);
+        return new CI(this);
     }
 }
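
The generation is stored as a raw long but treated as unsigned: toString() renders it with Long.toUnsignedString() and the new getYangGeneration() wraps the same bits via Uint64.fromLongBits(). A small illustration with a hypothetical value, not taken from this tree:

    import org.opendaylight.yangtools.yang.common.Uint64;

    final class GenerationExample {
        public static void main(final String[] args) {
            final long generation = -1L;                            // all 64 bits set
            System.out.println(generation);                         // -1 (signed view)
            System.out.println(Long.toUnsignedString(generation));  // 18446744073709551615
            System.out.println(Uint64.fromLongBits(generation));    // same bits, unsigned view
        }
    }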
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Envelope.java
index 02686d9708254acac8373ed32ee0604f4f7ad7fa..eed7b5374113a4bf4b91d53fb576281d667259e6 100644 (file)
@@ -7,20 +7,60 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 public abstract class Envelope<T extends Message<?, ?>> implements Immutable, Serializable {
+    interface SerialForm<T extends Message<?, ?>, E extends Envelope<T>> extends Externalizable {
+
+        @NonNull E envelope();
+
+        void setEnvelope(@NonNull E envelope);
+
+        @java.io.Serial
+        Object readResolve();
+
+        @Override
+        default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            final byte header = WritableObjects.readLongHeader(in);
+            final var sessionId = WritableObjects.readFirstLong(in, header);
+            final var txSequence = WritableObjects.readSecondLong(in, header);
+            @SuppressWarnings("unchecked")
+            final var message = (T) in.readObject();
+            setEnvelope(readExternal(in, sessionId, txSequence, message));
+        }
+
+        E readExternal(ObjectInput in, long sessionId, long txSequence, T message) throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out) throws IOException {
+            writeExternal(out, envelope());
+        }
+
+        default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException {
+            WritableObjects.writeLongs(out, envelope.getSessionId(), envelope.getTxSequence());
+            out.writeObject(envelope.getMessage());
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    private final T message;
+    private final @NonNull T message;
     private final long txSequence;
     private final long sessionId;
 
     Envelope(final T message, final long sessionId, final long txSequence) {
-        this.message = Preconditions.checkNotNull(message);
+        this.message = requireNonNull(message);
         this.sessionId = sessionId;
         this.txSequence = txSequence;
     }
@@ -30,7 +70,7 @@ public abstract class Envelope<T extends Message<?, ?>> implements Immutable, Se
      *
      * @return enclose message
      */
-    public T getMessage() {
+    public @NonNull T getMessage() {
         return message;
     }
 
@@ -58,9 +98,10 @@ public abstract class Envelope<T extends Message<?, ?>> implements Immutable, Se
                 .add("txSequence", Long.toHexString(txSequence)).add("message", message).toString();
     }
 
+    @java.io.Serial
     final Object writeReplace() {
         return createProxy();
     }
 
-    abstract AbstractEnvelopeProxy<T> createProxy();
+    abstract @NonNull SerialForm<T, ?> createProxy();
 }
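
The SerialForm above packs sessionId and txSequence with WritableObjects.writeLongs(), which emits a single header byte so the two values can be stored compactly and recovered in order. A round-trip sketch mirroring those calls; the stream setup and class name here are illustrative only:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.concepts.WritableObjects;

    final class EnvelopeHeaderExample {
        static byte[] write(final long sessionId, final long txSequence) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                // One shared header byte, then the two variable-length longs
                WritableObjects.writeLongs(out, sessionId, txSequence);
            }
            return bytes.toByteArray();
        }

        static long[] read(final byte[] serialized) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(serialized))) {
                final byte header = WritableObjects.readLongHeader(in);
                return new long[] {
                    WritableObjects.readFirstLong(in, header),
                    WritableObjects.readSecondLong(in, header),
                };
            }
        }
    }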
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FE.java
new file mode 100644 (file)
index 0000000..3038437
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link FailureEnvelope}.
+ */
+final class FE implements ResponseEnvelope.SerialForm<RequestFailure<?, ?>, FailureEnvelope> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private FailureEnvelope envelope;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FE() {
+        // for Externalizable
+    }
+
+    FE(final FailureEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public FailureEnvelope envelope() {
+        return verifyNotNull(envelope);
+    }
+
+    @Override
+    public void setEnvelope(final FailureEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public FailureEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+            final RequestFailure<?, ?> message, final long executionTimeNanos) {
+        return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos);
+    }
+
+    @Override
+    public Object readResolve() {
+        return envelope();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FI.java
new file mode 100644 (file)
index 0000000..1a3e72b
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link FrontendIdentifier}.
+ */
+final class FI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private FrontendIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FI() {
+        // for Externalizable
+    }
+
+    FI(final FrontendIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        identifier = new FrontendIdentifier(MemberName.readFrom(in), FrontendType.readFrom(in));
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getMemberName().writeTo(out);
+        identifier.getClientType().writeTo(out);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FT.java
new file mode 100644 (file)
index 0000000..9e900f7
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Serialization proxy for {@link FrontendType}.
+ */
+final class FT implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private byte[] serialized;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FT() {
+        // for Externalizable
+    }
+
+    FT(final byte[] serialized) {
+        this.serialized = requireNonNull(serialized);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(serialized.length);
+        out.write(serialized);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        serialized = new byte[in.readInt()];
+        in.readFully(serialized);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        // TODO: consider caching instances here
+        return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized);
+    }
+}
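
FT (and the analogous MN proxy below) externalizes the identifier as a length-prefixed UTF-8 byte array. A self-contained sketch of that framing, using only java.io and hypothetical helper names:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    final class LengthPrefixed {
        static void write(final DataOutput out, final String name) throws IOException {
            final byte[] serialized = name.getBytes(StandardCharsets.UTF_8);
            out.writeInt(serialized.length);   // frame length first ...
            out.write(serialized);             // ... then the raw UTF-8 bytes
        }

        static String read(final DataInput in) throws IOException {
            final byte[] serialized = new byte[in.readInt()];
            in.readFully(serialized);
            return new String(serialized, StandardCharsets.UTF_8);
        }
    }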
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelope.java
index 1f641eb1819f945da32ee4a9b76a403e5e7bea3d..5342d05f5eb989e38787b90d8b8a4edc9ca151e9 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.access.concepts;
 
 public final class FailureEnvelope extends ResponseEnvelope<RequestFailure<?, ?>> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public FailureEnvelope(final RequestFailure<?, ?> message, final long sessionId, final long txSequence,
@@ -16,7 +17,7 @@ public final class FailureEnvelope extends ResponseEnvelope<RequestFailure<?, ?>
     }
 
     @Override
-    FailureEnvelopeProxy createProxy() {
-        return new FailureEnvelopeProxy(this);
+    FE createProxy() {
+        return new FE(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FailureEnvelopeProxy.java
deleted file mode 100644 (file)
index adc50e1..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class FailureEnvelopeProxy extends AbstractResponseEnvelopeProxy<RequestFailure<?, ?>> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to be
-    // able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public FailureEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    FailureEnvelopeProxy(final FailureEnvelope envelope) {
-        super(envelope);
-    }
-
-    @Override
-    ResponseEnvelope<RequestFailure<?, ?>> createEnvelope(final RequestFailure<?, ?> message, final long sessionId,
-            final long txSequence, final long executionTimeNanos) {
-        return new FailureEnvelope(message, sessionId, txSequence, executionTimeNanos);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendIdentifier.java
index 60e40e9e161faf74c5f82c7fdd564c1872d916f9..76aad38da71e4d802b988771b292c7fc34a01dba 100644 (file)
@@ -7,74 +7,37 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.util.Objects;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
 /**
  * A cluster-wide unique identifier of a frontend type located at a cluster member.
- *
- * @author Robert Varga
  */
-@Beta
 public final class FrontendIdentifier implements WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private MemberName memberName;
-        private FrontendType clientType;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // Needed for Externalizable
-        }
-
-        Proxy(final MemberName memberName, final FrontendType clientType) {
-            this.memberName = Preconditions.checkNotNull(memberName);
-            this.clientType = Preconditions.checkNotNull(clientType);
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            memberName.writeTo(out);
-            clientType.writeTo(out);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            memberName = MemberName.readFrom(in);
-            clientType = FrontendType.readFrom(in);
-        }
-
-        private Object readResolve() {
-            return new FrontendIdentifier(memberName, clientType);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final MemberName memberName;
     private final FrontendType clientType;
 
     FrontendIdentifier(final MemberName memberName, final FrontendType clientType) {
-        this.clientType = Preconditions.checkNotNull(clientType);
-        this.memberName = Preconditions.checkNotNull(memberName);
+        this.clientType = requireNonNull(clientType);
+        this.memberName = requireNonNull(memberName);
     }
 
-    public static FrontendIdentifier create(final MemberName memberName, final FrontendType clientType) {
+    public static @NonNull FrontendIdentifier create(final MemberName memberName, final FrontendType clientType) {
         return new FrontendIdentifier(memberName, clientType);
     }
 
-    public static FrontendIdentifier readFrom(final DataInput in) throws IOException {
-        final MemberName memberName = MemberName.readFrom(in);
-        final FrontendType clientType = FrontendType.readFrom(in);
+    public static @NonNull FrontendIdentifier readFrom(final DataInput in) throws IOException {
+        final var memberName = MemberName.readFrom(in);
+        final var clientType = FrontendType.readFrom(in);
         return new FrontendIdentifier(memberName, clientType);
     }
 
@@ -84,11 +47,11 @@ public final class FrontendIdentifier implements WritableIdentifier {
         clientType.writeTo(out);
     }
 
-    public FrontendType getClientType() {
+    public @NonNull FrontendType getClientType() {
         return clientType;
     }
 
-    public MemberName getMemberName() {
+    public @NonNull MemberName getMemberName() {
         return memberName;
     }
 
@@ -99,18 +62,11 @@ public final class FrontendIdentifier implements WritableIdentifier {
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof FrontendIdentifier)) {
-            return false;
-        }
-
-        final FrontendIdentifier other = (FrontendIdentifier) obj;
-        return memberName.equals(other.memberName) && clientType.equals(other.clientType);
+        return this == obj || obj instanceof FrontendIdentifier other && memberName.equals(other.memberName)
+            && clientType.equals(other.clientType);
     }
 
-    public String toPersistentId() {
+    public @NonNull String toPersistentId() {
         return memberName.getName() + "-frontend-" + clientType.getName();
     }
 
@@ -119,7 +75,8 @@ public final class FrontendIdentifier implements WritableIdentifier {
         return toPersistentId();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(memberName, clientType);
+        return new FI(this);
     }
 }
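
The rewritten equals() here (and in ClientIdentifier above) uses Java 16+ pattern matching for instanceof, collapsing the type check and cast into a single expression. A standalone illustration with a hypothetical Point class:

    final class Point {
        final int x;
        final int y;

        Point(final int x, final int y) {
            this.x = x;
            this.y = y;
        }

        @Override
        public boolean equals(final Object obj) {
            // 'other' is bound only where the instanceof test has succeeded
            return this == obj || obj instanceof Point other && x == other.x && y == other.y;
        }

        @Override
        public int hashCode() {
            return 31 * x + y;
        }
    }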
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/FrontendType.java
index b6a7b4b260e4bb5ce1cb94c0302e25c6df53dc3a..66191816136573bdce79299a33d388012b163cf3 100644 (file)
@@ -11,18 +11,15 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.Strings;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.nio.charset.StandardCharsets;
 import java.util.regex.Pattern;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
@@ -30,48 +27,14 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
  * An {@link Identifier} identifying a data store frontend type, which is able to access the data store backend.
  * Frontend implementations need to define this identifier so that multiple clients existing on a member node can be
  * discerned.
- *
- * @author Robert Varga
  */
-@Beta
 public final class FrontendType implements Comparable<FrontendType>, WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private byte[] serialized;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            this.serialized = requireNonNull(serialized);
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(serialized.length);
-            out.write(serialized);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            serialized = new byte[in.readInt()];
-            in.readFully(serialized);
-        }
-
-        private Object readResolve() {
-            // TODO: consider caching instances here
-            return new FrontendType(new String(serialized, StandardCharsets.UTF_8), serialized);
-        }
-    }
-
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
     private static final String SIMPLE_STRING_REGEX = "^[a-zA-Z0-9-_.*+:=,!~';]+$";
     private static final Pattern SIMPLE_STRING_PATTERN = Pattern.compile(SIMPLE_STRING_REGEX);
-    private static final long serialVersionUID = 1L;
-    private final String name;
+
+    private final @NonNull String name;
 
     @SuppressFBWarnings(value = "VO_VOLATILE_REFERENCE_TO_ARRAY",
             justification = "The array elements are non-volatile but we don't access them.")
@@ -96,14 +59,14 @@ public final class FrontendType implements Comparable<FrontendType>, WritableIde
      * @return A {@link FrontendType} instance
      * @throws IllegalArgumentException if the string is null, empty or contains invalid characters
      */
-    public static FrontendType forName(final String name) {
+    public static @NonNull FrontendType forName(final String name) {
         checkArgument(!Strings.isNullOrEmpty(name));
         checkArgument(SIMPLE_STRING_PATTERN.matcher(name).matches(),
             "Supplied name %s does not patch pattern %s", name, SIMPLE_STRING_REGEX);
         return new FrontendType(name);
     }
 
-    public static FrontendType readFrom(final DataInput in) throws IOException {
+    public static @NonNull FrontendType readFrom(final DataInput in) throws IOException {
         final byte[] serialized = new byte[in.readInt()];
         in.readFully(serialized);
         return new FrontendType(new String(serialized, StandardCharsets.UTF_8));
@@ -116,10 +79,16 @@ public final class FrontendType implements Comparable<FrontendType>, WritableIde
         out.write(local);
     }
 
-    public String getName() {
+    public @NonNull String getName() {
         return name;
     }
 
+    public org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024
+        . @NonNull FrontendType toYang() {
+        return new org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024
+                .FrontendType(name);
+    }
+
     @Override
     public int hashCode() {
         return name.hashCode();
@@ -149,7 +118,8 @@ public final class FrontendType implements Comparable<FrontendType>, WritableIde
         return local;
     }
 
-    Object writeReplace() {
-        return new Proxy(getSerialized());
+    @java.io.Serial
+    private Object writeReplace() {
+        return new FT(getSerialized());
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/HI.java
new file mode 100644 (file)
index 0000000..ab4d884
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link LocalHistoryIdentifier}.
+ *
+ * @implNote
+ *     cookie is currently required only for module-based sharding, which is implemented as part of normal
+ *     DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence we may end up not needing
+ *     cookie at all.
+ *     We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we end up not needing
+ *     the cookie at all, we can switch to writeLong() and use zero flags for compatibility.
+ */
+final class HI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public HI() {
+        // for Externalizable
+    }
+
+    HI(final LocalHistoryIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getClientId().writeTo(out);
+        WritableObjects.writeLongs(out, identifier.getHistoryId(), identifier.getCookie());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final var clientId = ClientIdentifier.readFrom(in);
+        final byte header = WritableObjects.readLongHeader(in);
+        final var historyId = WritableObjects.readFirstLong(in, header);
+        final var cookie = WritableObjects.readSecondLong(in, header);
+        identifier = new LocalHistoryIdentifier(clientId, historyId, cookie);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/LocalHistoryIdentifier.java
index 7346ed0f0fffd94e3c69fac7e3d1a97c399ef756..ddeb2936151b9b8b7f42affa9648a2e53d6a0cb4 100644 (file)
@@ -7,14 +7,13 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 import org.opendaylight.yangtools.concepts.WritableObjects;
 
@@ -23,60 +22,12 @@ import org.opendaylight.yangtools.concepts.WritableObjects;
  * - a {@link ClientIdentifier}, which uniquely identifies a single instantiation of a particular frontend
  * - an unsigned long, which uniquely identifies the history on the backend
  * - an unsigned long cookie, assigned by the client and meaningless on the backend, which just reflects it back
- *
- * @author Robert Varga
  */
 public final class LocalHistoryIdentifier implements WritableIdentifier {
-    /*
-     * Implementation note: cookie is currently required only for module-based sharding, which is implemented as part
-     *                      of normal DataBroker interfaces. For DOMDataTreeProducer cookie will always be zero, hence
-     *                      we may end up not needing cookie at all.
-     *
-     *                      We use WritableObjects.writeLongs() to output historyId and cookie (in that order). If we
-     *                      end up not needing the cookie at all, we can switch to writeLong() and use zero flags for
-     *                      compatibility.
-     */
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private ClientIdentifier clientId;
-        private long historyId;
-        private long cookie;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ClientIdentifier frontendId, final long historyId, final long cookie) {
-            this.clientId = Preconditions.checkNotNull(frontendId);
-            this.historyId = historyId;
-            this.cookie = cookie;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            clientId.writeTo(out);
-            WritableObjects.writeLongs(out, historyId, cookie);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            clientId = ClientIdentifier.readFrom(in);
-
-            final byte header = WritableObjects.readLongHeader(in);
-            historyId = WritableObjects.readFirstLong(in, header);
-            cookie = WritableObjects.readSecondLong(in, header);
-        }
-
-        private Object readResolve() {
-            return new LocalHistoryIdentifier(clientId, historyId, cookie);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private final ClientIdentifier clientId;
+
+    private final @NonNull ClientIdentifier clientId;
     private final long historyId;
     private final long cookie;
 
@@ -85,12 +36,12 @@ public final class LocalHistoryIdentifier implements WritableIdentifier {
     }
 
     public LocalHistoryIdentifier(final ClientIdentifier frontendId, final long historyId, final long cookie) {
-        this.clientId = Preconditions.checkNotNull(frontendId);
+        clientId = requireNonNull(frontendId);
         this.historyId = historyId;
         this.cookie = cookie;
     }
 
-    public static LocalHistoryIdentifier readFrom(final DataInput in) throws IOException {
+    public static @NonNull LocalHistoryIdentifier readFrom(final DataInput in) throws IOException {
         final ClientIdentifier clientId = ClientIdentifier.readFrom(in);
 
         final byte header = WritableObjects.readLongHeader(in);
@@ -104,7 +55,7 @@ public final class LocalHistoryIdentifier implements WritableIdentifier {
         WritableObjects.writeLongs(out, historyId, cookie);
     }
 
-    public ClientIdentifier getClientId() {
+    public @NonNull ClientIdentifier getClientId() {
         return clientId;
     }
 
@@ -129,11 +80,10 @@ public final class LocalHistoryIdentifier implements WritableIdentifier {
         if (this == obj) {
             return true;
         }
-        if (!(obj instanceof LocalHistoryIdentifier)) {
+        if (!(obj instanceof LocalHistoryIdentifier other)) {
             return false;
         }
 
-        final LocalHistoryIdentifier other = (LocalHistoryIdentifier) obj;
         return historyId == other.historyId && cookie == other.cookie && clientId.equals(other.clientId);
     }
 
@@ -144,7 +94,8 @@ public final class LocalHistoryIdentifier implements WritableIdentifier {
                 .add("cookie", Long.toUnsignedString(cookie, 16)).toString();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(clientId, historyId, cookie);
+        return new HI(this);
     }
 }
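
Because HI is swapped in by writeReplace() and swapped back out by its readResolve(), the proxy stays invisible to callers using plain Java serialization. A usage sketch with hypothetical member and frontend-type names:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
    import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
    import org.opendaylight.controller.cluster.access.concepts.FrontendType;
    import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
    import org.opendaylight.controller.cluster.access.concepts.MemberName;

    final class HistoryIdRoundTrip {
        public static void main(final String[] args) throws IOException, ClassNotFoundException {
            final var original = new LocalHistoryIdentifier(
                ClientIdentifier.create(
                    FrontendIdentifier.create(MemberName.forName("member-1"), FrontendType.forName("example")), 0),
                1, 0);

            final var bytes = new ByteArrayOutputStream();
            try (var oos = new ObjectOutputStream(bytes)) {
                oos.writeObject(original);   // writeReplace() puts an HI into the stream
            }

            try (var ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                final var copy = (LocalHistoryIdentifier) ois.readObject();   // HI.readResolve() hands back the identifier
                System.out.println(original.equals(copy));                    // true
            }
        }
    }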
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MN.java
new file mode 100644 (file)
index 0000000..37b9fb8
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Serialization proxy for {@link MemberName}.
+ */
+final class MN implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private byte[] serialized;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MN() {
+        // for Externalizable
+    }
+
+    MN(final byte[] serialized) {
+        this.serialized = requireNonNull(serialized);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(serialized.length);
+        out.write(serialized);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        serialized = new byte[in.readInt()];
+        in.readFully(serialized);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        // TODO: consider caching instances here
+        return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized);
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/MemberName.java
index 09e39e8c582f17a8be1fc72f0e880de137b82972..daab643f8a6e13647d7e92554718a49e70b14570 100644 (file)
@@ -7,84 +7,49 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
-import com.google.common.base.Verify;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.nio.charset.StandardCharsets;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
 /**
  * Type-safe encapsulation of a cluster member name.
- *
- * @author Robert Varga
  */
-@Beta
 public final class MemberName implements Comparable<MemberName>, WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private byte[] serialized;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            this.serialized = Preconditions.checkNotNull(serialized);
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(serialized.length);
-            out.write(serialized);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            serialized = new byte[in.readInt()];
-            in.readFully(serialized);
-        }
-
-        private Object readResolve() {
-            // TODO: consider caching instances here
-            return new MemberName(new String(serialized, StandardCharsets.UTF_8), serialized);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private final String name;
+
+    private final @NonNull String name;
 
     @SuppressFBWarnings(value = "VO_VOLATILE_REFERENCE_TO_ARRAY",
             justification = "The array elements are non-volatile but we don't access them.")
     private volatile byte[] serialized;
 
     private MemberName(final String name) {
-        this.name = Preconditions.checkNotNull(name);
+        this.name = requireNonNull(name);
     }
 
     MemberName(final String name, final byte[] serialized) {
         this(name);
-        this.serialized = Verify.verifyNotNull(serialized);
+        this.serialized = verifyNotNull(serialized);
     }
 
-    public static MemberName forName(final String name) {
-        Preconditions.checkArgument(!Strings.isNullOrEmpty(name));
+    public static @NonNull MemberName forName(final String name) {
+        checkArgument(!Strings.isNullOrEmpty(name));
         // TODO: consider caching instances here
         return new MemberName(name);
     }
 
-    public static MemberName readFrom(final DataInput in) throws IOException {
+    public static @NonNull MemberName readFrom(final DataInput in) throws IOException {
         final byte[] serialized = new byte[in.readInt()];
         in.readFully(serialized);
         return new MemberName(new String(serialized, StandardCharsets.UTF_8));
@@ -97,10 +62,16 @@ public final class MemberName implements Comparable<MemberName>, WritableIdentif
         out.write(local);
     }
 
-    public String getName() {
+    public @NonNull String getName() {
         return name;
     }
 
+    public org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024
+        .@NonNull MemberName toYang() {
+        return new org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.cds.types.rev191024
+                .MemberName(name);
+    }
+
     @Override
     public int hashCode() {
         return name.hashCode();
@@ -130,7 +101,8 @@ public final class MemberName implements Comparable<MemberName>, WritableIdentif
         return local;
     }
 
+    @java.io.Serial
     Object writeReplace() {
-        return new Proxy(getSerialized());
+        return new MN(getSerialized());
     }
 }
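
MemberName (like FrontendType) keeps its serialized UTF-8 form in a volatile field populated on first use, which is why the FindBugs warning is suppressed. A sketch of that lazy caching pattern with a hypothetical class, not part of this tree:

    import java.nio.charset.StandardCharsets;

    final class CachedBytes {
        private final String name;
        private volatile byte[] serialized;

        CachedBytes(final String name) {
            this.name = name;
        }

        byte[] getSerialized() {
            byte[] local = serialized;
            if (local == null) {
                // Benign race: concurrent callers may compute this twice, but the result is identical
                local = name.getBytes(StandardCharsets.UTF_8);
                serialized = local;
            }
            return local;
        }
    }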
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Message.java
index 2bc33cd1e4bc6c6ec5fc45271e48e941ccdbbfa1..9748264e7fc0144bd2e9780599418600f2c28ae3 100644 (file)
@@ -10,15 +10,24 @@ package org.opendaylight.controller.cluster.access.concepts;
 import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.DataInput;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.NotSerializableException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutput;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
 import java.io.Serializable;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.Immutable;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 /**
  * An abstract concept of a Message. This class cannot be instantiated directly, use its specializations {@link Request}
@@ -47,14 +56,49 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
  * Note that this class specifies the {@link Immutable} contract, which means that all subclasses must follow this API
  * contract.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message type
  */
-@Beta
-public abstract class Message<T extends WritableIdentifier, C extends Message<T, C>> implements Immutable,
-        Serializable {
+public abstract class Message<T extends WritableIdentifier, C extends Message<T, C>>
+        implements Immutable, Serializable {
+    /**
+     * Externalizable proxy for use with {@link Message} subclasses.
+     *
+     * @param <T> Target identifier type
+     * @param <C> Message class
+     */
+    protected interface SerialForm<T extends WritableIdentifier, C extends Message<T, C>> extends Externalizable {
+
+        @NonNull C message();
+
+        void setMessage(@NonNull C message);
+
+        @Override
+        default void writeExternal(final ObjectOutput out) throws IOException {
+            final var message = message();
+            message.getTarget().writeTo(out);
+            WritableObjects.writeLong(out, message.getSequence());
+            writeExternal(out, message);
+        }
+
+        void writeExternal(@NonNull ObjectOutput out, @NonNull C msg) throws IOException;
+
+        @Override
+        default void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            final var target = verifyNotNull(readTarget(in));
+            final var sequence = WritableObjects.readLong(in);
+            setMessage(verifyNotNull(readExternal(in, target, sequence)));
+        }
+
+        @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence)
+            throws IOException, ClassNotFoundException;
+
+        Object readResolve();
+
+        @NonNull T readTarget(@NonNull DataInput in) throws IOException;
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull ABIVersion version;
@@ -106,21 +150,14 @@ public abstract class Message<T extends WritableIdentifier, C extends Message<T,
      */
     @SuppressWarnings("unchecked")
     public final @NonNull C toVersion(final @NonNull ABIVersion toVersion) {
-        if (this.version == toVersion) {
+        if (version == toVersion) {
             return (C)this;
         }
 
-        switch (toVersion) {
-            case BORON:
-                return verifyNotNull(cloneAsVersion(toVersion));
-            case TEST_PAST_VERSION:
-            case TEST_FUTURE_VERSION:
-            default:
-                // Fall-through to throw
-                break;
-        }
-
-        throw new IllegalArgumentException("Unhandled ABI version " + toVersion);
+        return switch (toVersion) {
+            case POTASSIUM -> verifyNotNull(cloneAsVersion(toVersion));
+            default -> throw new IllegalArgumentException("Unhandled ABI version " + toVersion);
+        };
     }
 
     /**
@@ -159,9 +196,29 @@ public abstract class Message<T extends WritableIdentifier, C extends Message<T,
      * @param reqVersion Requested ABI version
      * @return Proxy for this object
      */
-    abstract @NonNull AbstractMessageProxy<T, C> externalizableProxy(@NonNull ABIVersion reqVersion);
+    protected abstract @NonNull SerialForm<T, C> externalizableProxy(@NonNull ABIVersion reqVersion);
 
+    @java.io.Serial
     protected final Object writeReplace() {
         return externalizableProxy(version);
     }
+
+    protected final void throwNSE() throws NotSerializableException {
+        throw new NotSerializableException(getClass().getName());
+    }
+
+    @java.io.Serial
+    private void readObject(final ObjectInputStream stream) throws IOException, ClassNotFoundException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void readObjectNoData() throws ObjectStreamException {
+        throwNSE();
+    }
+
+    @java.io.Serial
+    private void writeObject(final ObjectOutputStream stream) throws IOException {
+        throwNSE();
+    }
 }
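
toVersion() now uses an exhaustive switch expression rather than the earlier fall-through switch statement. A generic sketch of the construct, with a hypothetical enum standing in for ABIVersion:

    enum ExampleVersion {
        POTASSIUM,
        TEST_FUTURE_VERSION
    }

    final class VersionSwitch {
        static String describe(final ExampleVersion version) {
            return switch (version) {
                // Arms either yield a value or throw; there is no fall-through
                case POTASSIUM -> "supported";
                default -> throw new IllegalArgumentException("Unhandled ABI version " + version);
            };
        }
    }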
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RE.java
new file mode 100644 (file)
index 0000000..27bf825
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link RequestEnvelope}.
+ */
+final class RE implements Envelope.SerialForm<Request<?, ?>, RequestEnvelope> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private RequestEnvelope envelope;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RE() {
+        // for Externalizable
+    }
+
+    RE(final RequestEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public RequestEnvelope envelope() {
+        return verifyNotNull(envelope);
+    }
+
+    @Override
+    public void setEnvelope(final RequestEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public RequestEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+            final Request<?, ?> message) {
+        return new RequestEnvelope(message, sessionId, txSequence);
+    }
+
+    @Override
+    public Object readResolve() {
+        return envelope();
+    }
+}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/Request.java
index 093a3f1eb0c1cdedb4c957cfdb81c85b639ac6f2..97ce498bda202b9cf9f2e2b79ab0b65146ebdd0f 100644 (file)
@@ -10,8 +10,12 @@ package org.opendaylight.controller.cluster.access.concepts;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -20,14 +24,31 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
  * A request message concept. Upon receipt of this message, the recipient will respond with either
  * a {@link RequestSuccess} or a {@link RequestFailure} message.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message type
  */
-@Beta
 public abstract class Request<T extends WritableIdentifier, C extends Request<T, C>> extends Message<T, C> {
+    protected interface SerialForm<T extends WritableIdentifier, C extends Request<T, C>>
+            extends Message.SerialForm<T, C> {
+        @Override
+        default C readExternal(final ObjectInput in, final T target, final long sequence)
+                throws ClassNotFoundException, IOException {
+            return readExternal(in, target, sequence,
+                JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject()));
+        }
+
+        @NonNull C readExternal(@NonNull ObjectInput in, @NonNull T target, long sequence, @NonNull ActorRef replyTo)
+            throws IOException;
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+            out.writeObject(Serialization.serializedActorPath(msg.getReplyTo()));
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     private final @NonNull ActorRef replyTo;
 
     protected Request(final @NonNull T target, final long sequence, final @NonNull ActorRef replyTo) {
@@ -63,5 +84,5 @@ public abstract class Request<T extends WritableIdentifier, C extends Request<T,
     }
 
     @Override
-    protected abstract AbstractRequestProxy<T, C> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T, C> externalizableProxy(ABIVersion version);
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelope.java
index 46d5d1f99620b83304b49e7d160a6648768dce65..e8983697cf659e56ad99fa347278d8c0df58e596 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.access.concepts;
 import akka.actor.ActorRef;
 
 public final class RequestEnvelope extends Envelope<Request<?, ?>> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public RequestEnvelope(final Request<?, ?> message, final long sessionId, final long txSequence) {
@@ -17,8 +18,8 @@ public final class RequestEnvelope extends Envelope<Request<?, ?>> {
     }
 
     @Override
-    RequestEnvelopeProxy createProxy() {
-        return new RequestEnvelopeProxy(this);
+    RE createProxy() {
+        return new RE(this);
     }
 
     /**
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestEnvelopeProxy.java
deleted file mode 100644 (file)
index 66e7eaa..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class RequestEnvelopeProxy extends AbstractEnvelopeProxy<Request<?, ?>> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public RequestEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    RequestEnvelopeProxy(final RequestEnvelope envelope) {
-        super(envelope);
-    }
-
-    @Override
-    RequestEnvelope createEnvelope(final Request<?, ?> message, final long sessionId, final long txSequence) {
-        return new RequestEnvelope(message, sessionId, txSequence);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestException.java
index 7267edea4f5d19e7bc10137b37857c849e2200b2..d1120c61ef468f8060cc6b67261b895f5bda5c04 100644 (file)
@@ -9,16 +9,13 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * A failure cause behind a {@link RequestFailure} to process a {@link Request}.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class RequestException extends Exception {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     protected RequestException(final @NonNull String message) {
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/RequestFailure.java
index db5a15b0bc57c5cda03bfd05ffaed53db7550024..7204912669cabf88596be24a5e82e385a4e1dcaf 100644 (file)
@@ -9,8 +9,10 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -18,14 +20,33 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
 /**
  * A failure response to a {@link Request}. Contains a {@link RequestException} detailing the cause for this failure.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message class
  */
-@Beta
 public abstract class RequestFailure<T extends WritableIdentifier, C extends RequestFailure<T, C>>
         extends Response<T, C> {
+    /**
+     * Externalizable proxy for use with {@link RequestFailure} subclasses.
+     *
+     * @param <T> Target identifier type
+     */
+    protected interface SerialForm<T extends WritableIdentifier, C extends RequestFailure<T, C>>
+            extends Message.SerialForm<T, C> {
+        @Override
+        default C readExternal(final ObjectInput in, final T target, final long sequence)
+                throws IOException, ClassNotFoundException {
+            return createFailure(target, sequence, (RequestException) in.readObject());
+        }
+
+        @Override
+        default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+            out.writeObject(msg.getCause());
+        }
+
+        @NonNull C createFailure(@NonNull T target, long sequence, @NonNull RequestException failureCause);
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final @NonNull RequestException cause;
@@ -65,5 +86,5 @@ public abstract class RequestFailure<T extends WritableIdentifier, C extends Req
     }
 
     @Override
-    protected abstract AbstractRequestFailureProxy<T, C> externalizableProxy(ABIVersion version);
+    protected abstract SerialForm<T, C> externalizableProxy(ABIVersion version);
 }
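
Note how the nested SerialForm interface above replaces the former AbstractRequestFailureProxy hierarchy: the shared read/write logic lives in interface default methods, and each concrete proxy only supplies the factory method. A rough sketch of that shape with invented names (not the actual ODL interfaces):

import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;

// Hypothetical FailureForm interface; default methods stand in for an abstract proxy base class.
interface FailureForm<C> {
    // the only method a concrete proxy has to implement
    C createFailure(String target, long sequence, Exception cause);

    // shared deserialization logic: the payload of a failure is just its cause
    default C readFailure(final ObjectInput in, final String target, final long sequence)
            throws IOException, ClassNotFoundException {
        return createFailure(target, sequence, (Exception) in.readObject());
    }

    // shared serialization logic, mirroring writeExternal() above
    default void writeFailure(final ObjectOutput out, final Exception cause) throws IOException {
        out.writeObject(cause);
    }
}
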
index 9b60d21b0d2fc13b9aa391ac6460629f457c9d33..f7e59ed1e6597019c43a04826fc5f006fbfea7d9 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
+import java.io.IOException;
+import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -15,23 +16,26 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
 /**
  * A successful reply to a {@link Request}.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  */
-@Beta
-public abstract class RequestSuccess<T extends WritableIdentifier, C extends RequestSuccess<T, C>> extends
-        Response<T, C> {
+public abstract class RequestSuccess<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
+        extends Response<T, C> {
+    protected interface SerialForm<T extends WritableIdentifier, C extends RequestSuccess<T, C>>
+            extends Response.SerialForm<T, C> {
+        @Override
+        default void writeExternal(final ObjectOutput out, final C msg) throws IOException {
+            // Defaults to no-op
+        }
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    protected RequestSuccess(final @NonNull C success,  final @NonNull ABIVersion version) {
+    protected RequestSuccess(final @NonNull C success, final @NonNull ABIVersion version) {
         super(success, version);
     }
 
     protected RequestSuccess(final @NonNull T target, final long sequence) {
         super(target, sequence);
     }
-
-    @Override
-    protected abstract AbstractSuccessProxy<T, C> externalizableProxy(ABIVersion version);
 }
index f733a9e9199b31c1ed8b4afeb84549c8685bad78..a41fa01db99d2a9c1701be940ce52115a43b2eb4 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
@@ -17,13 +16,16 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
 * {@link RequestFailure} and {@link RequestSuccess}, which provide appropriate specialization. It is visible purely
 * to allow checking whether an object is either of those specializations with a single instanceof check.
  *
- * @author Robert Varga
- *
  * @param <T> Target identifier type
  * @param <C> Message type
  */
-@Beta
 public abstract class Response<T extends WritableIdentifier, C extends Response<T, C>> extends Message<T, C> {
+    protected interface SerialForm<T extends WritableIdentifier, C extends Response<T, C>>
+            extends Message.SerialForm<T, C> {
+
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     Response(final @NonNull T target, final long sequence) {
@@ -33,7 +35,4 @@ public abstract class Response<T extends WritableIdentifier, C extends Response<
     Response(final @NonNull C response, final @NonNull ABIVersion version) {
         super(response, version);
     }
-
-    @Override
-    abstract AbstractResponseProxy<T, C> externalizableProxy(ABIVersion version);
 }
index 7936baa1696059b184b8ff55f548ac5fd6e9229b..50d1e7434ce1c845fb9c5948f0759b41d526fb88 100644 (file)
@@ -7,16 +7,39 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.WritableObjects;
 
 public abstract class ResponseEnvelope<T extends Response<?, ?>> extends Envelope<T> {
+    interface SerialForm<T extends Response<?, ?>, E extends ResponseEnvelope<T>> extends Envelope.SerialForm<T, E> {
+        @Override
+        default void writeExternal(final ObjectOutput out, final @NonNull E envelope) throws IOException {
+            Envelope.SerialForm.super.writeExternal(out, envelope);
+            WritableObjects.writeLong(out, envelope.getExecutionTimeNanos());
+        }
+
+        @Override
+        default E readExternal(final ObjectInput in, final long sessionId, final long txSequence, final T message)
+                throws IOException {
+            return readExternal(in, sessionId, txSequence, message, WritableObjects.readLong(in));
+        }
+
+        E readExternal(ObjectInput in, long sessionId, long txSequence, T message, long executionTimeNanos);
+    }
+
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long executionTimeNanos;
 
     ResponseEnvelope(final T message, final long sessionId, final long txSequence, final long executionTimeNanos) {
         super(message, sessionId, txSequence);
-        Preconditions.checkArgument(executionTimeNanos >= 0);
+        checkArgument(executionTimeNanos >= 0, "Negative executionTime");
         this.executionTimeNanos = executionTimeNanos;
     }
 
@@ -29,7 +52,4 @@ public abstract class ResponseEnvelope<T extends Response<?, ?>> extends Envelop
     public final long getExecutionTimeNanos() {
         return executionTimeNanos;
     }
-
-    @Override
-    abstract AbstractResponseEnvelopeProxy<T> createProxy();
 }
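
executionTimeNanos is now written with WritableObjects.writeLong() and read back with WritableObjects.readLong(), the yangtools helpers this API already uses for compact long encoding. A small round-trip sketch, assuming only those two methods:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.yangtools.concepts.WritableObjects;

// Round-trip sketch: encode a non-negative nanosecond value and decode it again.
final class ExecutionTimeRoundTrip {
    private ExecutionTimeRoundTrip() {
        // utility class
    }

    static long roundTrip(final long executionTimeNanos) throws IOException {
        final var bytes = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bytes)) {
            WritableObjects.writeLong(out, executionTimeNanos);
        }
        try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            return WritableObjects.readLong(in);
        }
    }
}
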
index 7730318f578d31c02272abac6faad7ff193f4f99..3f1f71d17e830beaa572a054421df84841003480 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-
 /**
  * General error raised when the recipient of a {@link Request} determines that the request contains
  * a {@link ClientIdentifier} which corresponds to an outdated generation.
- *
- * @author Robert Varga
  */
-@Beta
 public final class RetiredGenerationException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public RetiredGenerationException(final long originatingGeneration, final long newGeneration) {
index a52396fd5d5f6a00ab78085bd457f8aca3a3246d..3f886a85102c5813bed14fb3a84b11791dfaa4ef 100644 (file)
@@ -7,22 +7,21 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.Strings;
 
 /**
  * General error raised when the recipient of a {@link Request} fails to process a request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class RuntimeRequestException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public RuntimeRequestException(final String message, final Throwable cause) {
-        super(message, Preconditions.checkNotNull(cause));
-        Preconditions.checkArgument(!Strings.isNullOrEmpty(message), "Exception message is mandatory");
+        super(message, requireNonNull(cause));
+        checkArgument(!Strings.isNullOrEmpty(message), "Exception message is mandatory");
     }
 
     @Override
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SE.java
new file mode 100644 (file)
index 0000000..3e8ce6f
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.ObjectInput;
+
+/**
+ * Serialization proxy for {@link SuccessEnvelope}.
+ */
+final class SE implements ResponseEnvelope.SerialForm<RequestSuccess<?, ?>, SuccessEnvelope> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private SuccessEnvelope envelope;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SE() {
+        // for Externalizable
+    }
+
+    SE(final SuccessEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public SuccessEnvelope envelope() {
+        return verifyNotNull(envelope);
+    }
+
+    @Override
+    public void setEnvelope(final SuccessEnvelope envelope) {
+        this.envelope = requireNonNull(envelope);
+    }
+
+    @Override
+    public SuccessEnvelope readExternal(final ObjectInput in, final long sessionId, final long txSequence,
+            final RequestSuccess<?, ?> message, final long executionTimeNanos) {
+        return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos);
+    }
+
+    @Override
+    public Object readResolve() {
+        return envelope();
+    }
+}
index cd3e2608d60ea03cb052dcdfa8a0f52d69d69ad5..118e9262a7dad58bc06d64435da0ac810ee723f7 100644 (file)
@@ -7,14 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-
 /**
  * A tagging interface that specifies a message whose serialized size can be large and thus should be sliced into
  * smaller chunks when transporting over the wire.
  *
  * @author Thomas Pantelis
  */
-@Beta
 public interface SliceableMessage {
+    // Marker interface
 }
index 3c23a23763cd071abb2e967f7c58b6e9cd633ca1..2644c6ff0f8856a550ec8cdacbe227e4afa538ca 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.access.concepts;
 
 public final class SuccessEnvelope extends ResponseEnvelope<RequestSuccess<?, ?>> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public SuccessEnvelope(final RequestSuccess<?, ?> message, final long sessionId, final long txSequence,
@@ -16,7 +17,7 @@ public final class SuccessEnvelope extends ResponseEnvelope<RequestSuccess<?, ?>
     }
 
     @Override
-    SuccessEnvelopeProxy createProxy() {
-        return new SuccessEnvelopeProxy(this);
+    SE createProxy() {
+        return new SE(this);
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/SuccessEnvelopeProxy.java
deleted file mode 100644 (file)
index 3ac388b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.access.concepts;
-
-final class SuccessEnvelopeProxy extends AbstractResponseEnvelopeProxy<RequestSuccess<?, ?>> {
-    private static final long serialVersionUID = 1L;
-
-    // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-    // be able to create instances via reflection.
-    @SuppressWarnings("checkstyle:RedundantModifier")
-    public SuccessEnvelopeProxy() {
-        // for Externalizable
-    }
-
-    SuccessEnvelopeProxy(final SuccessEnvelope envelope) {
-        super(envelope);
-    }
-
-    @Override
-    ResponseEnvelope<RequestSuccess<?, ?>> createEnvelope(final RequestSuccess<?, ?> message, final long sessionId,
-            final long txSequence, final long executionTimeNanos) {
-        return new SuccessEnvelope(message, sessionId, txSequence, executionTimeNanos);
-    }
-}
diff --git a/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java b/opendaylight/md-sal/cds-access-api/src/main/java/org/opendaylight/controller/cluster/access/concepts/TI.java
new file mode 100644 (file)
index 0000000..8bc927f
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.concepts;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link TransactionIdentifier}.
+ */
+final class TI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionIdentifier identifier;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public TI() {
+        // for Externalizable
+    }
+
+    TI(final TransactionIdentifier identifier) {
+        this.identifier = requireNonNull(identifier);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        identifier = new TransactionIdentifier(LocalHistoryIdentifier.readFrom(in), WritableObjects.readLong(in));
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        identifier.getHistoryId().writeTo(out);
+        WritableObjects.writeLong(out, identifier.getTransactionId());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(identifier);
+    }
+}
index 8fa0b627027166abbf207be2a3f63f2da481f840..ea72c847501a79a8982af88b1fd6aadfc90784c4 100644 (file)
@@ -9,60 +9,21 @@ package org.opendaylight.controller.cluster.access.concepts;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import java.io.DataInput;
 import java.io.DataOutput;
-import java.io.Externalizable;
 import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 import org.opendaylight.yangtools.concepts.WritableObjects;
 
 /**
  * Globally-unique identifier of a transaction.
- *
- * @author Robert Varga
  */
-@Beta
 public final class TransactionIdentifier implements WritableIdentifier {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private LocalHistoryIdentifier historyId;
-        private long transactionId;
-
-        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
-        // be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final LocalHistoryIdentifier historyId, final long transactionId) {
-            this.historyId = requireNonNull(historyId);
-            this.transactionId = transactionId;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            historyId.writeTo(out);
-            WritableObjects.writeLong(out, transactionId);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            historyId = LocalHistoryIdentifier.readFrom(in);
-            transactionId = WritableObjects.readLong(in);
-        }
-
-        private Object readResolve() {
-            return new TransactionIdentifier(historyId, transactionId);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private final LocalHistoryIdentifier historyId;
+
+    private final @NonNull LocalHistoryIdentifier historyId;
     private final long transactionId;
     private String shortString;
 
@@ -71,7 +32,7 @@ public final class TransactionIdentifier implements WritableIdentifier {
         this.transactionId = transactionId;
     }
 
-    public static TransactionIdentifier readFrom(final DataInput in) throws IOException {
+    public static @NonNull TransactionIdentifier readFrom(final DataInput in) throws IOException {
         final LocalHistoryIdentifier historyId = LocalHistoryIdentifier.readFrom(in);
         return new TransactionIdentifier(historyId, WritableObjects.readLong(in));
     }
@@ -82,7 +43,7 @@ public final class TransactionIdentifier implements WritableIdentifier {
         WritableObjects.writeLong(out, transactionId);
     }
 
-    public LocalHistoryIdentifier getHistoryId() {
+    public @NonNull LocalHistoryIdentifier getHistoryId() {
         return historyId;
     }
 
@@ -97,15 +58,8 @@ public final class TransactionIdentifier implements WritableIdentifier {
 
     @Override
     public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof TransactionIdentifier)) {
-            return false;
-        }
-
-        final TransactionIdentifier other = (TransactionIdentifier) obj;
-        return transactionId == other.transactionId && historyId.equals(other.historyId);
+        return this == obj || obj instanceof TransactionIdentifier other && transactionId == other.transactionId
+            && historyId.equals(other.historyId);
     }
 
     public String toShortString() {
@@ -125,7 +79,8 @@ public final class TransactionIdentifier implements WritableIdentifier {
         return toShortString();
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(historyId, transactionId);
+        return new TI(this);
     }
 }
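
equals() above now relies on Java 16+ pattern matching for instanceof, folding the type check and cast into a single expression. The same idiom on an invented standalone type:

// Standalone sketch of the pattern-matching equals() shape used above.
final class Pair {
    private final long id;
    private final String name;

    Pair(final long id, final String name) {
        this.id = id;
        this.name = name;
    }

    @Override
    public int hashCode() {
        return Long.hashCode(id) * 31 + name.hashCode();
    }

    @Override
    public boolean equals(final Object obj) {
        // 'other' is bound only when obj actually is a Pair, so no explicit cast is needed
        return this == obj || obj instanceof Pair other && id == other.id && name.equals(other.name);
    }
}
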
index 903ed59fbc04c00d3f7121d11edfc11875dc76b7..1de266d2851d2f14975f86f64f5a4b20971fefd4 100644 (file)
@@ -7,16 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import com.google.common.annotations.Beta;
-
 /**
  * General error raised when the recipient of a {@link Request} determines that it does not know how to handle
  * the request.
- *
- * @author Robert Varga
  */
-@Beta
 public final class UnsupportedRequestException extends RequestException {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public UnsupportedRequestException(final Request<?, ?> request) {
diff --git a/opendaylight/md-sal/cds-access-api/src/main/yang/odl-controller-cds-types.yang b/opendaylight/md-sal/cds-access-api/src/main/yang/odl-controller-cds-types.yang
new file mode 100644 (file)
index 0000000..58f3b9e
--- /dev/null
@@ -0,0 +1,53 @@
+module odl-controller-cds-types {
+  yang-version 1;
+  namespace "urn:opendaylight:params:xml:ns:yang:controller:cds:types";
+  prefix "cdst";
+
+  organization "The OpenDaylight Project";
+
+  description "Common type definitions related to clustered data store.";
+
+  revision 2019-10-24 {
+    description "Initial revision.";
+  }
+
+  typedef member-name {
+    description "Cluster member name.";
+    type string;
+  }
+
+  typedef frontend-type {
+    description "Frontend type.";
+    type string {
+      pattern "";
+    }
+  }
+
+  typedef client-generation {
+    description "Client generation.";
+    type uint64;
+  }
+
+  grouping frontend-identifier {
+    description "Identifier of a particular frontend.";
+    leaf member {
+      type member-name;
+      mandatory true;
+    }
+
+    leaf type {
+      type frontend-type;
+      mandatory true;
+    }
+  }
+
+  grouping client-identifier {
+    description "Identifier of a particular client.";
+    uses frontend-identifier;
+    leaf generation {
+      type client-generation;
+      mandatory true;
+    }
+  }
+}
+
index f9e9c0c95473e7d39f7bd4315ae2a434f32aba4d..1513f363969aa54331d7469c4b5ce228136139de 100644 (file)
@@ -8,8 +8,9 @@
 package org.opendaylight.controller.cluster.access;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
-import static org.opendaylight.controller.cluster.access.ABIVersion.BORON;
+import static org.opendaylight.controller.cluster.access.ABIVersion.POTASSIUM;
 import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_FUTURE_VERSION;
 import static org.opendaylight.controller.cluster.access.ABIVersion.TEST_PAST_VERSION;
 
@@ -22,25 +23,25 @@ public class ABIVersionTest {
     @Test
     public void testInvalidVersions() {
         assertTrue(TEST_PAST_VERSION.compareTo(TEST_FUTURE_VERSION) < 0);
-        assertTrue(TEST_PAST_VERSION.compareTo(BORON) < 0);
-        assertTrue(TEST_FUTURE_VERSION.compareTo(BORON) > 0);
+        assertTrue(TEST_PAST_VERSION.compareTo(POTASSIUM) < 0);
+        assertTrue(TEST_FUTURE_VERSION.compareTo(POTASSIUM) > 0);
     }
 
     @Test
-    public void testBoronVersion() throws Exception {
-        assertEquals((short)5, BORON.shortValue());
-        assertEquals(BORON, ABIVersion.valueOf(BORON.shortValue()));
-        assertEquals(BORON, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(BORON))));
+    public void testPotassiumVersion() throws Exception {
+        assertEquals((short)10, POTASSIUM.shortValue());
+        assertEquals(POTASSIUM, ABIVersion.valueOf(POTASSIUM.shortValue()));
+        assertEquals(POTASSIUM, ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(POTASSIUM))));
     }
 
-    @Test(expected = PastVersionException.class)
-    public void testInvalidPastVersion() throws Exception {
-        ABIVersion.valueOf(TEST_PAST_VERSION.shortValue());
+    @Test
+    public void testInvalidPastVersion() {
+        assertThrows(PastVersionException.class, () -> ABIVersion.valueOf(TEST_PAST_VERSION.shortValue()));
     }
 
-    @Test(expected = FutureVersionException.class)
-    public void testInvalidFutureVersion() throws Exception {
-        ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue());
+    @Test
+    public void testInvalidFutureVersion() {
+        assertThrows(FutureVersionException.class, () -> ABIVersion.valueOf(TEST_FUTURE_VERSION.shortValue()));
     }
 
     private static byte[] writeVersion(final ABIVersion version) {
@@ -49,8 +50,9 @@ public class ABIVersionTest {
         return bado.toByteArray();
     }
 
-    @Test(expected = IOException.class)
-    public void testBadRead() throws IOException {
-        ABIVersion.readFrom(ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION)));
+    @Test
+    public void testBadRead() {
+        final var in = ByteStreams.newDataInput(writeVersion(TEST_PAST_VERSION));
+        assertThrows(IOException.class, () -> ABIVersion.readFrom(in));
     }
 }
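
The test migration above swaps @Test(expected = ...) for assertThrows(), which scopes the expectation to a single statement and hands back the thrown exception for further assertions. A minimal JUnit 4 sketch (assertThrows is available from JUnit 4.13 onwards); the example method and expected message are illustrative:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

import org.junit.Test;

public class AssertThrowsExample {
    @Test
    public void divisionByZeroIsRejected() {
        // only this one call is expected to throw, and the exception can be inspected afterwards
        final var ex = assertThrows(ArithmeticException.class, () -> Math.floorDiv(1, 0));
        assertEquals("/ by zero", ex.getMessage());
    }
}
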
index 60e7dc83884c15f7bce09bf280307c54bee28ffa..48465208e21a27007fbe9512cfd1c63a200e213e 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
@@ -25,14 +26,12 @@ public class AbortLocalTransactionRequestTest
 
     private static final AbortLocalTransactionRequest OBJECT = new AbortLocalTransactionRequest(TRANSACTION, ACTOR_REF);
 
-    @Override
-    protected AbortLocalTransactionRequest object() {
-        return OBJECT;
+    public AbortLocalTransactionRequestTest() {
+        super(OBJECT);
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof AbortLocalTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((AbortLocalTransactionRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final AbortLocalTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index e40a39450b78c7b5462cae45b2cf9407c369a9cc..1cb9af38a01330d74bccf98e667d7771db275542 100644 (file)
@@ -7,24 +7,34 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.hamcrest.CoreMatchers.startsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public abstract class AbstractLocalTransactionRequestTest<T extends AbstractLocalTransactionRequest<T>>
         extends AbstractTransactionRequestTest<T> {
-    @Override
-    protected abstract T object();
+    protected AbstractLocalTransactionRequestTest(final T object) {
+        super(object, -1);
+    }
 
     @Test
     public void cloneAsVersionTest() {
-        Assert.assertEquals(object(), object().cloneAsVersion(ABIVersion.BORON));
+        assertSame(object(), object().cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION));
     }
 
     @Override
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void serializationTest() {
-        SerializationUtils.clone(object());
+        final var ex = assertThrows(UnsupportedOperationException.class, () -> SerializationUtils.clone(object()));
+        assertThat(ex.getMessage(), allOf(
+            startsWith("Local transaction request "),
+            endsWith(" should never be serialized")));
     }
 }
index c5cfd2831979f2cab943907c01242d4ed48813ec..5ae72198880b0c7c31f854b9ddca6e2e3d32daa1 100644 (file)
@@ -7,32 +7,36 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 public abstract class AbstractReadTransactionRequestTest<T extends AbstractReadPathTransactionRequest<T>>
         extends AbstractTransactionRequestTest<T> {
-    protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.EMPTY;
+    protected static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
     protected static final boolean SNAPSHOT_ONLY = true;
 
-    @Override
-    protected abstract T object();
+    protected AbstractReadTransactionRequestTest(final T object, final int baseSize) {
+        super(object, baseSize);
+    }
 
     @Test
     public void getPathTest() {
-        Assert.assertEquals(PATH, object().getPath());
+        assertEquals(PATH, object().getPath());
     }
 
     @Test
     public void isSnapshotOnlyTest() {
-        Assert.assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly());
+        assertEquals(SNAPSHOT_ONLY, object().isSnapshotOnly());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object()));
-        Assert.assertTrue(result.toString().contains("path=" + PATH));
+        final var result = object().addToStringAttributes(MoreObjects.toStringHelper(object())).toString();
+        assertThat(result, containsString("path=" + PATH));
     }
 }
index ccdc6753ac640600d972d4591ba8246814ae96d8..78456b246afaf9014776b047ac33dfec48952bed 100644 (file)
@@ -7,8 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
@@ -29,26 +32,36 @@ public abstract class AbstractRequestFailureTest<T extends RequestFailure<?, T>>
     protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
             HISTORY_IDENTIFIER, 0);
     protected static final RequestException CAUSE = new RuntimeRequestException("fail", new Throwable());
+    private static final int CAUSE_SIZE = SerializationUtils.serialize(CAUSE).length;
+
+    private final T object;
+    private final int expectedSize;
 
-    abstract T object();
+    protected AbstractRequestFailureTest(final T object, final int baseSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = baseSize + CAUSE_SIZE;
+    }
 
     @Test
     public void getCauseTest() {
-        Assert.assertEquals(CAUSE, object().getCause());
+        assertEquals(CAUSE, object.getCause());
     }
 
     @Test
     public void isHardFailureTest() {
-        Assert.assertTrue(object().isHardFailure());
+        assertTrue(object.isHardFailure());
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void serializationTest() {
-        final Object deserialize = SerializationUtils.clone(object());
+        final var bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+
+        @SuppressWarnings("unchecked")
+        final var deserialize = (T) SerializationUtils.deserialize(bytes);
 
-        Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
-        Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
-        Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+        assertEquals(object.getTarget(), deserialize.getTarget());
+        assertEquals(object.getVersion(), deserialize.getVersion());
+        assertEquals(object.getSequence(), deserialize.getSequence());
     }
 }
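
The reworked base test above asserts the exact serialized size by going through SerializationUtils.serialize()/deserialize() from commons-lang3 instead of clone(). A self-contained sketch of that size-then-round-trip pattern, using a plain serializable record as a stand-in for the real messages:

import static org.junit.Assert.assertEquals;

import java.io.Serializable;
import org.apache.commons.lang3.SerializationUtils;
import org.junit.Test;

public class SerializedSizeExample {
    // Stand-in payload; the real tests serialize Request/Response messages and pin their size.
    record Payload(long sequence, String target) implements Serializable {
    }

    @Test
    public void serializationRoundTrip() {
        final var object = new Payload(0, "test");

        // serialize once so the size check and the round-trip use the same bytes
        final byte[] bytes = SerializationUtils.serialize(object);
        final var deserialized = (Payload) SerializationUtils.deserialize(bytes);

        assertEquals(object, deserialized);
        // the exact byte length is left unasserted here: it depends on the payload's serial form
    }
}
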
index 8a812522336588517faff0a1a490196a1c46c077..b0038758c787e75033d67ddd72d339603e9a44fc 100644 (file)
@@ -7,8 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.Assert.assertEquals;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
@@ -18,25 +21,34 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 
 public abstract class AbstractRequestSuccessTest<T extends RequestSuccess<?, T>> {
-
     private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create(
             MemberName.forName("test"), FrontendType.forName("one"));
     protected static final ClientIdentifier CLIENT_IDENTIFIER = ClientIdentifier.create(FRONTEND_IDENTIFIER, 0);
-    protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier(
-            CLIENT_IDENTIFIER, 0);
+    protected static final LocalHistoryIdentifier HISTORY_IDENTIFIER = new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0);
+
+    private final @NonNull T object;
+    private final int expectedSize;
 
-    protected abstract T object();
+    protected AbstractRequestSuccessTest(final T object, final int expectedSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = expectedSize;
+    }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void serializationTest() {
-        final Object deserialize = SerializationUtils.clone(object());
+        final var bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+
+        @SuppressWarnings("unchecked")
+        final var deserialize = (T) SerializationUtils.deserialize(bytes);
 
-        Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
-        Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
-        Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+        assertEquals(object.getTarget(), deserialize.getTarget());
+        assertEquals(object.getVersion(), deserialize.getVersion());
+        assertEquals(object.getSequence(), deserialize.getSequence());
         doAdditionalAssertions(deserialize);
     }
 
-    protected abstract void doAdditionalAssertions(Object deserialize);
+    protected void doAdditionalAssertions(final T deserialize) {
+        // No-op by default
+    }
 }
index 58d24e4e53838a2c19b674a9251d32fd86cfa245..f276ac3937763895d0c13ec74fb0b7ded97ad985 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
@@ -15,7 +16,6 @@ import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -29,14 +29,14 @@ public abstract class AbstractTransactionRequestTest<T extends TransactionReques
     protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
             HISTORY_IDENTIFIER, 0);
 
-    @Override
-    protected abstract T object();
+    protected AbstractTransactionRequestTest(final T object, final int baseSize) {
+        super(object, baseSize);
+    }
 
     @Test
     public void toRequestFailureTest() {
-        final Throwable cause = new Throwable();
-        final RequestException exception = new RuntimeRequestException("fail", cause);
-        final TransactionFailure failure = object().toRequestFailure(exception);
-        Assert.assertNotNull(failure);
+        final var exception = new RuntimeRequestException("fail", new Throwable());
+        final var failure = object().toRequestFailure(exception);
+        assertNotNull(failure);
     }
 }
index 08c9abb3af1d6905b904c74e03be822bde2e0cfd..4f00501750f55120599002bb63b3c03088928a0d 100644 (file)
@@ -11,8 +11,10 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 
 public abstract class AbstractTransactionSuccessTest<T extends TransactionSuccess<T>>
         extends AbstractRequestSuccessTest<T> {
+    protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(HISTORY_IDENTIFIER,
+        0);
 
-    protected static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(
-            HISTORY_IDENTIFIER, 0);
-
+    protected AbstractTransactionSuccessTest(final T object, final int expectedSize) {
+        super(object, expectedSize);
+    }
 }
index 61b7dc2ad40b2e95ca6587b3ecc8789b11b4ca3a..f1df2d882f80132a138b7061feeb7f7b050d2789 100644 (file)
@@ -7,8 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
@@ -17,7 +20,7 @@ import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 public class CommitLocalTransactionRequestTest
         extends AbstractLocalTransactionRequestTest<CommitLocalTransactionRequest> {
@@ -30,34 +33,32 @@ public class CommitLocalTransactionRequestTest
     private static final DataTreeModification MODIFICATION = Mockito.mock(DataTreeModification.class);
     private static final boolean COORDINATED = true;
 
-    private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest(
-            TRANSACTION, 0, ACTOR_REF, MODIFICATION, null, COORDINATED);
+    private static final CommitLocalTransactionRequest OBJECT = new CommitLocalTransactionRequest(TRANSACTION, 0,
+        ACTOR_REF, MODIFICATION, null, COORDINATED);
 
-    @Override
-    protected CommitLocalTransactionRequest object() {
-        return OBJECT;
+    public CommitLocalTransactionRequestTest() {
+        super(OBJECT);
     }
 
     @Test
     public void getModificationTest() {
-        Assert.assertEquals(MODIFICATION, OBJECT.getModification());
+        assertEquals(MODIFICATION, OBJECT.getModification());
     }
 
     @Test
     public void isCoordinatedTest() {
-        Assert.assertEquals(COORDINATED, OBJECT.isCoordinated());
+        assertEquals(COORDINATED, OBJECT.isCoordinated());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("coordinated=" + COORDINATED));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("coordinated=" + COORDINATED));
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof CommitLocalTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((CommitLocalTransactionRequest) deserialize).getReplyTo());
-        Assert.assertEquals(OBJECT.getModification(), ((CommitLocalTransactionRequest) deserialize).getModification());
+    protected void doAdditionalAssertions(final CommitLocalTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getModification(), deserialize.getModification());
     }
 }
\ No newline at end of file
index 6518102fa144189992ec263174de9e12aba8f12b..2278195d826574f8061390fcdf5cfa623bd2ad2e 100644 (file)
@@ -7,23 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ConnectClientFailureTest extends AbstractRequestFailureTest<ConnectClientFailure> {
     private static final ConnectClientFailure OBJECT = new ConnectClientFailure(CLIENT_IDENTIFIER, 0, CAUSE);
 
-    @Override
-    ConnectClientFailure object() {
-        return OBJECT;
+    public ConnectClientFailureTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
         final ConnectClientFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
-        Assert.assertEquals(OBJECT.getTarget(), clone.getTarget());
-        Assert.assertEquals(OBJECT.getSequence(), clone.getSequence());
-        Assert.assertEquals(OBJECT.getCause(), clone.getCause());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getCause(), clone.getCause());
     }
 }
\ No newline at end of file
index 64e4717e85aa15595968560f04089b9a5951779a..3bf1951e5037bd4d1e1978b69ef3f5a597f809f7 100644 (file)
@@ -7,9 +7,13 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableRangeSet;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.concepts.AbstractRequestTest;
@@ -17,7 +21,6 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
 public class ConnectClientRequestTest extends AbstractRequestTest<ConnectClientRequest> {
     private static final FrontendIdentifier FRONTEND_IDENTIFIER = FrontendIdentifier.create(
@@ -30,49 +33,45 @@ public class ConnectClientRequestTest extends AbstractRequestTest<ConnectClientR
     private static final ConnectClientRequest OBJECT = new ConnectClientRequest(
             CLIENT_IDENTIFIER, 0, ACTOR_REF, MIN_VERSION, MAX_VERSION);
 
-    @Override
-    protected ConnectClientRequest object() {
-        return OBJECT;
+    public ConnectClientRequestTest() {
+        super(OBJECT, 112);
     }
 
     @Test
     public void getMinVersionTest() {
-        Assert.assertEquals(MIN_VERSION, OBJECT.getMinVersion());
+        assertEquals(MIN_VERSION, OBJECT.getMinVersion());
     }
 
     @Test
     public void getMaxVersionTest() {
-        Assert.assertEquals(MAX_VERSION, OBJECT.getMaxVersion());
+        assertEquals(MAX_VERSION, OBJECT.getMaxVersion());
     }
 
     @Test
     public void toRequestFailureTest() {
-        final RequestException exception = new DeadTransactionException(ImmutableRangeSet.of());
-        final ConnectClientFailure failure = OBJECT.toRequestFailure(exception);
-        Assert.assertNotNull(failure);
+        final var exception = new DeadTransactionException(ImmutableRangeSet.of());
+        final var failure = OBJECT.toRequestFailure(exception);
+        assertNotNull(failure);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ConnectClientRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertNotNull(clone);
-        Assert.assertEquals(ABIVersion.BORON, clone.getVersion());
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertNotNull(clone);
+        assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("minVersion=" + MIN_VERSION));
-        Assert.assertTrue(result.toString().contains("maxVersion=" + MAX_VERSION));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("minVersion=" + MIN_VERSION));
+        assertThat(result, containsString("maxVersion=" + MAX_VERSION));
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ConnectClientRequest);
-        final ConnectClientRequest casted = (ConnectClientRequest) deserialize;
-
-        Assert.assertEquals(OBJECT.getMaxVersion(), casted.getMaxVersion());
-        Assert.assertEquals(OBJECT.getMinVersion(), casted.getMinVersion());
-        Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
+    protected void doAdditionalAssertions(final ConnectClientRequest deserialize) {
+        assertEquals(OBJECT.getMaxVersion(), deserialize.getMaxVersion());
+        assertEquals(OBJECT.getMinVersion(), deserialize.getMinVersion());
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index 1dfe3c301bcba2b67470fddb57562cef9e44dc2c..0267b8eb273191155f499920700ab2b90523db06 100644 (file)
@@ -7,6 +7,9 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
@@ -15,19 +18,17 @@ import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 
 public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<ConnectClientSuccess> {
-
     private static final DataTree TREE = new InMemoryDataTreeFactory().create(
         DataTreeConfiguration.DEFAULT_OPERATIONAL);
     private static final ActorSystem SYSTEM = ActorSystem.create("test");
@@ -35,12 +36,11 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<Connect
     private static final ActorSelection ACTOR_SELECTION =  ActorSelection.apply(ACTOR_REF, "foo");
     private static final List<ActorSelection> ALTERNATES = ImmutableList.of(ACTOR_SELECTION);
     private static final int MAX_MESSAGES = 10;
-    private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess(
-            CLIENT_IDENTIFIER, 0, ACTOR_REF, ALTERNATES, TREE, MAX_MESSAGES);
+    private static final ConnectClientSuccess OBJECT = new ConnectClientSuccess(CLIENT_IDENTIFIER, 0, ACTOR_REF,
+        ALTERNATES, TREE, MAX_MESSAGES);
 
-    @Override
-    protected ConnectClientSuccess object() {
-        return OBJECT;
+    public ConnectClientSuccessTest() {
+        super(OBJECT, 146 + ACTOR_REF.path().toSerializationFormat().length());
     }
 
     @Before
@@ -50,32 +50,36 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<Connect
 
     @Test
     public void testGetAlternates() {
-        final Collection<ActorSelection> alternates = OBJECT.getAlternates();
-        Assert.assertArrayEquals(ALTERNATES.toArray(), alternates.toArray());
+        final var alternates = OBJECT.getAlternates();
+        assertArrayEquals(ALTERNATES.toArray(), alternates.toArray());
     }
 
     @Test
     public void testGetBackend() {
         final ActorRef actorRef = OBJECT.getBackend();
-        Assert.assertEquals(ACTOR_REF, actorRef);
+        assertEquals(ACTOR_REF, actorRef);
     }
 
     @Test
     public void testGetDataTree() {
-        final DataTree tree = OBJECT.getDataTree().get();
-        Assert.assertEquals(TREE, tree);
+        final ReadOnlyDataTree tree = OBJECT.getDataTree().orElseThrow();
+        assertEquals(TREE, tree);
     }
 
     @Test
     public void testGetMaxMessages() {
-        final int maxMessages = OBJECT.getMaxMessages();
-        Assert.assertEquals(MAX_MESSAGES, maxMessages);
+        assertEquals(MAX_MESSAGES, OBJECT.getMaxMessages());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ConnectClientSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getAlternates(), clone.getAlternates());
+        assertEquals(OBJECT.getBackend(), clone.getBackend());
+        assertEquals(OBJECT.getDataTree(), clone.getDataTree());
+        assertEquals(OBJECT.getMaxMessages(), clone.getMaxMessages());
     }
 
     @Test
@@ -85,11 +89,10 @@ public class ConnectClientSuccessTest extends AbstractRequestSuccessTest<Connect
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ConnectClientSuccess);
-        Assert.assertEquals(OBJECT.getAlternates().size(), ((ConnectClientSuccess) deserialize).getAlternates().size());
-        Assert.assertEquals(OBJECT.getBackend(), ((ConnectClientSuccess) deserialize).getBackend());
-        Assert.assertEquals(Optional.empty(), ((ConnectClientSuccess) deserialize).getDataTree());
-        Assert.assertEquals(OBJECT.getMaxMessages(), ((ConnectClientSuccess) deserialize).getMaxMessages());
+    protected void doAdditionalAssertions(final ConnectClientSuccess deserialize) {
+        assertEquals(OBJECT.getAlternates().size(), deserialize.getAlternates().size());
+        assertEquals(OBJECT.getBackend(), deserialize.getBackend());
+        assertEquals(Optional.empty(), deserialize.getDataTree());
+        assertEquals(OBJECT.getMaxMessages(), deserialize.getMaxMessages());
     }
 }
index 2682c9df5982f7cd56d80e3bd2def73968f5e76b..f3e8aa0db6f91a751f328bdbd0104aef364b2581 100644 (file)
@@ -7,32 +7,31 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ExistsTransactionRequestTest extends AbstractReadTransactionRequestTest<ExistsTransactionRequest> {
-    private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY);
+    private static final ExistsTransactionRequest OBJECT = new ExistsTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, PATH, SNAPSHOT_ONLY);
 
-    @Override
-    protected ExistsTransactionRequest object() {
-        return OBJECT;
+    public ExistsTransactionRequestTest() {
+        super(OBJECT, 108);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
-        final ExistsTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion);
-        Assert.assertEquals(cloneVersion, clone.getVersion());
-        Assert.assertEquals(OBJECT.getPath(), clone.getPath());
-        Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
+        final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
+        final var clone = OBJECT.cloneAsVersion(cloneVersion);
+        assertEquals(cloneVersion, clone.getVersion());
+        assertEquals(OBJECT.getPath(), clone.getPath());
+        assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ExistsTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((ExistsTransactionRequest) deserialize).getReplyTo());
-        Assert.assertEquals(OBJECT.getPath(), ((ExistsTransactionRequest) deserialize).getPath());
+    protected void doAdditionalAssertions(final ExistsTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getPath(), deserialize.getPath());
     }
 }
\ No newline at end of file
index e5a74a63854811c9a5db5db20c37bcf38342c720..e8ce28dedb69441f9a53710005c5bcfcddb13348 100644 (file)
@@ -7,43 +7,45 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ExistsTransactionSuccessTest extends AbstractTransactionSuccessTest<ExistsTransactionSuccess> {
     private static final boolean EXISTS = true;
 
-    private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess(
-            TRANSACTION_IDENTIFIER, 0, EXISTS);
+    private static final ExistsTransactionSuccess OBJECT = new ExistsTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+        EXISTS);
 
-    @Override
-    protected ExistsTransactionSuccess object() {
-        return OBJECT;
+    public ExistsTransactionSuccessTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void getExistsTest() {
-        final boolean result = OBJECT.getExists();
-        Assert.assertEquals(EXISTS, result);
+        assertEquals(EXISTS, OBJECT.getExists());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ExistsTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getExists(), clone.getExists());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("exists=" + EXISTS));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("exists=" + EXISTS));
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ExistsTransactionSuccess);
-        Assert.assertEquals(OBJECT.getExists(), ((ExistsTransactionSuccess) deserialize).getExists());
+    protected void doAdditionalAssertions(final ExistsTransactionSuccess deserialize) {
+        assertEquals(OBJECT.getExists(), deserialize.getExists());
     }
 }
\ No newline at end of file
index c9d76f9b3d4591947978cf2f27e4bd8361285f98..13b9d6e8a19f0957bb875327e966e55c4908dad8 100644 (file)
@@ -7,21 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class LocalHistoryFailureTest extends AbstractRequestFailureTest<LocalHistoryFailure> {
     private static final LocalHistoryFailure OBJECT = new LocalHistoryFailure(HISTORY_IDENTIFIER, 0, CAUSE);
 
-    @Override
-    LocalHistoryFailure object() {
-        return OBJECT;
+    public LocalHistoryFailureTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final LocalHistoryFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getCause(), clone.getCause());
     }
 }
\ No newline at end of file
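
Note on the recurring cloneAsVersionTest change above and in the hunks that follow: whole-object equality against a clone taken at ABIVersion.BORON or ABIVersion.current() is replaced by cloning to ABIVersion.TEST_FUTURE_VERSION and comparing the relevant fields (BORON is no longer referenced). The helper below is a hypothetical sketch, not part of the commit; it assumes Message is declared as Message<T extends WritableIdentifier, C extends Message<T, C>> with a public cloneAsVersion(ABIVersion) returning C, which the calls in these tests are consistent with.

    import static org.junit.Assert.assertEquals;

    import org.opendaylight.controller.cluster.access.ABIVersion;
    import org.opendaylight.controller.cluster.access.concepts.Message;
    import org.opendaylight.yangtools.concepts.WritableIdentifier;

    final class CloneAssert {
        private CloneAssert() {
            // Utility class
        }

        // Clone at the future ABI version and verify the fields every cloneAsVersionTest checks;
        // callers can then compare any message-specific fields on the returned clone.
        static <T extends WritableIdentifier, C extends Message<T, C>> C assertCloneFields(final C object) {
            final C clone = object.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
            assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
            assertEquals(object.getSequence(), clone.getSequence());
            assertEquals(object.getTarget(), clone.getTarget());
            return clone;
        }
    }
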
index 635384304b41d2ee2fc59fb05de410533e743029..8afca0072c75c7451d7a982202b24e3316252ea5 100644 (file)
@@ -7,30 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class LocalHistorySuccessTest extends AbstractRequestSuccessTest<LocalHistorySuccess> {
+    private static final LocalHistorySuccess OBJECT = new LocalHistorySuccess(HISTORY_IDENTIFIER, 0);
 
-    private static final LocalHistorySuccess OBJECT = new LocalHistorySuccess(
-            HISTORY_IDENTIFIER, 0);
-
-    @Override
-    protected LocalHistorySuccess object() {
-        return OBJECT;
+    public LocalHistorySuccessTest() {
+        super(OBJECT, 96);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final LocalHistorySuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT.getSequence(), clone.getSequence());
-        Assert.assertEquals(OBJECT.getTarget(), clone.getTarget());
-        Assert.assertEquals(OBJECT.getVersion(), clone.getVersion());
-    }
-
-    @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof LocalHistorySuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
index a546e2819454a781e608073ec726e5bcf8a625b0..e424f37cf69061e26c85d1a6fa1bba067316f7b9 100644 (file)
@@ -8,12 +8,12 @@
 package org.opendaylight.controller.cluster.access.commands;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.testkit.TestActors;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
@@ -24,11 +24,11 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ModifyTransactionRequestBuilderTest {
-
     private final MemberName memberName = MemberName.forName("member-1");
     private final FrontendType frontendType = FrontendType.forName("test");
     private final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, frontendType);
@@ -36,10 +36,11 @@ public class ModifyTransactionRequestBuilderTest {
     private final TransactionIdentifier transactionIdentifier =
             new TransactionIdentifier(new LocalHistoryIdentifier(clientId, 0L), 0L);
     private final ActorRef actorRef = ActorSystem.create("test").actorOf(Props.create(TestActors.EchoActor.class));
-    private final NormalizedNode<?, ?> node = Builders.containerBuilder().withNodeIdentifier(
-            YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+    private final NormalizedNode node = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+        .build();
     private final TransactionModification transactionModification =
-            new TransactionWrite(YangInstanceIdentifier.EMPTY, node);
+            new TransactionWrite(YangInstanceIdentifier.of(), node);
     private final ModifyTransactionRequestBuilder modifyTransactionRequestBuilder =
             new ModifyTransactionRequestBuilder(transactionIdentifier, actorRef);
 
@@ -52,38 +53,37 @@ public class ModifyTransactionRequestBuilderTest {
 
     @Test
     public void testGetIdentifier() {
-        final TransactionIdentifier identifier = modifyTransactionRequestBuilder.getIdentifier();
-        Assert.assertEquals(transactionIdentifier, identifier);
+        final var identifier = modifyTransactionRequestBuilder.getIdentifier();
+        assertEquals(transactionIdentifier, identifier);
     }
 
     @Test
     public void testBuildReady() {
         modifyTransactionRequestBuilder.setReady();
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        Assert.assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().get());
-        Assert.assertEquals(transactionModification, modifyTransactionRequest.getModifications().get(0));
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.READY, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
+        assertEquals(transactionModification, modifyTransactionRequest.getModifications().get(0));
     }
 
     @Test
     public void testBuildAbort() {
         modifyTransactionRequestBuilder.setAbort();
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        Assert.assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().get());
-        Assert.assertTrue(modifyTransactionRequest.getModifications().isEmpty());
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.ABORT, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
+        assertTrue(modifyTransactionRequest.getModifications().isEmpty());
     }
 
     @Test
     public void testBuildCommitTrue() {
         modifyTransactionRequestBuilder.setCommit(true);
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        Assert.assertEquals(PersistenceProtocol.THREE_PHASE, modifyTransactionRequest.getPersistenceProtocol().get());
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.THREE_PHASE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
     }
 
     @Test
     public void testBuildCommitFalse() {
         modifyTransactionRequestBuilder.setCommit(false);
-        final ModifyTransactionRequest modifyTransactionRequest = modifyTransactionRequestBuilder.build();
-        Assert.assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().get());
+        final var modifyTransactionRequest = modifyTransactionRequestBuilder.build();
+        assertEquals(PersistenceProtocol.SIMPLE, modifyTransactionRequest.getPersistenceProtocol().orElseThrow());
     }
-
 }
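
Note: beyond the Assert-to-static-import cleanup, the hunk above tracks yangtools API changes: container nodes are built with ImmutableNodes.newContainerBuilder() from yang-data-spi instead of Builders.containerBuilder(), the empty instance identifier comes from YangInstanceIdentifier.of() instead of the EMPTY constant, and Optional.get() gives way to orElseThrow(). A minimal sketch of the node construction as used above; the class name NodeSketch is invented for illustration.

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
    import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

    final class NodeSketch {
        private NodeSketch() {
            // Utility class
        }

        // An empty container node addressed by a simple QName, as the tests above build it.
        static ContainerNode sampleContainer() {
            return ImmutableNodes.newContainerBuilder()
                .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
                .build();
        }

        // The root (empty) path, replacing the YangInstanceIdentifier.EMPTY references above.
        static YangInstanceIdentifier rootPath() {
            return YangInstanceIdentifier.of();
        }
    }
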
index 3f284e8fc4069ab7911d0368fa01d866528572f6..45ee0bd2584b5f5096d55bdab40410e26f94ceb6 100644 (file)
@@ -7,59 +7,55 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import com.google.common.base.MoreObjects;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ModifyTransactionRequestEmptyTest extends AbstractTransactionRequestTest<ModifyTransactionRequest> {
     private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT;
+    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, List.of(), PROTOCOL);
 
-    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, new ArrayList<>(), PROTOCOL);
-
-    @Override
-    protected ModifyTransactionRequest object() {
-        return OBJECT;
+    public ModifyTransactionRequestEmptyTest() {
+        super(OBJECT, 108);
     }
 
     @Test
     public void getPersistenceProtocolTest() {
-        final Optional<PersistenceProtocol> result = OBJECT.getPersistenceProtocol();
-        Assert.assertTrue(result.isPresent());
-        Assert.assertEquals(PROTOCOL, result.get());
+        assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol());
     }
 
     @Test
     public void getModificationsTest() {
-        final List<TransactionModification> result = OBJECT.getModifications();
-        Assert.assertNotNull(result);
-        Assert.assertTrue(result.isEmpty());
+        assertEquals(List.of(), OBJECT.getModifications());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("modifications=0"));
-        Assert.assertTrue(result.toString().contains("protocol=" + PROTOCOL));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("modifications=0"));
+        assertThat(result, containsString("protocol=" + PROTOCOL));
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ModifyTransactionRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+        assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ModifyTransactionRequest);
-        final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize;
-
-        Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
-        Assert.assertEquals(OBJECT.getModifications(), casted.getModifications());
-        Assert.assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol());
+    protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getModifications(), deserialize.getModifications());
+        assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol());
     }
 }
\ No newline at end of file
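
Note: the Optional- and List-valued assertions in this and the surrounding hunks collapse isPresent()/get() pairs into single equality checks: assertEquals(Optional.of(expected), actual) for present values, assertEquals(Optional.empty(), actual) for absent ones, and assertEquals(List.of(), actual) for empty collections. A small hypothetical sketch of the pattern; class and method names are illustrative only.

    import static org.junit.Assert.assertEquals;

    import java.util.List;
    import java.util.Optional;

    final class ContainerAsserts {
        private ContainerAsserts() {
            // Utility class
        }

        // Presence and value are verified by one assertion, yielding a single failure message.
        static <T> void assertPresent(final T expected, final Optional<T> actual) {
            assertEquals(Optional.of(expected), actual);
        }

        // An empty immutable list compares equal to any other empty List implementation.
        static void assertEmpty(final List<?> actual) {
            assertEquals(List.of(), actual);
        }
    }
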
index 8aef2747576781162e77b38294ff967340df2fd4..e3039d319ef219b919f9d5bd6dc42466dc217a33 100644 (file)
@@ -7,76 +7,75 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.opendaylight.controller.cluster.access.commands.TransactionModification.TYPE_WRITE;
 
 import com.google.common.base.MoreObjects;
-import com.google.common.collect.Lists;
 import java.util.List;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ModifyTransactionRequestTest extends AbstractTransactionRequestTest<ModifyTransactionRequest> {
-    private static final NormalizedNode<?, ?> NODE = Builders.containerBuilder().withNodeIdentifier(
-            YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+    private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+        .build();
 
-    private static final List<TransactionModification> MODIFICATIONS = Lists.newArrayList(
-            new TransactionWrite(YangInstanceIdentifier.EMPTY, NODE));
+    private static final List<TransactionModification> MODIFICATIONS = List.of(
+            new TransactionWrite(YangInstanceIdentifier.of(), NODE));
 
     private static final PersistenceProtocol PROTOCOL = PersistenceProtocol.ABORT;
 
-    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, MODIFICATIONS, PROTOCOL);
+    private static final ModifyTransactionRequest OBJECT = new ModifyTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, MODIFICATIONS, PROTOCOL);
 
-    @Override
-    protected ModifyTransactionRequest object() {
-        return OBJECT;
+    public ModifyTransactionRequestTest() {
+        super(OBJECT, 140);
     }
 
     @Test
     public void getPersistenceProtocolTest() {
-        final Optional<PersistenceProtocol> result = OBJECT.getPersistenceProtocol();
-        Assert.assertTrue(result.isPresent());
-        Assert.assertEquals(PROTOCOL, result.get());
+        assertEquals(Optional.of(PROTOCOL), OBJECT.getPersistenceProtocol());
     }
 
     @Test
     public void getModificationsTest() {
-        final List<TransactionModification> result = OBJECT.getModifications();
-        Assert.assertNotNull(result);
-        Assert.assertEquals(MODIFICATIONS, result);
+        assertEquals(MODIFICATIONS, OBJECT.getModifications());
     }
 
     @Test
     public void addToStringAttributesTest() {
-        final MoreObjects.ToStringHelper result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT));
-        Assert.assertTrue(result.toString().contains("modifications=1"));
-        Assert.assertTrue(result.toString().contains("protocol=" + PROTOCOL));
+        final var result = OBJECT.addToStringAttributes(MoreObjects.toStringHelper(OBJECT)).toString();
+        assertThat(result, containsString("modifications=1"));
+        assertThat(result, containsString("protocol=" + PROTOCOL));
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ModifyTransactionRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+        assertEquals(OBJECT.getModifications(), clone.getModifications());
+        assertEquals(OBJECT.getPersistenceProtocol(), clone.getPersistenceProtocol());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ModifyTransactionRequest);
-        final ModifyTransactionRequest casted = (ModifyTransactionRequest) deserialize;
-
-        Assert.assertEquals(OBJECT.getReplyTo(), casted.getReplyTo());
-        Assert.assertEquals(OBJECT.getPersistenceProtocol(), casted.getPersistenceProtocol());
-
-        Assert.assertNotNull(casted.getModifications());
-        Assert.assertEquals(1, casted.getModifications().size());
-        final TransactionModification modification = casted.getModifications().get(0);
-        Assert.assertEquals(YangInstanceIdentifier.EMPTY, modification.getPath());
-        Assert.assertEquals(TYPE_WRITE, modification.getType());
+    protected void doAdditionalAssertions(final ModifyTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getPersistenceProtocol(), deserialize.getPersistenceProtocol());
+        assertNotNull(deserialize.getModifications());
+        assertEquals(1, deserialize.getModifications().size());
+        final var modification = deserialize.getModifications().get(0);
+        assertEquals(YangInstanceIdentifier.of(), modification.getPath());
+        assertEquals(TYPE_WRITE, modification.getType());
     }
-}
\ No newline at end of file
+}
index 3756a8a2525825d0f47cae778d0713d497a394a0..8fb470d765e6fbe47b2a4c9c53f8dc82240265e5 100644 (file)
@@ -7,29 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ModifyTransactionSuccessTest extends AbstractTransactionSuccessTest<ModifyTransactionSuccess> {
-    private static final ModifyTransactionSuccess OBJECT = new ModifyTransactionSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final ModifyTransactionSuccess OBJECT = new ModifyTransactionSuccess(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected ModifyTransactionSuccess object() {
-        return OBJECT;
+    public ModifyTransactionSuccessTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ModifyTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT.getVersion(), clone.getVersion());
-        Assert.assertEquals(OBJECT.getSequence(), clone.getSequence());
-        Assert.assertEquals(OBJECT.getTarget(), clone.getTarget());
-    }
-
-    @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ModifyTransactionSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(ABIVersion.TEST_FUTURE_VERSION, clone.getVersion());
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index c4096833ca282f88c19b97791c8bb3986f1cb9b7..10f4f0eb7834208e21c51d344b73c5db06cb6a95 100644 (file)
@@ -7,32 +7,31 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class ReadTransactionRequestTest extends AbstractReadTransactionRequestTest<ReadTransactionRequest> {
-    private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF, PATH, SNAPSHOT_ONLY);
+    private static final ReadTransactionRequest OBJECT = new ReadTransactionRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, PATH, SNAPSHOT_ONLY);
 
-    @Override
-    protected ReadTransactionRequest object() {
-        return OBJECT;
+    public ReadTransactionRequestTest() {
+        super(OBJECT, 108);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ABIVersion cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
-        final ReadTransactionRequest clone = OBJECT.cloneAsVersion(cloneVersion);
-        Assert.assertEquals(cloneVersion, clone.getVersion());
-        Assert.assertEquals(OBJECT.getPath(), clone.getPath());
-        Assert.assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
+        final var cloneVersion = ABIVersion.TEST_FUTURE_VERSION;
+        final var clone = OBJECT.cloneAsVersion(cloneVersion);
+        assertEquals(cloneVersion, clone.getVersion());
+        assertEquals(OBJECT.getPath(), clone.getPath());
+        assertEquals(OBJECT.isSnapshotOnly(), clone.isSnapshotOnly());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ReadTransactionRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((ReadTransactionRequest) deserialize).getReplyTo());
-        Assert.assertEquals(OBJECT.getPath(), ((ReadTransactionRequest) deserialize).getPath());
+    protected void doAdditionalAssertions(final ReadTransactionRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+        assertEquals(OBJECT.getPath(), deserialize.getPath());
     }
 }
\ No newline at end of file
index 0a56e822a332fc9aa3adf241f5edef7902f8449a..055b6f5c88e94522a923621fef0398d0f7958190 100644 (file)
@@ -7,36 +7,35 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.junit.Assert.assertEquals;
+
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 public class ReadTransactionSuccessNoDataTest extends AbstractTransactionSuccessTest<ReadTransactionSuccess> {
-    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(
-            TRANSACTION_IDENTIFIER, 0, Optional.empty());
+    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+        Optional.empty());
 
-    @Override
-    protected ReadTransactionSuccess object() {
-        return OBJECT;
+    public ReadTransactionSuccessNoDataTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void getDataTest() {
-        final Optional<NormalizedNode<?, ?>> result = OBJECT.getData();
-        Assert.assertFalse(result.isPresent());
+        assertEquals(Optional.empty(), OBJECT.getData());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getData(), clone.getData());
     }
 
     @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ReadTransactionSuccess);
-        Assert.assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData());
+    protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) {
+        assertEquals(OBJECT.getData(), deserialize.getData());
     }
 }
index a50dc5d50cd7c19c999fea7a920ea0abaf879375..4557897ae5a52b5c69a19f4be451bca7d1466b25 100644 (file)
@@ -7,43 +7,43 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
+import static org.junit.Assert.assertEquals;
+
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ReadTransactionSuccessTest extends AbstractTransactionSuccessTest<ReadTransactionSuccess> {
-    private static final NormalizedNode<?, ?> NODE = Builders.containerBuilder().withNodeIdentifier(
-            YangInstanceIdentifier.NodeIdentifier.create(QName.create("namespace", "localName"))).build();
+    private static final ContainerNode NODE = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+        .build();
 
-    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(
-            TRANSACTION_IDENTIFIER, 0, Optional.of(NODE));
+    private static final ReadTransactionSuccess OBJECT = new ReadTransactionSuccess(TRANSACTION_IDENTIFIER, 0,
+        Optional.of(NODE));
 
-    @Override
-    protected ReadTransactionSuccess object() {
-        return OBJECT;
+    public ReadTransactionSuccessTest() {
+        super(OBJECT, 129);
     }
 
     @Test
     public void getDataTest() {
-        final Optional<NormalizedNode<?, ?>> result = OBJECT.getData();
-        Assert.assertTrue(result.isPresent());
-        Assert.assertEquals(NODE.getValue(), result.get().getValue());
+        assertEquals(Optional.of(NODE), OBJECT.getData());
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final ReadTransactionSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getData(), clone.getData());
     }
 
     @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof ReadTransactionSuccess);
-        Assert.assertEquals(OBJECT.getData(), ((ReadTransactionSuccess) deserialize).getData());
+    protected void doAdditionalAssertions(final ReadTransactionSuccess deserialize) {
+        assertEquals(OBJECT.getData(), deserialize.getData());
     }
 }
diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsRequestTest.java
new file mode 100644 (file)
index 0000000..27b30d9
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.primitives.UnsignedLong;
+import java.util.List;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.access.ABIVersion;
+
+public class SkipTransactionsRequestTest extends AbstractTransactionRequestTest<SkipTransactionsRequest> {
+    private static final SkipTransactionsRequest OBJECT = new SkipTransactionsRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF, List.of(UnsignedLong.ONE));
+
+    public SkipTransactionsRequestTest() {
+        super(OBJECT, 109);
+    }
+
+    @Test
+    public void cloneAsVersionTest() {
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
+    }
+
+    @Override
+    protected void doAdditionalAssertions(final SkipTransactionsRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java b/opendaylight/md-sal/cds-access-api/src/test/java/org/opendaylight/controller/cluster/access/commands/SkipTransactionsResponseTest.java
new file mode 100644 (file)
index 0000000..3ff798d
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.access.commands;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+import org.opendaylight.controller.cluster.access.ABIVersion;
+
+public class SkipTransactionsResponseTest extends AbstractTransactionSuccessTest<SkipTransactionsResponse> {
+    private static final SkipTransactionsResponse OBJECT = new SkipTransactionsResponse(TRANSACTION_IDENTIFIER, 0);
+
+    public SkipTransactionsResponseTest() {
+        super(OBJECT, 98);
+    }
+
+    @Test
+    public void cloneAsVersionTest() {
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+    }
+}
\ No newline at end of file
index 9c7dbf11d729bdc97eb05f64a23258e8b3bf1c9b..c0e1ae8e1f221c563fa7292ea6c6a3cc381acfb3 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionAbortRequestTest extends AbstractTransactionRequestTest<TransactionAbortRequest> {
-    private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionAbortRequest OBJECT = new TransactionAbortRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionAbortRequest object() {
-        return OBJECT;
+    public TransactionAbortRequestTest() {
+        super(OBJECT, 101);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionAbortRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionAbortRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionAbortRequest)deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionAbortRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index 31959aaf3e18022be887cfb9f645ad1d6f730b25..1ceab66a9557810ac0c460da5ad8a0f23ebd53f6 100644 (file)
@@ -7,27 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionAbortSuccessTest extends AbstractTransactionSuccessTest<TransactionAbortSuccess> {
-    private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionAbortSuccess OBJECT = new TransactionAbortSuccess(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected TransactionAbortSuccess object() {
-        return OBJECT;
+    public TransactionAbortSuccessTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionAbortSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionAbortSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index e8995a9e24737a3f35a9f99407725d40122e2048..ee84907d5413d246e2ecca90f99e138aabdb8a88 100644 (file)
@@ -7,27 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionCanCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionCanCommitSuccess> {
-    private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionCanCommitSuccess OBJECT = new TransactionCanCommitSuccess(TRANSACTION_IDENTIFIER,
+        0);
 
-    @Override
-    protected TransactionCanCommitSuccess object() {
-        return OBJECT;
+    public TransactionCanCommitSuccessTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionCanCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionCanCommitSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index 9db578d2b6d82e2f444c44da9812716e57196ff2..ca1f8f8dd920d13ef7db101ea78ef1e4415f052a 100644 (file)
@@ -7,27 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionCommitSuccess> {
-    private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionCommitSuccess OBJECT = new TransactionCommitSuccess(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected TransactionCommitSuccess object() {
-        return OBJECT;
+    public TransactionCommitSuccessTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionCommitSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
-}
\ No newline at end of file
+}
index 26f1a379bbef7cf1e89f38e321fee0831f606a6f..a5b3401a7f92aabacecdee947646f46ee683e26e 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionDoCommitRequestTest extends AbstractTransactionRequestTest<TransactionDoCommitRequest> {
-    private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionDoCommitRequest OBJECT = new TransactionDoCommitRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionDoCommitRequest object() {
-        return OBJECT;
+    public TransactionDoCommitRequestTest() {
+        super(OBJECT, 102);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionDoCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionDoCommitRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionDoCommitRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionDoCommitRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index 9e8467e5ca3425d381d0d03777f3c24a97316bf1..7e027ea2d396e8f8525e7e2aac95b8e951e9cee6 100644 (file)
@@ -7,21 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionFailureTest extends AbstractRequestFailureTest<TransactionFailure> {
     private static final TransactionFailure OBJECT = new TransactionFailure(TRANSACTION_IDENTIFIER, 0, CAUSE);
 
-    @Override
-    TransactionFailure object() {
-        return OBJECT;
+    public TransactionFailureTest() {
+        super(OBJECT, 100);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionFailure clone = OBJECT.cloneAsVersion(ABIVersion.current());
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getCause(), clone.getCause());
     }
 }
\ No newline at end of file
index ceac8606b87b55ed9063f08e350e35aef68ad612..21605372c2d2074ac94c2e9c86cda77ebe9539dd 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPreCommitRequestTest extends AbstractTransactionRequestTest<TransactionPreCommitRequest> {
-    private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionPreCommitRequest OBJECT = new TransactionPreCommitRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionPreCommitRequest object() {
-        return OBJECT;
+    public TransactionPreCommitRequestTest() {
+        super(OBJECT, 102);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPreCommitRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPreCommitRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPreCommitRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionPreCommitRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index 0130ea06ab8c515297854604452c64d73f29016b..5f8f29f45078da8c01c3265ade5c2cd52a4db1d8 100644 (file)
@@ -7,27 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPreCommitSuccessTest extends AbstractTransactionSuccessTest<TransactionPreCommitSuccess> {
-    private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionPreCommitSuccess OBJECT = new TransactionPreCommitSuccess(TRANSACTION_IDENTIFIER,
+        0);
 
-    @Override
-    protected TransactionPreCommitSuccess object() {
-        return OBJECT;
+    public TransactionPreCommitSuccessTest() {
+        super(OBJECT, 99);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPreCommitSuccess clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPreCommitSuccess);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index 5ae9f26dbf5605aa6239a8eea8099979086378f5..7453f4461a5f87146c45f398e113142db9c56f4e 100644 (file)
@@ -7,28 +7,29 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPurgeRequestTest extends AbstractTransactionRequestTest<TransactionPurgeRequest> {
-    private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest(
-            TRANSACTION_IDENTIFIER, 0, ACTOR_REF);
+    private static final TransactionPurgeRequest OBJECT = new TransactionPurgeRequest(TRANSACTION_IDENTIFIER, 0,
+        ACTOR_REF);
 
-    @Override
-    protected TransactionPurgeRequest object() {
-        return OBJECT;
+    public TransactionPurgeRequestTest() {
+        super(OBJECT, 101);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPurgeRequest clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
+        assertEquals(OBJECT.getReplyTo(), clone.getReplyTo());
     }
 
     @Override
-    protected void doAdditionalAssertions(final Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPurgeRequest);
-        Assert.assertEquals(OBJECT.getReplyTo(), ((TransactionPurgeRequest) deserialize).getReplyTo());
+    protected void doAdditionalAssertions(final TransactionPurgeRequest deserialize) {
+        assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
     }
 }
\ No newline at end of file
index bef9ae140bde60e9d8b13f5704e6b6964da743cf..e8b4294d5e828e3560a62af0603d3d22a10ffcd0 100644 (file)
@@ -7,27 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.commands;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 public class TransactionPurgeResponseTest extends AbstractTransactionSuccessTest<TransactionPurgeResponse> {
-    private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse(
-            TRANSACTION_IDENTIFIER, 0);
+    private static final TransactionPurgeResponse OBJECT = new TransactionPurgeResponse(TRANSACTION_IDENTIFIER, 0);
 
-    @Override
-    protected TransactionPurgeResponse object() {
-        return OBJECT;
+    public TransactionPurgeResponseTest() {
+        super(OBJECT, 98);
     }
 
     @Test
     public void cloneAsVersionTest() {
-        final TransactionPurgeResponse clone = OBJECT.cloneAsVersion(ABIVersion.BORON);
-        Assert.assertEquals(OBJECT, clone);
-    }
-
-    @Override
-    protected void doAdditionalAssertions(Object deserialize) {
-        Assert.assertTrue(deserialize instanceof TransactionPurgeResponse);
+        final var clone = OBJECT.cloneAsVersion(ABIVersion.TEST_FUTURE_VERSION);
+        assertEquals(OBJECT.getSequence(), clone.getSequence());
+        assertEquals(OBJECT.getTarget(), clone.getTarget());
     }
 }
\ No newline at end of file
index f0f5d3d26c56a7c557bd74e21dd294b620feaada..fc34fc39787e8b1d4e21e8c4840616960ca23a20 100644 (file)
@@ -7,12 +7,18 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import static java.util.Objects.requireNonNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public abstract class AbstractEnvelopeTest<E extends Envelope<?>> {
+    protected record EnvelopeDetails<E extends Envelope<?>>(E envelope, int expectedSize) {
+        // Nothing else
+    }
+
     private static final FrontendIdentifier FRONTEND =
             new FrontendIdentifier(MemberName.forName("test"), FrontendIdentifierTest.ONE_FRONTEND_TYPE);
     private static final ClientIdentifier CLIENT = new ClientIdentifier(FRONTEND, 0);
@@ -20,33 +26,37 @@ public abstract class AbstractEnvelopeTest<E extends Envelope<?>> {
     protected static final TransactionIdentifier OBJECT = new TransactionIdentifier(HISTORY, 0);
 
     private E envelope;
+    private int expectedSize;
 
     @Before
     public void setUp() throws Exception {
-        envelope = createEnvelope();
+        final var details = createEnvelope();
+        envelope = requireNonNull(details.envelope);
+        expectedSize = details.expectedSize;
     }
 
     @Test
     public void testProxySerializationDeserialization() {
         final byte[] serializedBytes = SerializationUtils.serialize(envelope);
-        final Object deserialize = SerializationUtils.deserialize(serializedBytes);
-        checkDeserialized((E) deserialize);
+        assertEquals(expectedSize, serializedBytes.length);
+        @SuppressWarnings("unchecked")
+        final E deserialize = (E) SerializationUtils.deserialize(serializedBytes);
+        checkDeserialized(deserialize);
     }
 
     private void checkDeserialized(final E deserializedEnvelope) {
-        Assert.assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
-        Assert.assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
-        final Message<?, ?> expectedMessage = envelope.getMessage();
-        final Message<?, ?> actualMessage = deserializedEnvelope.getMessage();
-        Assert.assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
-        Assert.assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
-        Assert.assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
-        Assert.assertEquals(expectedMessage.getClass(), actualMessage.getClass());
+        assertEquals(envelope.getSessionId(), deserializedEnvelope.getSessionId());
+        assertEquals(envelope.getTxSequence(), deserializedEnvelope.getTxSequence());
+        final var expectedMessage = envelope.getMessage();
+        final var actualMessage = deserializedEnvelope.getMessage();
+        assertEquals(expectedMessage.getSequence(), actualMessage.getSequence());
+        assertEquals(expectedMessage.getTarget(), actualMessage.getTarget());
+        assertEquals(expectedMessage.getVersion(), actualMessage.getVersion());
+        assertEquals(expectedMessage.getClass(), actualMessage.getClass());
         doAdditionalAssertions(envelope, deserializedEnvelope);
     }
 
-    protected abstract E createEnvelope();
+    protected abstract EnvelopeDetails<E> createEnvelope();
 
-    @SuppressWarnings("checkstyle:hiddenField")
     protected abstract void doAdditionalAssertions(E envelope, E resolvedObject);
 }
index 080e1e46b4854a74615e1ac8d9a08bd3066cb01c..74cd4cf3ba182d2b3ee3c805dba0b33991589f25 100644 (file)
@@ -26,6 +26,8 @@ public abstract class AbstractIdentifierTest<T extends Identifier> {
 
     abstract T equalObject();
 
+    abstract int expectedSize();
+
     @Test
     public final void testEquals() {
         assertTrue(object().equals(object()));
@@ -40,22 +42,25 @@ public abstract class AbstractIdentifierTest<T extends Identifier> {
         assertEquals(object().hashCode(), equalObject().hashCode());
     }
 
+    @Test
+    public final void testSerialization() throws Exception {
+        assertTrue(object().equals(copy(object())));
+        assertTrue(object().equals(copy(equalObject())));
+        assertFalse(differentObject().equals(copy(object())));
+    }
+
     @SuppressWarnings("unchecked")
-    private static <T> T copy(T obj) throws IOException, ClassNotFoundException {
+    private T copy(final T obj) throws IOException, ClassNotFoundException {
         final ByteArrayOutputStream bos = new ByteArrayOutputStream();
         try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
             oos.writeObject(obj);
         }
 
-        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
+        final byte[] bytes = bos.toByteArray();
+        assertEquals(expectedSize(), bytes.length);
+
+        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
             return (T) ois.readObject();
         }
     }
-
-    @Test
-    public final void testSerialization() throws Exception {
-        assertTrue(object().equals(copy(object())));
-        assertTrue(object().equals(copy(equalObject())));
-        assertFalse(differentObject().equals(copy(object())));
-    }
 }
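
Note: AbstractIdentifierTest now pins the Java-serialized footprint of each identifier: copy() became an instance method so it can assert the new abstract expectedSize() before deserializing (ClientIdentifierTest supplies 94 in the next hunk). The sketch below is hypothetical, showing one way to obtain such a figure using the same commons-lang3 SerializationUtils the other tests in this commit rely on; the factory calls are the ones appearing in the hunks above, and the printed value is whatever the current wire format produces.

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;
    import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
    import org.opendaylight.controller.cluster.access.concepts.FrontendType;
    import org.opendaylight.controller.cluster.access.concepts.MemberName;

    final class IdentifierSizeSketch {
        private IdentifierSizeSketch() {
            // Utility class
        }

        // Serialized length of any Serializable identifier, i.e. the value the corresponding
        // expectedSize() override needs to return.
        static int serializedSize(final Serializable identifier) {
            return SerializationUtils.serialize(identifier).length;
        }

        public static void main(final String[] args) {
            final var frontendId = FrontendIdentifier.create(
                MemberName.forName("member-1"), FrontendType.forName("test"));
            System.out.println("FrontendIdentifier: " + serializedSize(frontendId) + " bytes");
        }
    }
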
index c65c1956c2a9ee1c12dae42760cbc41db6223665..48ceabef81cd87259df989c49ac9cf917350c6cb 100644 (file)
@@ -7,22 +7,37 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static java.util.Objects.requireNonNull;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import com.google.common.base.MoreObjects;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public abstract class AbstractRequestTest<T extends Request<?, T>> {
     private static final ActorSystem SYSTEM = ActorSystem.create("test");
     protected static final ActorRef ACTOR_REF = TestProbe.apply(SYSTEM).ref();
+    private static final int ACTOR_REF_SIZE = ACTOR_REF.path().toSerializationFormat().length();
+
+    private final T object;
+    private final int expectedSize;
 
-    protected abstract T object();
+    protected AbstractRequestTest(final T object, final int baseSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = baseSize + ACTOR_REF_SIZE;
+    }
+
+    protected final T object() {
+        return object;
+    }
 
     @Before
     public void setUp() {
@@ -31,25 +46,27 @@ public abstract class AbstractRequestTest<T extends Request<?, T>> {
 
     @Test
     public void getReplyToTest() {
-        Assert.assertEquals(ACTOR_REF, object().getReplyTo());
+        assertEquals(ACTOR_REF, object.getReplyTo());
     }
 
     @Test
     public void addToStringAttributesCommonTest() {
-        final MoreObjects.ToStringHelper result = object().addToStringAttributes(MoreObjects.toStringHelper(object()));
-        Assert.assertTrue(result.toString().contains("replyTo=" + ACTOR_REF));
+        final var result = object.addToStringAttributes(MoreObjects.toStringHelper(object));
+        assertThat(result.toString(), containsString("replyTo=" + ACTOR_REF));
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void serializationTest() {
-        final Object deserialize = SerializationUtils.clone(object());
+        final byte[] bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+        @SuppressWarnings("unchecked")
+        final T deserialize = (T) SerializationUtils.deserialize(bytes);
 
-        Assert.assertEquals(object().getTarget(), ((T) deserialize).getTarget());
-        Assert.assertEquals(object().getVersion(), ((T) deserialize).getVersion());
-        Assert.assertEquals(object().getSequence(), ((T) deserialize).getSequence());
+        assertEquals(object.getTarget(), deserialize.getTarget());
+        assertEquals(object.getVersion(), deserialize.getVersion());
+        assertEquals(object.getSequence(), deserialize.getSequence());
         doAdditionalAssertions(deserialize);
     }
 
-    protected abstract void doAdditionalAssertions(Object deserialize);
+    protected abstract void doAdditionalAssertions(T deserialize);
 }
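
Note: AbstractRequestTest now takes the fixture and a base byte count in its constructor and derives the expected serialized size by adding the test ActorRef's serialization-format path length, which is why the subclass hunks above pass small fixed values (101, 102, 108, 109, 140). Below is a sketch of the resulting subclass shape, modelled on the TransactionPurgeRequest hunk; the class name ExamplePurgeRequestTest is invented, and TRANSACTION_IDENTIFIER/ACTOR_REF are assumed to be the protected constants of the abstract bases, as the hunks suggest.

    package org.opendaylight.controller.cluster.access.commands;

    import static org.junit.Assert.assertEquals;

    public class ExamplePurgeRequestTest extends AbstractTransactionRequestTest<TransactionPurgeRequest> {
        private static final TransactionPurgeRequest OBJECT =
            new TransactionPurgeRequest(TRANSACTION_IDENTIFIER, 0, ACTOR_REF);

        public ExamplePurgeRequestTest() {
            // 101 bytes of fixed payload; AbstractRequestTest adds ACTOR_REF's serialized
            // path length on top before asserting the total in serializationTest().
            super(OBJECT, 101);
        }

        @Override
        protected void doAdditionalAssertions(final TransactionPurgeRequest deserialize) {
            assertEquals(OBJECT.getReplyTo(), deserialize.getReplyTo());
        }
    }
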
index 4871def180b7a63d529d9b4facb8f4c554dde7cf..d9bd5c126b87704243fb56282f8f84e0a9e1b7cf 100644 (file)
@@ -29,4 +29,9 @@ public class ClientIdentifierTest extends AbstractIdentifierTest<ClientIdentifie
     ClientIdentifier equalObject() {
         return EQUAL_OBJECT;
     }
+
+    @Override
+    int expectedSize() {
+        return 94;
+    }
 }
index 733c3c7525041ee316e74264172df73976b39d4c..70132a6c05d0d2bb4da906d3358d5cae6512701b 100644 (file)
@@ -7,32 +7,39 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
 
 import java.io.DataInput;
 import java.io.IOException;
+import org.apache.commons.lang3.SerializationUtils;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
 public class FailureEnvelopeTest extends AbstractEnvelopeTest<FailureEnvelope> {
-
     @Override
-    protected FailureEnvelope createEnvelope() {
-        final RequestFailure<?, ?> message =
-                new MockFailure(OBJECT, new RuntimeRequestException("msg", new RuntimeException()), 42);
-        return new FailureEnvelope(message, 1L, 2L, 11L);
+    protected EnvelopeDetails<FailureEnvelope> createEnvelope() {
+        final var cause = new RuntimeRequestException("msg", new RuntimeException());
+        final int causeSize = SerializationUtils.serialize(cause).length;
+        return new EnvelopeDetails<>(new FailureEnvelope(new MockFailure(OBJECT, cause, 42), 1L, 2L, 11L),
+            causeSize + 216);
     }
 
     @Override
     protected void doAdditionalAssertions(final FailureEnvelope envelope, final FailureEnvelope resolvedObject) {
         assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
-        final RequestException expectedCause = envelope.getMessage().getCause();
-        final RequestException actualCause = resolvedObject.getMessage().getCause();
+        final var expectedCause = envelope.getMessage().getCause();
+        final var actualCause = resolvedObject.getMessage().getCause();
         assertEquals(expectedCause.getMessage(), actualCause.getMessage());
         assertEquals(expectedCause.isRetriable(), actualCause.isRetriable());
     }
 
-    private static class MockRequestFailureProxy extends AbstractRequestFailureProxy<WritableIdentifier, MockFailure> {
+    private static class MockRequestFailureProxy implements RequestFailure.SerialForm<WritableIdentifier, MockFailure> {
+        @java.io.Serial
+        private static final long serialVersionUID = 5015515628523887221L;
+
+        private MockFailure message;
 
         @SuppressWarnings("checkstyle:RedundantModifier")
         public MockRequestFailureProxy() {
@@ -40,23 +47,38 @@ public class FailureEnvelopeTest extends AbstractEnvelopeTest<FailureEnvelope> {
         }
 
         private MockRequestFailureProxy(final MockFailure mockFailure) {
-            super(mockFailure);
+            message = requireNonNull(mockFailure);
         }
 
         @Override
-        protected MockFailure createFailure(final WritableIdentifier target, final long sequence,
-                                            final RequestException failureCause) {
+        public MockFailure createFailure(final WritableIdentifier target, final long sequence,
+                final RequestException failureCause) {
             return new MockFailure(target, failureCause, sequence);
         }
 
         @Override
-        protected WritableIdentifier readTarget(final DataInput in) throws IOException {
+        public WritableIdentifier readTarget(final DataInput in) throws IOException {
             return TransactionIdentifier.readFrom(in);
         }
 
+        @Override
+        public MockFailure message() {
+            return verifyNotNull(message);
+        }
+
+        @Override
+        public void setMessage(final MockFailure message) {
+            this.message = requireNonNull(message);
+        }
+
+        @Override
+        public Object readResolve() {
+            return message();
+        }
     }
 
     private static class MockFailure extends RequestFailure<WritableIdentifier, MockFailure> {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         MockFailure(final WritableIdentifier target, final RequestException cause, final long sequence) {
@@ -64,7 +86,7 @@ public class FailureEnvelopeTest extends AbstractEnvelopeTest<FailureEnvelope> {
         }
 
         @Override
-        protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
+        protected RequestFailure.SerialForm<WritableIdentifier, MockFailure> externalizableProxy(
                 final ABIVersion version) {
             return new MockRequestFailureProxy(this);
         }
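
MockRequestFailureProxy above now implements the SerialForm contract and swaps the real message back in via readResolve(). The underlying Java serialization-proxy idiom, sketched here with purely illustrative Ticket/Proxy names, works as follows:

    import java.io.Serial;
    import java.io.Serializable;

    // Sketch of the serialization-proxy idiom: the proxy is what travels over the wire,
    // and readResolve() reconstructs the real object on the receiving side.
    final class Ticket implements Serializable {
        @Serial
        private static final long serialVersionUID = 1L;

        private final long sequence;

        Ticket(final long sequence) {
            this.sequence = sequence;
        }

        long sequence() {
            return sequence;
        }

        // Serialize the proxy instead of this object.
        @Serial
        private Object writeReplace() {
            return new Proxy(sequence);
        }

        private static final class Proxy implements Serializable {
            @Serial
            private static final long serialVersionUID = 1L;

            private final long sequence;

            Proxy(final long sequence) {
                this.sequence = sequence;
            }

            // Turn the deserialized proxy back into the real object.
            @Serial
            private Object readResolve() {
                return new Ticket(sequence);
            }
        }
    }
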
index cc56fc1101b3efecba6dbfa0285bd207b4734761..203ffd5ab90a8c4b59589914eed8bbec180d051f 100644 (file)
@@ -30,4 +30,9 @@ public class FrontendIdentifierTest extends AbstractIdentifierTest<FrontendIdent
     FrontendIdentifier equalObject() {
         return EQUAL_OBJECT;
     }
+
+    @Override
+    int expectedSize() {
+        return 93;
+    }
 }
index e14bf766a244add8ff171ebf37c2b3da59ed227d..904a27f2e51721f5f0af0e5ed520ba06c0ee46e3 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -31,14 +33,21 @@ public class FrontendTypeTest extends AbstractIdentifierTest<FrontendType> {
         return FrontendType.forName("type-1");
     }
 
+    @Override
+    int expectedSize() {
+        return 88;
+    }
+
     @Test
     public void testWriteToReadFrom() throws Exception {
         final FrontendType type = FrontendType.forName("type");
         final ByteArrayOutputStream baos = new ByteArrayOutputStream();
         final DataOutputStream dos = new DataOutputStream(baos);
         type.writeTo(dos);
-        final FrontendType read =
-                FrontendType.readFrom(new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
+
+        final byte[] bytes = baos.toByteArray();
+        assertEquals(8, bytes.length);
+        final FrontendType read = FrontendType.readFrom(new DataInputStream(new ByteArrayInputStream(bytes)));
         Assert.assertEquals(type, read);
     }
 
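
The added assertion above pins FrontendType's encoded form at 8 bytes. A stand-alone sketch of the same write/assert/read pattern, using plain DataOutputStream.writeUTF() rather than FrontendType's own encoding (so the expected length differs), could be:

    import static org.junit.Assert.assertEquals;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.junit.Test;

    public class WriteReadRoundTripTest {
        @Test
        public void utfRoundTrip() throws IOException {
            final ByteArrayOutputStream baos = new ByteArrayOutputStream();
            try (DataOutputStream dos = new DataOutputStream(baos)) {
                dos.writeUTF("type");
            }

            final byte[] bytes = baos.toByteArray();
            // writeUTF() emits a two-byte length prefix followed by the encoded characters: 2 + 4.
            assertEquals(6, bytes.length);

            try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes))) {
                assertEquals("type", dis.readUTF());
            }
        }
    }
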
index 159896b9f4fc7cd09262702adad91de3ef39a834..dc53bb3a76dfc31e62333323acdd52c1b91f6fd2 100644 (file)
@@ -30,4 +30,9 @@ public class LocalHistoryIdentifierTest extends AbstractIdentifierTest<LocalHist
     LocalHistoryIdentifier equalObject() {
         return EQUAL_OBJECT;
     }
+
+    @Override
+    int expectedSize() {
+        return 95;
+    }
 }
index 2c377e5efd14d32ad08b038d66854db58adc7963..469916c68acc019de1606e7ca2d9883a98169400 100644 (file)
@@ -32,6 +32,10 @@ public class MemberNameTest extends AbstractIdentifierTest<MemberName> {
         return EQUAL_OBJECT;
     }
 
+    @Override
+    int expectedSize() {
+        return 87;
+    }
 
     @Test
     public void testCompareTo() {
index b63dc4c78c741d514a097d2c7bf01b2eb7000166..30366c99f1a52a9e1167eb24e93e68e9de58c796 100644 (file)
@@ -7,19 +7,21 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeResponse;
 
 public class RequestEnvelopeTest extends AbstractEnvelopeTest<RequestEnvelope> {
-
     private ActorSystem system;
     private ActorRef replyTo;
     private TestProbe replyToProbe;
@@ -33,27 +35,29 @@ public class RequestEnvelopeTest extends AbstractEnvelopeTest<RequestEnvelope> {
     }
 
     @Override
-    protected RequestEnvelope createEnvelope() {
+    protected EnvelopeDetails<RequestEnvelope> createEnvelope() {
         replyToProbe = new TestProbe(system);
         replyTo = replyToProbe.ref();
-        final TransactionPurgeRequest message = new TransactionPurgeRequest(OBJECT, 2L, replyTo);
-        return new RequestEnvelope(message, 1L, 2L);
+        final int refSize = replyTo.path().toSerializationFormat().length();
+
+        return new EnvelopeDetails<>(new RequestEnvelope(new TransactionPurgeRequest(OBJECT, 2L, replyTo), 1L, 2L),
+            refSize + 179);
     }
 
     @Override
     protected void doAdditionalAssertions(final RequestEnvelope envelope, final RequestEnvelope resolvedObject) {
         final Request<?, ?> actual = resolvedObject.getMessage();
-        Assert.assertTrue(actual instanceof TransactionPurgeRequest);
-        final TransactionPurgeRequest purgeRequest = (TransactionPurgeRequest) actual;
-        Assert.assertEquals(replyTo, purgeRequest.getReplyTo());
-        final TransactionPurgeResponse response = new TransactionPurgeResponse(OBJECT, 2L);
+        assertThat(actual, instanceOf(TransactionPurgeRequest.class));
+        final var purgeRequest = (TransactionPurgeRequest) actual;
+        assertEquals(replyTo, purgeRequest.getReplyTo());
+        final var response = new TransactionPurgeResponse(OBJECT, 2L);
         resolvedObject.sendSuccess(response, 11L);
-        final SuccessEnvelope successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class);
-        Assert.assertEquals(response, successEnvelope.getMessage());
-        final RuntimeRequestException failResponse = new RuntimeRequestException("fail", new RuntimeException());
+        final var successEnvelope = replyToProbe.expectMsgClass(SuccessEnvelope.class);
+        assertEquals(response, successEnvelope.getMessage());
+        final var failResponse = new RuntimeRequestException("fail", new RuntimeException());
         resolvedObject.sendFailure(failResponse, 11L);
-        final FailureEnvelope failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class);
-        Assert.assertEquals(failResponse, failureEnvelope.getMessage().getCause());
+        final var failureEnvelope = replyToProbe.expectMsgClass(FailureEnvelope.class);
+        assertEquals(failResponse, failureEnvelope.getMessage().getCause());
     }
 
     @After
index 30d9e98636e50e036fccdc1791d923ea72d83d6d..9d1aa40b61f151cf38082e89a908692e995faed2 100644 (file)
@@ -7,20 +7,18 @@
  */
 package org.opendaylight.controller.cluster.access.concepts;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+
 import org.opendaylight.controller.cluster.access.commands.TransactionAbortSuccess;
 
 public class SuccessEnvelopeTest extends AbstractEnvelopeTest<SuccessEnvelope> {
-
     @Override
-    protected SuccessEnvelope createEnvelope() {
-        final RequestSuccess<?, ?> message = new TransactionAbortSuccess(OBJECT, 2L);
-        return new SuccessEnvelope(message, 1L, 2L, 11L);
+    protected EnvelopeDetails<SuccessEnvelope> createEnvelope() {
+        return new EnvelopeDetails<>(new SuccessEnvelope(new TransactionAbortSuccess(OBJECT, 2L), 1L, 2L, 11L), 180);
     }
 
     @Override
-    protected void doAdditionalAssertions(final SuccessEnvelope envelope,
-                                          final SuccessEnvelope resolvedObject) {
-        Assert.assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
+    protected void doAdditionalAssertions(final SuccessEnvelope envelope, final SuccessEnvelope resolvedObject) {
+        assertEquals(envelope.getExecutionTimeNanos(), resolvedObject.getExecutionTimeNanos());
     }
 }
\ No newline at end of file
index e1c281ac2bdf2089482c6774a1553877dcb1cc1c..4433c49a6ebda5f959a4714a01a23616dace3608 100644 (file)
@@ -31,4 +31,9 @@ public class TransactionIdentifierTest extends AbstractIdentifierTest<Transactio
     TransactionIdentifier equalObject() {
         return EQUAL_OBJECT;
     }
+
+    @Override
+    int expectedSize() {
+        return 96;
+    }
 }
index 0d1434c5b450b6482b220388cfd61b4d64605a63..78a4e73946a2b6d85f8b39940a1b8ccb5844d9a5 100644 (file)
 <?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>cds-access-client</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
     <packaging>bundle</packaging>
 
     <dependencies>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+           <groupId>com.google.guava</groupId>
+           <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>cds-access-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-actor_2.12</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
         </dependency>
-
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>concepts</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-data-api</artifactId>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.checkerframework</groupId>
+            <artifactId>checker-qual</artifactId>
+            <optional>true</optional>
         </dependency>
 
         <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+            <scope>test</scope>
         </dependency>
         <dependency>
             <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-testkit_2.12</artifactId>
+            <artifactId>akka-testkit_2.13</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava-testlib</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
             <scope>test</scope>
         </dependency>
         <dependency>
-           <groupId>com.google.guava</groupId>
-           <artifactId>guava-testlib</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-impl</artifactId>
+            <scope>test</scope>
         </dependency>
         <dependency>
-          <groupId>org.opendaylight.controller</groupId>
-          <artifactId>sal-clustering-commons</artifactId>
-          <type>test-jar</type>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
+            <type>test-jar</type>
         </dependency>
     </dependencies>
 
     <build>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-jar-plugin</artifactId>
-          <executions>
-            <execution>
-              <goals>
-                <goal>test-jar</goal>
-              </goals>
-            </execution>
-          </executions>
-        </plugin>
-      </plugins>
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <artifactId>maven-javadoc-plugin</artifactId>
+                    <version>3.1.1</version>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <configuration>
+                    <instructions>
+                        <Automatic-Module-Name>org.opendaylight.controller.cluster.access.client</Automatic-Module-Name>
+                    </instructions>
+                </configuration>
+            </plugin>
+            <plugin>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-source-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar-no-fork</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
     </build>
 
     <scm>
index 7b592fbdb2e0834391c45de165d92505f78cf697..98edb1d3419dbde6e97352c964f143f10558bfcb 100644 (file)
@@ -9,19 +9,15 @@ package org.opendaylight.controller.cluster.access.client;
 
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
-import akka.persistence.UntypedPersistentActor;
-import com.google.common.annotations.Beta;
+import akka.persistence.AbstractPersistentActor;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Frontend actor which takes care of persisting generations and creates an appropriate ClientIdentifier.
- *
- * @author Robert Varga
  */
-@Beta
-public abstract class AbstractClientActor extends UntypedPersistentActor {
+public abstract class AbstractClientActor extends AbstractPersistentActor {
     private static final Logger LOG = LoggerFactory.getLogger(AbstractClientActor.class);
     private AbstractClientActorBehavior<?> currentBehavior;
 
@@ -36,7 +32,7 @@ public abstract class AbstractClientActor extends UntypedPersistentActor {
     }
 
     @Override
-    public void postStop() {
+    public void postStop() throws Exception {
         if (currentBehavior != null) {
             currentBehavior.close();
         }
@@ -59,7 +55,16 @@ public abstract class AbstractClientActor extends UntypedPersistentActor {
     }
 
     @Override
-    public final void onReceiveCommand(final Object command) {
+    public Receive createReceive() {
+        return receiveBuilder().matchAny(this::onReceiveCommand).build();
+    }
+
+    @Override
+    public Receive createReceiveRecover() {
+        return receiveBuilder().matchAny(this::onReceiveRecover).build();
+    }
+
+    private void onReceiveCommand(final Object command) {
         if (command == null) {
             LOG.debug("{}: ignoring null command", persistenceId());
             return;
@@ -72,8 +77,7 @@ public abstract class AbstractClientActor extends UntypedPersistentActor {
         }
     }
 
-    @Override
-    public final void onReceiveRecover(final Object recover) {
+    private void onReceiveRecover(final Object recover) {
         switchBehavior(currentBehavior.onReceiveRecover(recover));
     }
 
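
The actor above moves from UntypedPersistentActor to AbstractPersistentActor, so its command and recovery entry points become Receive instances built with receiveBuilder(). A minimal, hypothetical persistent actor following the same shape (the persistence id and handlers are placeholders) might be:

    import akka.persistence.AbstractPersistentActor;

    public class ExamplePersistentActor extends AbstractPersistentActor {
        @Override
        public String persistenceId() {
            return "example-actor";
        }

        @Override
        public Receive createReceive() {
            // All live commands funnel through a single handler, mirroring the old
            // onReceiveCommand(Object) entry point.
            return receiveBuilder().matchAny(this::onCommand).build();
        }

        @Override
        public Receive createReceiveRecover() {
            // Snapshots, replayed events and RecoveryCompleted likewise go through one handler.
            return receiveBuilder().matchAny(this::onRecover).build();
        }

        private void onCommand(final Object command) {
            // Placeholder: dispatch to the current behavior.
        }

        private void onRecover(final Object recover) {
            // Placeholder: rebuild state from persisted data.
        }
    }
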
index 4188a41fd5720894ffefc2351c2dd803d315352b..39ae396cff714c088c7a8405ee64480a4f9124b5 100644 (file)
@@ -10,7 +10,6 @@ package org.opendaylight.controller.cluster.access.client;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
-import com.google.common.annotations.Beta;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 
@@ -18,10 +17,7 @@ import org.eclipse.jdt.annotation.Nullable;
  * Base behavior attached to {@link AbstractClientActor}.
  *
  * @param <C> Type of associated context
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class AbstractClientActorBehavior<C extends AbstractClientActorContext> implements AutoCloseable {
     private final @NonNull C context;
 
@@ -60,6 +56,7 @@ public abstract class AbstractClientActorBehavior<C extends AbstractClientActorC
 
     @Override
     public void close() {
+        // No-op
     }
 
     /**
index 03d4691cb44fe14eb05c33790632859c17d14b8f..f34760ec03c0057f2393d4a9504b6399de68ecb9 100644 (file)
@@ -15,14 +15,16 @@ import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Collection;
+import java.util.List;
 import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Consumer;
-import javax.annotation.concurrent.GuardedBy;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -36,11 +38,10 @@ import scala.concurrent.duration.FiniteDuration;
 /**
  * Base class for a connection to the backend. Responsible for queueing and dispatch of requests toward the backend.
  * Can be in three conceptual states: Connecting, Connected and Reconnecting, which are represented by public final
- * classes exposed from this package.
+ * classes exposed from this package. This class is NOT thread-safe, nor are its subclasses expected to be thread-safe.
  *
  * @author Robert Varga
  */
-@NotThreadSafe
 public abstract class AbstractClientConnection<T extends BackendInfo> {
     private static final Logger LOG = LoggerFactory.getLogger(AbstractClientConnection.class);
 
@@ -75,11 +76,11 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
     private static final long MAX_DELAY_NANOS = TimeUnit.SECONDS.toNanos(MAX_DELAY_SECONDS);
 
     private final Lock lock = new ReentrantLock();
-    private final ClientActorContext context;
-    @GuardedBy("lock")
-    private final TransmitQueue queue;
+    private final @NonNull ClientActorContext context;
     private final @NonNull Long cookie;
     private final String backendName;
+    @GuardedBy("lock")
+    private final TransmitQueue queue;
 
     @GuardedBy("lock")
     private boolean haveTimer;
@@ -94,12 +95,12 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
     // Private constructor to avoid code duplication.
     private AbstractClientConnection(final AbstractClientConnection<T> oldConn, final TransmitQueue newQueue,
             final String backendName) {
-        this.context = requireNonNull(oldConn.context);
-        this.cookie = requireNonNull(oldConn.cookie);
+        context = oldConn.context;
+        cookie = oldConn.cookie;
         this.backendName = requireNonNull(backendName);
-        this.queue = requireNonNull(newQueue);
+        queue = requireNonNull(newQueue);
         // Will be updated in finishReplay if needed.
-        this.lastReceivedTicks = oldConn.lastReceivedTicks;
+        lastReceivedTicks = oldConn.lastReceivedTicks;
     }
 
     // This constructor is only to be called by ConnectingClientConnection constructor.
@@ -109,8 +110,8 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         this.context = requireNonNull(context);
         this.cookie = requireNonNull(cookie);
         this.backendName = requireNonNull(backendName);
-        this.queue = new TransmitQueue.Halted(queueDepth);
-        this.lastReceivedTicks = currentTime();
+        queue = new TransmitQueue.Halted(queueDepth);
+        lastReceivedTicks = currentTime();
     }
 
     // This constructor is only to be called (indirectly) by ReconnectingClientConnection constructor.
@@ -127,7 +128,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
             requireNonNull(oldConn.context).messageSlicer()), newBackend.getName());
     }
 
-    public final ClientActorContext context() {
+    public final @NonNull ClientActorContext context() {
         return context;
     }
 
@@ -135,7 +136,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         return cookie;
     }
 
-    public final ActorRef localActor() {
+    public final @NonNull ActorRef localActor() {
         return context.self();
     }
 
@@ -164,7 +165,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
      *
      * <p>
      * Note that unlike {@link #sendRequest(Request, Consumer)}, this method does not exert backpressure, hence it
-     * should never be called from an application thread.
+     * should never be called from an application thread and serves mostly for moving requests between queues.
      *
      * @param request Request to send
      * @param callback Callback to invoke
@@ -198,7 +199,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         }
     }
 
-    @GuardedBy("lock")
+    @Holding("lock")
     private void commonEnqueue(final ConnectionEntry entry, final long now) {
         final RequestException maybePoison = poisoned;
         if (maybePoison != null) {
@@ -223,7 +224,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         return queue.drain();
     }
 
-    @GuardedBy("lock")
+    @Holding("lock")
     final void finishReplay(final ReconnectForwarder forwarder) {
         setForwarder(forwarder);
 
@@ -243,12 +244,12 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         lock.unlock();
     }
 
-    @GuardedBy("lock")
+    @Holding("lock")
     final void setForwarder(final ReconnectForwarder forwarder) {
         queue.setForwarder(forwarder, currentTime());
     }
 
-    @GuardedBy("lock")
+    @Holding("lock")
     abstract ClientActorBehavior<T> lockedReconnect(ClientActorBehavior<T> current,
             RequestException runtimeRequestException);
 
@@ -287,7 +288,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
      *
      * @param delay Delay, in nanoseconds
      */
-    @GuardedBy("lock")
+    @Holding("lock")
     private void scheduleTimer(final long delay) {
         if (haveTimer) {
             LOG.debug("{}: timer already scheduled on {}", context.persistenceId(), this);
@@ -317,9 +318,10 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
      */
     @VisibleForTesting
     final ClientActorBehavior<T> runTimer(final ClientActorBehavior<T> current) {
-        final Optional<Long> delay;
-
         lock.lock();
+
+        final List<ConnectionEntry> poisonEntries;
+        final NoProgressException poisonCause;
         try {
             haveTimer = false;
             final long now = currentTime();
@@ -329,41 +331,43 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
             // The following line is only reliable when queue is not forwarding, but such state should not last long.
             // FIXME: BUG-8422: this may not be accurate w.r.t. replayed entries
             final long ticksSinceProgress = queue.ticksStalling(now);
-            if (ticksSinceProgress >= context.config().getNoProgressTimeout()) {
-                LOG.error("Queue {} has not seen progress in {} seconds, failing all requests", this,
-                    TimeUnit.NANOSECONDS.toSeconds(ticksSinceProgress));
+            if (ticksSinceProgress < context.config().getNoProgressTimeout()) {
+                // Requests are always scheduled in sequence, hence checking for timeout is relatively straightforward.
+                // Note we also inquire about the delay, so we can re-schedule if needed, hence the unusual
+                // tri-state return convention.
+                final OptionalLong delay = lockedCheckTimeout(now);
+                if (delay == null) {
+                    // We have timed out. There is no point in scheduling a timer
+                    LOG.debug("{}: connection {} timed out", context.persistenceId(), this);
+                    return lockedReconnect(current, new RuntimeRequestException("Backend connection timed out",
+                        new TimeoutException()));
+                }
 
-                lockedPoison(new NoProgressException(ticksSinceProgress));
-                current.removeConnection(this);
-                return current;
-            }
+                if (delay.isPresent()) {
+                    // If there is new delay, schedule a timer
+                    scheduleTimer(delay.orElseThrow());
+                } else {
+                    LOG.debug("{}: not scheduling timeout on {}", context.persistenceId(), this);
+                }
 
-            // Requests are always scheduled in sequence, hence checking for timeout is relatively straightforward.
-            // Note we use also inquire about the delay, so we can re-schedule if needed, hence the unusual tri-state
-            // return convention.
-            delay = lockedCheckTimeout(now);
-            if (delay == null) {
-                // We have timed out. There is no point in scheduling a timer
-                LOG.debug("{}: connection {} timed out", context.persistenceId(), this);
-                return lockedReconnect(current, new RuntimeRequestException("Backend connection timed out",
-                    new TimeoutException()));
+                return current;
             }
 
-            if (delay.isPresent()) {
-                // If there is new delay, schedule a timer
-                scheduleTimer(delay.get());
-            } else {
-                LOG.debug("{}: not scheduling timeout on {}", context.persistenceId(), this);
-            }
+            LOG.error("Queue {} has not seen progress in {} seconds, failing all requests", this,
+                TimeUnit.NANOSECONDS.toSeconds(ticksSinceProgress));
+            poisonCause = new NoProgressException(ticksSinceProgress);
+            poisonEntries = lockedPoison(poisonCause);
+            current.removeConnection(this);
         } finally {
             lock.unlock();
         }
 
+        poison(poisonEntries, poisonCause);
         return current;
     }
 
     @VisibleForTesting
-    final Optional<Long> checkTimeout(final long now) {
+    final OptionalLong checkTimeout(final long now) {
         lock.lock();
         try {
             return lockedCheckTimeout(now);
@@ -385,10 +389,10 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
     @SuppressFBWarnings(value = "NP_OPTIONAL_RETURN_NULL",
             justification = "Returning null Optional is documented in the API contract.")
     @GuardedBy("lock")
-    private Optional<Long> lockedCheckTimeout(final long now) {
+    private OptionalLong lockedCheckTimeout(final long now) {
         if (queue.isEmpty()) {
             LOG.debug("{}: connection {} is empty", context.persistenceId(), this);
-            return Optional.empty();
+            return OptionalLong.empty();
         }
 
         final long backendSilentTicks = backendSilentTicks(now);
@@ -403,7 +407,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
             final long beenOpen = now - head.getEnqueuedTicks();
             final long requestTimeout = context.config().getRequestTimeout();
             if (beenOpen < requestTimeout) {
-                return Optional.of(requestTimeout - beenOpen);
+                return OptionalLong.of(requestTimeout - beenOpen);
             }
 
             tasksTimedOut++;
@@ -418,7 +422,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
             queue.tryTransmit(now);
         }
 
-        return Optional.empty();
+        return OptionalLong.empty();
     }
 
     private void timeoutEntry(final ConnectionEntry entry, final long beenOpen) {
@@ -436,18 +440,31 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
     }
 
     final void poison(final RequestException cause) {
+        final List<ConnectionEntry> entries;
+
         lock.lock();
         try {
-            lockedPoison(cause);
+            entries = lockedPoison(cause);
         } finally {
             lock.unlock();
         }
+
+        poison(entries, cause);
     }
 
-    @GuardedBy("lock")
-    private void lockedPoison(final RequestException cause) {
+    // Do not hold any locks while calling this
+    private static void poison(final Collection<? extends ConnectionEntry> entries, final RequestException cause) {
+        for (ConnectionEntry e : entries) {
+            final Request<?, ?> request = e.getRequest();
+            LOG.trace("Poisoning request {}", request, cause);
+            e.complete(request.toRequestFailure(cause));
+        }
+    }
+
+    @Holding("lock")
+    private List<ConnectionEntry> lockedPoison(final RequestException cause) {
         poisoned = enrichPoison(cause);
-        queue.poison(cause);
+        return queue.poison();
     }
 
     RequestException enrichPoison(final RequestException ex) {
@@ -472,7 +489,7 @@ public abstract class AbstractClientConnection<T extends BackendInfo> {
         }
 
         if (maybeEntry.isPresent()) {
-            final TransmittedConnectionEntry entry = maybeEntry.get();
+            final TransmittedConnectionEntry entry = maybeEntry.orElseThrow();
             LOG.debug("Completing {} with {}", entry, envelope);
             entry.complete(envelope.getMessage());
         }
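
lockedCheckTimeout() now returns OptionalLong with the tri-state convention spelled out in the comments above: a null return means the connection timed out, an empty OptionalLong means no timer is needed, and a present value is the delay to reschedule. A small illustrative consumer of that convention, not part of the patch, could read:

    import java.util.OptionalLong;

    // Illustrative only: mirrors the tri-state contract described in runTimer()
    // (null = timed out, empty = no timer needed, present = delay in nanoseconds).
    final class TimeoutDemo {
        static String interpret(final OptionalLong delay) {
            if (delay == null) {
                return "timed out: start reconnecting";
            }
            if (delay.isPresent()) {
                return "reschedule timer in " + delay.orElseThrow() + "ns";
            }
            return "queue is quiet: no timer needed";
        }

        public static void main(final String[] args) {
            System.out.println(interpret(null));
            System.out.println(interpret(OptionalLong.of(1_000_000L)));
            System.out.println(interpret(OptionalLong.empty()));
        }
    }
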
index 919aaf8bf86883f47736d8abcdbfb9178ab8bac4..965fad150df1fb8b091c2257bce122ae4bb3ce57 100644 (file)
@@ -9,7 +9,6 @@
 package org.opendaylight.controller.cluster.access.client;
 
 import java.util.concurrent.TimeUnit;
-import javax.annotation.concurrent.NotThreadSafe;
 
 /**
  * A ProgressTracker subclass which uses {@code ticksWorkedPerClosedTask} to compute delays.
@@ -24,9 +23,11 @@ import javax.annotation.concurrent.NotThreadSafe;
  * <p>On the other hand, there is no delay when the number of open tasks is half the limit or less,
  * in order to prevent backend from running out of tasks while there may be waiting frontend threads.
  *
+ * <p>
+ * This class is NOT thread-safe.
+ *
  * @author Vratko Polak
  */
-@NotThreadSafe
 final class AveragingProgressTracker extends ProgressTracker {
     private static final long DEFAULT_TICKS_PER_TASK = TimeUnit.MILLISECONDS.toNanos(500);
 
index 01aff95532b4bd228afed5fe025531a93fecc1a7..891de52fb5a48ecf9b132f7d55c80ced68b6b047 100644 (file)
@@ -7,10 +7,12 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 
 /**
@@ -33,10 +35,10 @@ public class BackendInfo {
 
     protected BackendInfo(final ActorRef actor, final String name, final long sessionId, final ABIVersion version,
             final int maxMessages) {
-        this.version = Preconditions.checkNotNull(version);
-        this.actor = Preconditions.checkNotNull(actor);
-        this.name = Preconditions.checkNotNull(name);
-        Preconditions.checkArgument(maxMessages > 0, "Maximum messages has to be positive, not %s", maxMessages);
+        this.version = requireNonNull(version);
+        this.actor = requireNonNull(actor);
+        this.name = requireNonNull(name);
+        checkArgument(maxMessages > 0, "Maximum messages has to be positive, not %s", maxMessages);
         this.maxMessages = maxMessages;
         this.sessionId = sessionId;
     }
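
The constructor above switches to statically imported requireNonNull and checkArgument. A tiny sketch of the same validation style on a hypothetical Endpoint type:

    import static com.google.common.base.Preconditions.checkArgument;
    import static java.util.Objects.requireNonNull;

    // Placeholder class, only meant to show the statically-imported check style.
    final class Endpoint {
        private final String name;
        private final int maxMessages;

        Endpoint(final String name, final int maxMessages) {
            this.name = requireNonNull(name);
            checkArgument(maxMessages > 0, "Maximum messages has to be positive, not %s", maxMessages);
            this.maxMessages = maxMessages;
        }

        String name() {
            return name;
        }

        int maxMessages() {
            return maxMessages;
        }
    }
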
index 53c6bd92b194902048ce55a17926c6ff085ec54a..3f8c11a9137ed3bc5cccda43b8a37f55d6f46f3d 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.access.client;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.annotations.Beta;
 import com.google.common.base.Stopwatch;
 import com.google.common.base.Verify;
 import java.util.Collection;
@@ -18,7 +17,7 @@ import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-import javax.annotation.concurrent.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.NotLeaderException;
@@ -45,10 +44,7 @@ import scala.concurrent.duration.FiniteDuration;
 
 /**
  * A behavior, which handles messages sent to a {@link AbstractClientActor}.
- *
- * @author Robert Varga
  */
-@Beta
 public abstract class ClientActorBehavior<T extends BackendInfo> extends
         RecoveredClientActorBehavior<ClientActorContext> implements Identifiable<ClientIdentifier> {
     /**
@@ -152,12 +148,11 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
             return ((InternalCommand<T>) command).execute(this);
         }
 
-        if (command instanceof SuccessEnvelope) {
-            return onRequestSuccess((SuccessEnvelope) command);
+        if (command instanceof SuccessEnvelope successEnvelope) {
+            return onRequestSuccess(successEnvelope);
         }
-
-        if (command instanceof FailureEnvelope) {
-            return internalOnRequestFailure((FailureEnvelope) command);
+        if (command instanceof FailureEnvelope failureEnvelope) {
+            return internalOnRequestFailure(failureEnvelope);
         }
 
         if (MessageAssembler.isHandledMessage(command)) {
@@ -174,10 +169,10 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
     }
 
     private static long extractCookie(final Identifier id) {
-        if (id instanceof TransactionIdentifier) {
-            return ((TransactionIdentifier) id).getHistoryId().getCookie();
-        } else if (id instanceof LocalHistoryIdentifier) {
-            return ((LocalHistoryIdentifier) id).getCookie();
+        if (id instanceof TransactionIdentifier transactionId) {
+            return transactionId.getHistoryId().getCookie();
+        } else if (id instanceof LocalHistoryIdentifier historyId) {
+            return historyId.getCookie();
         } else {
             throw new IllegalArgumentException("Unhandled identifier " + id);
         }
@@ -215,7 +210,7 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
              * sessionId and if it does not match our current connection just ignore it.
              */
             final Optional<T> optBackend = conn.getBackendInfo();
-            if (optBackend.isPresent() && optBackend.get().getSessionId() != command.getSessionId()) {
+            if (optBackend.isPresent() && optBackend.orElseThrow().getSessionId() != command.getSessionId()) {
                 LOG.debug("{}: Mismatched current connection {} and envelope {}, ignoring response", persistenceId(),
                     conn, command);
                 return this;
@@ -301,7 +296,7 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
      * @param newConn New connection
      * @return ConnectionConnectCohort which will be used to complete the process of bringing the connection up.
      */
-    @GuardedBy("connectionsLock")
+    @Holding("connectionsLock")
     protected abstract @NonNull ConnectionConnectCohort connectionUp(@NonNull ConnectedClientConnection<T> newConn);
 
     private void backendConnectFinished(final Long shard, final AbstractClientConnection<T> oldConn,
@@ -327,8 +322,8 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
 
             LOG.error("{}: failed to resolve shard {}", persistenceId(), shard, failure);
             final RequestException cause;
-            if (failure instanceof RequestException) {
-                cause = (RequestException) failure;
+            if (failure instanceof RequestException requestException) {
+                cause = requestException;
             } else {
                 cause = new RuntimeRequestException("Failed to resolve shard " + shard, failure);
             }
@@ -420,7 +415,7 @@ public abstract class ClientActorBehavior<T extends BackendInfo> extends
 
         final Long shard = oldConn.cookie();
         LOG.info("{}: refreshing backend for shard {}", persistenceId(), shard);
-        resolver().refreshBackendInfo(shard, conn.getBackendInfo().get()).whenComplete(
+        resolver().refreshBackendInfo(shard, conn.getBackendInfo().orElseThrow()).whenComplete(
             (backend, failure) -> context().executeInActor(behavior -> {
                 backendConnectFinished(shard, conn, backend, failure);
                 return behavior;
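
Several instanceof-then-cast blocks above become Java 16 instanceof patterns, which bind a typed variable directly and drop the explicit cast. A self-contained illustration with placeholder Success/Failure records (not the controller's envelope classes):

    final class PatternMatchDemo {
        record Success(String message) {
        }

        record Failure(String cause) {
        }

        // The pattern variable is already typed, so no cast is needed in either branch.
        static String describe(final Object command) {
            if (command instanceof Success success) {
                return "success: " + success.message();
            }
            if (command instanceof Failure failure) {
                return "failure: " + failure.cause();
            }
            return "unhandled: " + command;
        }

        public static void main(final String[] args) {
            System.out.println(describe(new Success("ok")));
            System.out.println(describe(new Failure("boom")));
        }
    }
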
index f85e8e244cd0884e687a589125ea62255930a521..abebf02197965fc7b46ccca446009ec208e8b655 100644 (file)
@@ -13,10 +13,8 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Cancellable;
 import akka.actor.Scheduler;
-import com.google.common.annotations.Beta;
 import com.google.common.base.Ticker;
 import java.util.concurrent.TimeUnit;
-import javax.annotation.concurrent.ThreadSafe;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
@@ -32,12 +30,8 @@ import scala.concurrent.duration.FiniteDuration;
  * <p>
  * Time-keeping in a client actor is based on monotonic time. The precision of this time can be expected to be the
  * same as {@link System#nanoTime()}, but it is not tied to that particular clock. Actor clock is exposed as
- * a {@link Ticker}, which can be obtained via {@link #ticker()}.
- *
- * @author Robert Varga
+ * a {@link Ticker}, which can be obtained via {@link #ticker()}. This class is thread-safe.
  */
-@Beta
-@ThreadSafe
 public class ClientActorContext extends AbstractClientActorContext implements Identifiable<ClientIdentifier> {
     private final ExecutionContext executionContext;
     private final ClientIdentifier identifier;
@@ -51,9 +45,9 @@ public class ClientActorContext extends AbstractClientActorContext implements Id
             final ClientIdentifier identifier, final ClientActorConfig config) {
         super(self, persistenceId);
         this.identifier = requireNonNull(identifier);
-        this.scheduler = requireNonNull(system).scheduler();
-        this.executionContext = system.dispatcher();
-        this.dispatchers = new Dispatchers(system.dispatchers());
+        scheduler = requireNonNull(system).scheduler();
+        executionContext = system.dispatcher();
+        dispatchers = new Dispatchers(system.dispatchers());
         this.config = requireNonNull(config);
 
         messageSlicer = MessageSlicer.builder().messageSliceSize(config.getMaximumMessageSliceSize())
index c540142157f328f05f78ebaf505a9b858911c021..8bcce85dd3e85f088016154f74f68513827bc5b2 100644 (file)
@@ -7,14 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
-@Beta
-@NotThreadSafe
+/**
+ * A connected connection.
+ *
+ * @param <T> Backend info type
+ */
 public final class ConnectedClientConnection<T extends BackendInfo> extends AbstractReceivingClientConnection<T> {
-
     ConnectedClientConnection(final AbstractClientConnection<T> oldConnection, final T newBackend) {
         super(oldConnection, newBackend);
     }
index 10159901064c6578f3c530ff00d16aa051c9bb53..445321b474cd725a03e3e41bf6dd688786e9de42 100644 (file)
@@ -7,11 +7,9 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 
-@Beta
 public final class ConnectingClientConnection<T extends BackendInfo> extends AbstractClientConnection<T> {
     /**
      * A wild estimate on how deep a queue should be. Without having knowledge of the remote actor we can only
index 8d769b273814e735691cd66206de5644549fd433..c5e47e76dd8e22b272af4c208250c96765cb83df 100644 (file)
@@ -7,10 +7,10 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.base.Preconditions;
 import java.util.function.Consumer;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.Response;
@@ -19,19 +19,16 @@ import org.opendaylight.yangtools.concepts.Immutable;
 /**
  * Single entry in a {@link AbstractClientConnection}. Tracks the request, the associated callback and time when
  * the request was first enqueued.
- *
- * @author Robert Varga
  */
-@Beta
 public class ConnectionEntry implements Immutable {
     private final Consumer<Response<?, ?>> callback;
     private final Request<?, ?> request;
     private final long enqueuedTicks;
 
     ConnectionEntry(final Request<?, ?> request, final Consumer<Response<?, ?>> callback, final long now) {
-        this.request = Preconditions.checkNotNull(request);
-        this.callback = Preconditions.checkNotNull(callback);
-        this.enqueuedTicks = now;
+        this.request = requireNonNull(request);
+        this.callback = requireNonNull(callback);
+        enqueuedTicks = now;
     }
 
     ConnectionEntry(final ConnectionEntry entry) {
index 4449dd2fb961e18908d5d0ecf6154e7824c000c9..b739a0bc59fbbe5e404933766f84f7b1d56b6136 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSystem;
 import akka.persistence.SnapshotSelectionCriteria;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 
 /**
@@ -22,7 +23,7 @@ final class InitialClientActorContext extends AbstractClientActorContext {
 
     InitialClientActorContext(final AbstractClientActor actor, final String persistenceId) {
         super(actor.self(), persistenceId);
-        this.actor = Preconditions.checkNotNull(actor);
+        this.actor = requireNonNull(actor);
     }
 
     void saveSnapshot(final ClientIdentifier snapshot) {
index 08bc05346b65d1ce53faa7d3e64e8f38fc6a5359..0917174b654a62895b02f148a5e88b74074815c2 100644 (file)
@@ -7,25 +7,35 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import java.util.concurrent.locks.StampedLock;
 
 /**
  * A lock implementation which allows users to perform optimistic reads and validate them in a fashion similar
  * to {@link StampedLock}. In case a read is contended with a write, the read side will throw
  * an {@link InversibleLockException}, which the caller can catch and use to wait for the write to resolve.
- *
- * @author Robert Varga
  */
-@Beta
 public final class InversibleLock {
-    private static final AtomicReferenceFieldUpdater<InversibleLock, CountDownLatch> LATCH_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(InversibleLock.class, CountDownLatch.class, "latch");
+    private static final VarHandle LATCH;
+
+    static {
+        try {
+            LATCH = MethodHandles.lookup().findVarHandle(InversibleLock.class, "latch", CountDownLatch.class);
+        } catch (NoSuchFieldException | IllegalAccessException e) {
+            throw new ExceptionInInitializerError(e);
+        }
+    }
 
     private final StampedLock lock = new StampedLock();
+
+    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD",
+        justification = "https://github.com/spotbugs/spotbugs/issues/2749")
     private volatile CountDownLatch latch;
 
     /**
@@ -43,7 +53,7 @@ public final class InversibleLock {
 
             // Write-locked. Read the corresponding latch and if present report an exception, which will propagate
             // and force release of locks.
-            final CountDownLatch local = latch;
+            final var local = latch;
             if (local != null) {
                 throw new InversibleLockException(local);
             }
@@ -57,18 +67,13 @@ public final class InversibleLock {
     }
 
     public long writeLock() {
-        final CountDownLatch local = new CountDownLatch(1);
-        final boolean taken = LATCH_UPDATER.compareAndSet(this, null, local);
-        Verify.verify(taken);
-
+        verify(LATCH.compareAndSet(this, null, new CountDownLatch(1)));
         return lock.writeLock();
     }
 
     public void unlockWrite(final long stamp) {
-        final CountDownLatch local = LATCH_UPDATER.getAndSet(this, null);
-        Verify.verifyNotNull(local);
+        final var local = verifyNotNull((CountDownLatch) LATCH.getAndSet(this, null));
         lock.unlockWrite(stamp);
         local.countDown();
     }
-
 }
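
InversibleLock now drives its latch field through a VarHandle instead of an AtomicReferenceFieldUpdater. The general pattern, shown here on a stand-alone LatchHolder class rather than the lock itself:

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;
    import java.util.concurrent.CountDownLatch;

    // Sketch of a static VarHandle bound to a volatile field, used for atomic
    // compareAndSet/getAndSet without an AtomicReferenceFieldUpdater instance.
    final class LatchHolder {
        private static final VarHandle LATCH;

        static {
            try {
                LATCH = MethodHandles.lookup().findVarHandle(LatchHolder.class, "latch", CountDownLatch.class);
            } catch (NoSuchFieldException | IllegalAccessException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        private volatile CountDownLatch latch;

        boolean arm() {
            // Atomically install a fresh latch only if none is present.
            return LATCH.compareAndSet(this, null, new CountDownLatch(1));
        }

        void release() {
            // Atomically take ownership of the current latch and open it, if one was armed.
            final CountDownLatch local = (CountDownLatch) LATCH.getAndSet(this, null);
            if (local != null) {
                local.countDown();
            }
        }
    }
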
index 7ec18a265d20b412de826575fbc4c1c7670e4016..82b6568459a34cdedaf24f7ddb91d32cba61b348 100644 (file)
@@ -7,24 +7,23 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Serial;
 import java.util.concurrent.CountDownLatch;
 
 /**
  * Exception thrown from {@link InversibleLock#optimisticRead()} and can be used to wait for the racing write
  * to complete using {@link #awaitResolution()}.
- *
- * @author Robert Varga
  */
-@Beta
 public final class InversibleLockException extends RuntimeException {
+    @Serial
     private static final long serialVersionUID = 1L;
 
     private final transient CountDownLatch latch;
 
     InversibleLockException(final CountDownLatch latch) {
-        this.latch = Preconditions.checkNotNull(latch);
+        this.latch = requireNonNull(latch);
     }
 
     public void awaitResolution() {
index b4fb3264d18c7f7fd1946f3bc01b3c4774bd2195..677a57e770f6a3dd802d5ef22cddee268c58b0ec 100644 (file)
@@ -9,7 +9,6 @@
 package org.opendaylight.controller.cluster.access.client;
 
 import com.google.common.base.Preconditions;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,7 +49,6 @@ import org.slf4j.LoggerFactory;
  * @author Vratko Polak
  */
 // TODO: Would bulk methods be less taxing than a loop of single task calls?
-@NotThreadSafe
 abstract class ProgressTracker {
     private static final Logger LOG = LoggerFactory.getLogger(ProgressTracker.class);
 
@@ -120,12 +118,13 @@ abstract class ProgressTracker {
      * @param now tick number corresponding to caller's present
      */
     ProgressTracker(final ProgressTracker oldTracker, final long now) {
-        this.defaultTicksPerTask = oldTracker.defaultTicksPerTask;
-        this.tasksEncountered = this.tasksClosed = oldTracker.tasksClosed;
-        this.lastClosed = oldTracker.lastClosed;
-        this.nearestAllowed = oldTracker.nearestAllowed;  // Call cancelDebt explicitly if needed.
-        this.lastIdle = oldTracker.lastIdle;
-        this.elapsedBeforeIdle = oldTracker.elapsedBeforeIdle;
+        defaultTicksPerTask = oldTracker.defaultTicksPerTask;
+        tasksEncountered = tasksClosed = oldTracker.tasksClosed;
+        lastClosed = oldTracker.lastClosed;
+        // Call cancelDebt explicitly if needed.
+        nearestAllowed = oldTracker.nearestAllowed;
+        lastIdle = oldTracker.lastIdle;
+        elapsedBeforeIdle = oldTracker.elapsedBeforeIdle;
         if (!oldTracker.isIdle()) {
             transitToIdle(now);
         }
@@ -156,7 +155,8 @@ abstract class ProgressTracker {
      *
      * @return number of tasks started but not finished yet
      */
-    final long tasksOpen() {  // TODO: Should we return int?
+    // TODO: Should we return int?
+    final long tasksOpen() {
         // TODO: Should we check the return value is non-negative?
         return tasksEncountered - tasksClosed;
     }
index 58334f91d79199dd713f7bf8f139800f999c4260..8217b72861ea6f9c58f4af47ea9f229c52f902ee 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
 
 /**
  * Forwarder class responsible for routing requests from the previous connection incarnation back to the originator,
@@ -20,7 +20,7 @@ public abstract class ReconnectForwarder {
     private final AbstractReceivingClientConnection<?> successor;
 
     protected ReconnectForwarder(final AbstractReceivingClientConnection<?> successor) {
-        this.successor = Preconditions.checkNotNull(successor);
+        this.successor = requireNonNull(successor);
     }
 
     protected final void sendToSuccessor(final ConnectionEntry entry) {
index acf876b6ce6eb3f85b9c7a2753f254aae878ddbf..e63e7253bcf9b66731e78dc5c698a9d1ab879215 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -26,7 +27,7 @@ public final class ReconnectingClientConnection<T extends BackendInfo> extends A
 
     ReconnectingClientConnection(final ConnectedClientConnection<T> oldConnection, final RequestException cause) {
         super(oldConnection);
-        this.cause = Preconditions.checkNotNull(cause);
+        this.cause = requireNonNull(cause);
     }
 
     @Override
@@ -38,7 +39,7 @@ public final class ReconnectingClientConnection<T extends BackendInfo> extends A
     @Override
     @SuppressWarnings("checkstyle:hiddenField")
     ClientActorBehavior<T> lockedReconnect(final ClientActorBehavior<T> current, final RequestException cause) {
-        this.cause = Preconditions.checkNotNull(cause);
+        this.cause = requireNonNull(cause);
         LOG.warn("Skipping reconnect of already-reconnecting connection {}", this);
         return current;
     }
index 5e2b455abc4f8f88e685d0aff9cea882d6fbb7f3..b44d54921d9de38b898a06633f6c5dec25b2c36e 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.persistence.RecoveryCompleted;
 import akka.persistence.SnapshotOffer;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.slf4j.Logger;
@@ -22,12 +23,20 @@ import org.slf4j.LoggerFactory;
  */
 final class RecoveringClientActorBehavior extends AbstractClientActorBehavior<InitialClientActorContext> {
     private static final Logger LOG = LoggerFactory.getLogger(RecoveringClientActorBehavior.class);
+
+    /*
+     * Base of the property name which overrides the initial generation when we fail to find anything in
+     * persistence. The actual property name has the frontend type name appended.
+     */
+    private static final String GENERATION_OVERRIDE_PROP_BASE =
+            "org.opendaylight.controller.cluster.access.client.initial.generation.";
+
     private final FrontendIdentifier currentFrontend;
     private ClientIdentifier lastId = null;
 
     RecoveringClientActorBehavior(final InitialClientActorContext context, final FrontendIdentifier frontendId) {
         super(context);
-        currentFrontend = Preconditions.checkNotNull(frontendId);
+        currentFrontend = requireNonNull(frontendId);
     }
 
     @Override
@@ -48,14 +57,14 @@ final class RecoveringClientActorBehavior extends AbstractClientActorBehavior<In
 
                 nextId = ClientIdentifier.create(currentFrontend, lastId.getGeneration() + 1);
             } else {
-                nextId = ClientIdentifier.create(currentFrontend, 0);
+                nextId = ClientIdentifier.create(currentFrontend, initialGeneration());
             }
 
             LOG.debug("{}: persisting new identifier {}", persistenceId(), nextId);
             context().saveSnapshot(nextId);
             return new SavingClientActorBehavior(context(), nextId);
-        } else if (recover instanceof SnapshotOffer) {
-            lastId = (ClientIdentifier) ((SnapshotOffer)recover).snapshot();
+        } else if (recover instanceof SnapshotOffer snapshotOffer) {
+            lastId = (ClientIdentifier) snapshotOffer.snapshot();
             LOG.debug("{}: recovered identifier {}", persistenceId(), lastId);
         } else {
             LOG.warn("{}: ignoring recovery message {}", persistenceId(), recover);
@@ -63,4 +72,25 @@ final class RecoveringClientActorBehavior extends AbstractClientActorBehavior<In
 
         return this;
     }
-}
\ No newline at end of file
+
+    private long initialGeneration() {
+        final String propName = GENERATION_OVERRIDE_PROP_BASE + currentFrontend.getClientType().getName();
+        final String propValue = System.getProperty(propName);
+        if (propValue == null) {
+            LOG.debug("{}: no initial generation override, starting from 0", persistenceId());
+            return 0;
+        }
+
+        final long ret;
+        try {
+            ret = Long.parseUnsignedLong(propValue);
+        } catch (NumberFormatException e) {
+            LOG.warn("{}: failed to parse initial generation override '{}', starting from 0", persistenceId(),
+                propValue, e);
+            return 0;
+        }
+
+        LOG.info("{}: initial generation set to {}", persistenceId(), ret);
+        return ret;
+    }
+}
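
The new initialGeneration() hook reads a per-frontend-type system property before falling back to generation 0. A minimal sketch of how the lookup resolves, assuming a hypothetical frontend type named "datastore-config"; only the property-name base is taken from the patch above.

    // Hypothetical frontend type name; the real name comes from currentFrontend.getClientType().getName()
    String propName = "org.opendaylight.controller.cluster.access.client.initial.generation." + "datastore-config";
    System.setProperty(propName, "42");
    // Parsed as an unsigned long; in the patched method an unparsable value logs a warning and falls back to 0
    long initialGeneration = Long.parseUnsignedLong(System.getProperty(propName));  // 42
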
index 8f6e991519d045234bcc6a7a3ba7784a5b5bb1fc..feca185812c7652c4ef84bb74d204723e9864ff4 100644 (file)
@@ -7,12 +7,13 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.persistence.DeleteSnapshotsFailure;
 import akka.persistence.DeleteSnapshotsSuccess;
 import akka.persistence.SaveSnapshotFailure;
 import akka.persistence.SaveSnapshotSuccess;
 import akka.persistence.SnapshotSelectionCriteria;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -28,26 +29,24 @@ final class SavingClientActorBehavior extends RecoveredClientActorBehavior<Initi
 
     SavingClientActorBehavior(final InitialClientActorContext context, final ClientIdentifier nextId) {
         super(context);
-        this.myId = Preconditions.checkNotNull(nextId);
+        myId = requireNonNull(nextId);
     }
 
     @Override
     AbstractClientActorBehavior<?> onReceiveCommand(final Object command) {
-        if (command instanceof SaveSnapshotFailure) {
-            LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) command).cause());
+        if (command instanceof SaveSnapshotFailure saveFailure) {
+            LOG.error("{}: failed to persist state", persistenceId(), saveFailure.cause());
             return null;
-        } else if (command instanceof SaveSnapshotSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), command);
-            SaveSnapshotSuccess saved = (SaveSnapshotSuccess)command;
-            context().deleteSnapshots(new SnapshotSelectionCriteria(saved.metadata().sequenceNr(),
+        } else if (command instanceof SaveSnapshotSuccess saved) {
+            LOG.debug("{}: got command: {}", persistenceId(), saved);
+            context().deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(),
                     saved.metadata().timestamp() - 1, 0L, 0L));
             return this;
-        } else if (command instanceof DeleteSnapshotsSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), command);
-        } else if (command instanceof DeleteSnapshotsFailure) {
+        } else if (command instanceof DeleteSnapshotsSuccess deleteSuccess) {
+            LOG.debug("{}: got command: {}", persistenceId(), deleteSuccess);
+        } else if (command instanceof DeleteSnapshotsFailure deleteFailure) {
             // Not treating this as a fatal error.
-            LOG.warn("{}: failed to delete prior snapshots", persistenceId(),
-                    ((DeleteSnapshotsFailure) command).cause());
+            LOG.warn("{}: failed to delete prior snapshots", persistenceId(), deleteFailure.cause());
         } else {
             LOG.debug("{}: stashing command {}", persistenceId(), command);
             context().stash();
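
The hunks above and below repeatedly convert instanceof-plus-cast into Java 16+ pattern matching, which binds the narrowed variable directly in the condition. A standalone sketch of the construct, not taken from the patch:

    Object command = "hello";
    if (command instanceof String text) {
        // 'text' is already typed as String, no separate cast needed
        System.out.println(text.length());
    }
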
index 0313a72a8319fc107a967a2bfbb7c188b4a08ac4..cc3da1e4503118ed5a064ceeb60fd34c4fe9e12d 100644 (file)
@@ -7,21 +7,22 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayDeque;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Deque;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Optional;
 import java.util.Queue;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
-import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.ResponseEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
@@ -52,11 +53,8 @@ import org.slf4j.LoggerFactory;
  *
  * <p>
  * This class is not thread-safe, as it is expected to be guarded by {@link AbstractClientConnection}.
- *
- * @author Robert Varga
  */
-@NotThreadSafe
-abstract class TransmitQueue {
+abstract sealed class TransmitQueue {
     static final class Halted extends TransmitQueue {
         // For ConnectingClientConnection.
         Halted(final int targetDepth) {
@@ -79,7 +77,7 @@ abstract class TransmitQueue {
         }
 
         @Override
-        void preComplete(ResponseEnvelope<?> envelope) {
+        void preComplete(final ResponseEnvelope<?> envelope) {
         }
     }
 
@@ -95,8 +93,8 @@ abstract class TransmitQueue {
         Transmitting(final TransmitQueue oldQueue, final int targetDepth, final BackendInfo backend, final long now,
                 final MessageSlicer messageSlicer) {
             super(oldQueue, targetDepth, now);
-            this.backend = Preconditions.checkNotNull(backend);
-            this.messageSlicer = Preconditions.checkNotNull(messageSlicer);
+            this.backend = requireNonNull(backend);
+            this.messageSlicer = requireNonNull(messageSlicer);
         }
 
         @Override
@@ -135,7 +133,7 @@ abstract class TransmitQueue {
         }
 
         @Override
-        void preComplete(ResponseEnvelope<?> envelope) {
+        void preComplete(final ResponseEnvelope<?> envelope) {
             if (envelope.getTxSequence() == currentSlicedEnvSequenceId) {
                 // Slicing completed for the prior request - clear the cached sequence id field to enable subsequent
                 // requests to be transmitted.
@@ -148,7 +146,8 @@ abstract class TransmitQueue {
 
     private final Deque<TransmittedConnectionEntry> inflight = new ArrayDeque<>();
     private final Deque<ConnectionEntry> pending = new ArrayDeque<>();
-    private final AveragingProgressTracker tracker;  // Cannot be just ProgressTracker as we are inheriting limits.
+    // Cannot be just ProgressTracker as we are inheriting limits.
+    private final AveragingProgressTracker tracker;
     private ReconnectForwarder successor;
 
     /**
@@ -218,7 +217,7 @@ abstract class TransmitQueue {
             return Optional.empty();
         }
 
-        final TransmittedConnectionEntry entry = maybeEntry.get();
+        final TransmittedConnectionEntry entry = maybeEntry.orElseThrow();
         tracker.closeTask(now, entry.getEnqueuedTicks(), entry.getTxTicks(), envelope.getExecutionTimeNanos());
 
         // We have freed up a slot, try to transmit something
@@ -256,7 +255,7 @@ abstract class TransmitQueue {
             return false;
         }
 
-        inflight.addLast(maybeTransmitted.get());
+        inflight.addLast(maybeTransmitted.orElseThrow());
         return true;
     }
 
@@ -338,14 +337,18 @@ abstract class TransmitQueue {
         return pending.peek();
     }
 
-    final void poison(final RequestException cause) {
-        poisonQueue(inflight, cause);
-        poisonQueue(pending, cause);
+    final List<ConnectionEntry> poison() {
+        final List<ConnectionEntry> entries = new ArrayList<>(inflight.size() + pending.size());
+        entries.addAll(inflight);
+        inflight.clear();
+        entries.addAll(pending);
+        pending.clear();
+        return entries;
     }
 
     final void setForwarder(final ReconnectForwarder forwarder, final long now) {
-        Verify.verify(successor == null, "Successor %s already set on connection %s", successor, this);
-        successor = Preconditions.checkNotNull(forwarder);
+        verify(successor == null, "Successor %s already set on connection %s", successor, this);
+        successor = requireNonNull(forwarder);
         LOG.debug("Connection {} superseded by {}, splicing queue", this, successor);
 
         /*
@@ -421,12 +424,10 @@ abstract class TransmitQueue {
             }
 
             // Check if the entry has (ever) been transmitted
-            if (!(e instanceof TransmittedConnectionEntry)) {
+            if (!(e instanceof TransmittedConnectionEntry te)) {
                 return Optional.empty();
             }
 
-            final TransmittedConnectionEntry te = (TransmittedConnectionEntry) e;
-
             // Now check session match
             if (envelope.getSessionId() != te.getSessionId()) {
                 LOG.debug("Expecting session {}, ignoring response {}", te.getSessionId(), envelope);
@@ -444,13 +445,4 @@ abstract class TransmitQueue {
 
         return null;
     }
-
-    private static void poisonQueue(final Queue<? extends ConnectionEntry> queue, final RequestException cause) {
-        for (ConnectionEntry e : queue) {
-            final Request<?, ?> request = e.getRequest();
-            LOG.trace("Poisoning request {}", request, cause);
-            e.complete(request.toRequestFailure(cause));
-        }
-        queue.clear();
-    }
 }
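
With poison() now returning the in-flight and pending entries instead of completing them, failing the requests becomes the caller's responsibility. A minimal sketch of that caller-side loop, mirroring the removed poisonQueue() helper; 'queue', 'cause' (a RequestException) and 'LOG' are assumed to exist at the call site.

    for (ConnectionEntry entry : queue.poison()) {
        final Request<?, ?> request = entry.getRequest();
        LOG.trace("Poisoning request {}", request, cause);
        entry.complete(request.toRequestFailure(cause));
    }
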
index 889bd5899020fd78755685254cf0924fdd288b5e..699e62191f9e2f69ef1dc3709bfad2eb0c19237f 100644 (file)
@@ -21,8 +21,6 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 
 /**
  * Abstract base class for client actors and their components.
- *
- * @author Robert Varga
  */
 public abstract class AbstractClientActorTest {
     private static final MemberName MEMBER_NAME = MemberName.forName("member-1");
index 59930ca024794c6351baa25b23cc761e5d98ece2..403ecbd6cc4d864e2595b6b231ace3cd4a09befe 100644 (file)
@@ -8,7 +8,11 @@
 package org.opendaylight.controller.cluster.access.client;
 
 import static org.hamcrest.CoreMatchers.hasItems;
-import static org.mockito.Matchers.isA;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.isA;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
@@ -19,13 +23,11 @@ import akka.actor.ActorSystem;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
 import com.google.common.collect.Iterables;
-import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.function.Consumer;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.MockitoAnnotations;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionAbortSuccess;
 import org.opendaylight.controller.cluster.access.commands.TransactionFailure;
@@ -61,7 +63,6 @@ public abstract class AbstractClientConnectionTest<T extends AbstractClientConne
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         backendProbe = new TestProbe(system);
         contextProbe = new TestProbe(system);
@@ -75,7 +76,7 @@ public abstract class AbstractClientConnectionTest<T extends AbstractClientConne
 
     @Test
     public void testLocalActor() {
-        Assert.assertEquals(contextProbe.ref(), connection.localActor());
+        assertEquals(contextProbe.ref(), connection.localActor());
     }
 
     @Test
@@ -97,7 +98,7 @@ public abstract class AbstractClientConnectionTest<T extends AbstractClientConne
         final Request<?, ?> request = createRequest(replyToProbe.ref());
         connection.sendRequest(request, callback);
         final RequestEnvelope requestEnvelope = backendProbe.expectMsgClass(RequestEnvelope.class);
-        Assert.assertEquals(request, requestEnvelope.getMessage());
+        assertEquals(request, requestEnvelope.getMessage());
         final LocalHistoryIdentifier historyId = new LocalHistoryIdentifier(CLIENT_ID, 0L);
         final RequestSuccess<?, ?> message = new TransactionAbortSuccess(new TransactionIdentifier(historyId, 0L), 0L);
         final ResponseEnvelope<?> envelope = new SuccessEnvelope(message, 0L, 0L, 0L);
@@ -108,13 +109,12 @@ public abstract class AbstractClientConnectionTest<T extends AbstractClientConne
     @Test
     public void testRun() {
         final ClientActorBehavior<U> behavior = mock(ClientActorBehavior.class);
-        Assert.assertSame(behavior, connection.runTimer(behavior));
+        assertSame(behavior, connection.runTimer(behavior));
     }
 
     @Test
     public void testCheckTimeoutEmptyQueue() {
-        final Optional<Long> timeout = connection.checkTimeout(context.ticker().read());
-        Assert.assertFalse(timeout.isPresent());
+        assertEquals(OptionalLong.empty(), connection.checkTimeout(context.ticker().read()));
     }
 
     @Test
@@ -122,8 +122,8 @@ public abstract class AbstractClientConnectionTest<T extends AbstractClientConne
         final Consumer<Response<?, ?>> callback = mock(Consumer.class);
         connection.sendRequest(createRequest(replyToProbe.ref()), callback);
         final long now = context.ticker().read();
-        final Optional<Long> timeout = connection.checkTimeout(now);
-        Assert.assertTrue(timeout.isPresent());
+        final OptionalLong timeout = connection.checkTimeout(now);
+        assertTrue(timeout.isPresent());
     }
 
     @Test
@@ -134,8 +134,8 @@ public abstract class AbstractClientConnectionTest<T extends AbstractClientConne
         connection.sendRequest(request1, callback);
         connection.sendRequest(request2, callback);
         final Iterable<ConnectionEntry> entries = connection.startReplay();
-        Assert.assertThat(entries, hasItems(entryWithRequest(request1), entryWithRequest(request2)));
-        Assert.assertEquals(2, Iterables.size(entries));
+        assertThat(entries, hasItems(entryWithRequest(request1), entryWithRequest(request2)));
+        assertEquals(2, Iterables.size(entries));
         Iterables.removeIf(entries, e -> true);
         final ReconnectForwarder forwarder = mock(ReconnectForwarder.class);
         connection.finishReplay(forwarder);
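
Several tests here move from Optional&lt;Long&gt; to OptionalLong for checkTimeout(). A small illustration of the primitive specialization, not from the patch: it avoids boxing the tick value and compares cleanly against OptionalLong.empty().

    java.util.OptionalLong none = java.util.OptionalLong.empty();
    java.util.OptionalLong some = java.util.OptionalLong.of(42L);
    System.out.println(some.getAsLong());   // 42
    System.out.println(none.isPresent());   // false
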
index 69788e5da124327760ba5b63388101ab79a00104..6e59daa4b3bdf5db997e447d5f3d18e0dffd85b0 100644 (file)
@@ -8,9 +8,11 @@
 package org.opendaylight.controller.cluster.access.client;
 
 import static org.hamcrest.CoreMatchers.everyItem;
-import static org.mockito.Matchers.any;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
 import static org.opendaylight.controller.cluster.access.client.ConnectionEntryMatcher.entryWithRequest;
 
 import akka.actor.ActorSystem;
@@ -21,10 +23,8 @@ import java.util.Collection;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.opendaylight.controller.cluster.access.commands.TransactionFailure;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeResponse;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
@@ -35,7 +35,6 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.Response;
-import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.SuccessEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
@@ -82,14 +81,14 @@ public abstract class AbstractTransmitQueueTest<T extends TransmitQueue> {
             queue.enqueueOrForward(new ConnectionEntry(request, callback, now), now);
         }
         final Collection<ConnectionEntry> entries = queue.drain();
-        Assert.assertEquals(sentMessages, entries.size());
-        Assert.assertThat(entries, everyItem(entryWithRequest(request)));
+        assertEquals(sentMessages, entries.size());
+        assertThat(entries, everyItem(entryWithRequest(request)));
     }
 
     @Test
     public void testTicksStalling() {
         final long now = Ticker.systemTicker().read();
-        Assert.assertEquals(0, queue.ticksStalling(now));
+        assertEquals(0, queue.ticksStalling(now));
     }
 
     @Test
@@ -106,38 +105,38 @@ public abstract class AbstractTransmitQueueTest<T extends TransmitQueue> {
         final RequestSuccess<?, ?> success1 = new TransactionPurgeResponse(anotherTxId, requestSequence);
         final Optional<TransmittedConnectionEntry> completed1 =
                 queue.complete(new SuccessEnvelope(success1, sessionId, txSequence, 1L), now);
-        Assert.assertFalse(completed1.isPresent());
+        assertFalse(completed1.isPresent());
         //different response sequence
         final long differentResponseSequence = 1L;
         final RequestSuccess<?, ?> success2 =
                 new TransactionPurgeResponse(TRANSACTION_IDENTIFIER, differentResponseSequence);
         final Optional<TransmittedConnectionEntry> completed2 =
                 queue.complete(new SuccessEnvelope(success2, sessionId, txSequence, 1L), now);
-        Assert.assertFalse(completed2.isPresent());
+        assertFalse(completed2.isPresent());
         //different tx sequence
         final long differentTxSequence = 1L;
         final RequestSuccess<?, ?> success3 =
                 new TransactionPurgeResponse(TRANSACTION_IDENTIFIER, requestSequence);
         final Optional<TransmittedConnectionEntry> completed3 =
                 queue.complete(new SuccessEnvelope(success3, sessionId, differentTxSequence, 1L), now);
-        Assert.assertFalse(completed3.isPresent());
+        assertFalse(completed3.isPresent());
         //different session id
         final long differentSessionId = 1L;
         final RequestSuccess<?, ?> success4 =
                 new TransactionPurgeResponse(TRANSACTION_IDENTIFIER, requestSequence);
         final Optional<TransmittedConnectionEntry> completed4 =
                 queue.complete(new SuccessEnvelope(success4, differentSessionId, differentTxSequence, 1L), now);
-        Assert.assertFalse(completed4.isPresent());
+        assertFalse(completed4.isPresent());
     }
 
     @Test
     public void testIsEmpty() {
-        Assert.assertTrue(queue.isEmpty());
+        assertTrue(queue.isEmpty());
         final Request<?, ?> request = new TransactionPurgeRequest(TRANSACTION_IDENTIFIER, 0L, probe.ref());
         final Consumer<Response<?, ?>> callback = createConsumerMock();
         final long now = Ticker.systemTicker().read();
         queue.enqueueOrForward(new ConnectionEntry(request, callback, now), now);
-        Assert.assertFalse(queue.isEmpty());
+        assertFalse(queue.isEmpty());
     }
 
     @Test
@@ -150,7 +149,7 @@ public abstract class AbstractTransmitQueueTest<T extends TransmitQueue> {
         final ConnectionEntry entry2 = new ConnectionEntry(request2, callback, now);
         queue.enqueueOrForward(entry1, now);
         queue.enqueueOrForward(entry2, now);
-        Assert.assertEquals(entry1.getRequest(), queue.peek().getRequest());
+        assertEquals(entry1.getRequest(), queue.peek().getRequest());
     }
 
     @Test
@@ -159,9 +158,7 @@ public abstract class AbstractTransmitQueueTest<T extends TransmitQueue> {
         final Consumer<Response<?, ?>> callback = createConsumerMock();
         final long now = Ticker.systemTicker().read();
         queue.enqueueOrForward(new ConnectionEntry(request, callback, now), now);
-        queue.poison(new RuntimeRequestException("fail", new RuntimeException("fail")));
-        verify(callback).accept(any(TransactionFailure.class));
-        Assert.assertTrue(queue.isEmpty());
+        assertEquals(1, queue.poison().size());
     }
 
     @SuppressWarnings("unchecked")
index 7521ed639e734f03cb27e217f0730bd863f8f51d..a19fbfee9fb8578e3aed51b33874f5f1e0c15c65 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.access.client;
 
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.lenient;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 
@@ -35,13 +36,13 @@ public final class AccessClientUtil {
 
     public static ClientActorConfig newMockClientActorConfig() {
         ClientActorConfig mockConfig = mock(ClientActorConfig.class);
-        doReturn(2_000_000).when(mockConfig).getMaximumMessageSliceSize();
-        doReturn(1_000_000_000).when(mockConfig).getFileBackedStreamingThreshold();
-        doReturn(AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS)
-                .when(mockConfig).getBackendAlivenessTimerInterval();
+        lenient().doReturn(2_000_000).when(mockConfig).getMaximumMessageSliceSize();
+        lenient().doReturn(1_000_000_000).when(mockConfig).getFileBackedStreamingThreshold();
         doReturn(AbstractClientConnection.DEFAULT_REQUEST_TIMEOUT_NANOS).when(mockConfig).getRequestTimeout();
-        doReturn(AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS)
-                 .when(mockConfig).getNoProgressTimeout();
+        lenient().doReturn(AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS)
+            .when(mockConfig).getBackendAlivenessTimerInterval();
+        lenient().doReturn(AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS)
+            .when(mockConfig).getNoProgressTimeout();
         return mockConfig;
     }
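
The switch to lenient() reflects strict stubbing: under MockitoJUnitRunner.StrictStubs or MockitoExtension, a stubbing that a given test never exercises fails the run, so shared helpers mark optional stubbings as lenient. A brief sketch, assuming static imports of mock and lenient from org.mockito.Mockito:

    ClientActorConfig mockConfig = mock(ClientActorConfig.class);
    // Not every test touches this getter, so mark the stubbing lenient to avoid UnnecessaryStubbingException
    lenient().doReturn(2_000_000).when(mockConfig).getMaximumMessageSliceSize();
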
 
index 0b630e2da8fd7b0769bf2c7e88da51184ac87bbd..90ffd77a347e7042c8a099169f4bc19b0acc3b03 100644 (file)
@@ -7,11 +7,11 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
@@ -22,34 +22,50 @@ import akka.persistence.SnapshotMetadata;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
 import com.typesafe.config.ConfigFactory;
-import java.lang.reflect.Field;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Answers;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import scala.concurrent.duration.FiniteDuration;
 
-public class ActorBehaviorTest {
-
+@ExtendWith(MockitoExtension.class)
+class ActorBehaviorTest {
     private static final String MEMBER_1_FRONTEND_TYPE_1 = "member-1-frontend-type-1";
     private static final FiniteDuration TIMEOUT = FiniteDuration.create(5, TimeUnit.SECONDS);
 
+    @Mock
+    private InternalCommand<BackendInfo> cmd;
+    @Mock(answer = Answers.CALLS_REAL_METHODS)
+    private ClientActorBehavior<BackendInfo> initialBehavior;
+    @Mock
+    private AbstractClientActorContext ctx;
+
     private ActorSystem system;
     private TestProbe probe;
-    private ClientActorBehavior<BackendInfo> initialBehavior;
     private MockedSnapshotStore.SaveRequest saveRequest;
     private FrontendIdentifier id;
     private ActorRef mockedActor;
 
-    @Before
-    public void setUp() throws Exception {
-        initialBehavior = createInitialBehaviorMock();
+    @BeforeEach
+    void beforeEach() throws Exception {
+        //persistenceId() in AbstractClientActorBehavior is final and can't be mocked
+        //use reflection to work around this
+        final var context = AbstractClientActorBehavior.class.getDeclaredField("context");
+        context.setAccessible(true);
+        context.set(initialBehavior, ctx);
+        final var persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId");
+        persistenceId.setAccessible(true);
+        persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1);
+
         system = ActorSystem.apply("system1");
         final ActorRef storeRef = system.registerExtension(Persistence.lookup()).snapshotStoreFor(null,
             ConfigFactory.empty());
@@ -62,25 +78,23 @@ public class ActorBehaviorTest {
         saveRequest = handleRecovery(null);
     }
 
-    @After
-    public void tearDown() {
+    @AfterEach
+    void afterEach() {
         TestKit.shutdownActorSystem(system);
     }
 
     @Test
-    public void testInitialBehavior() {
-        final InternalCommand<BackendInfo> cmd = mock(InternalCommand.class);
-        when(cmd.execute(any())).thenReturn(initialBehavior);
+    void testInitialBehavior() {
+        doReturn(initialBehavior).when(cmd).execute(any());
         mockedActor.tell(cmd, ActorRef.noSender());
         verify(cmd, timeout(1000)).execute(initialBehavior);
     }
 
     @Test
-    public void testCommandStashing() {
+    void testCommandStashing() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
-        final InternalCommand<BackendInfo> cmd = mock(InternalCommand.class);
-        when(cmd.execute(any())).thenReturn(initialBehavior);
+        doReturn(initialBehavior).when(cmd).execute(any());
         //send messages before recovery is completed
         mockedActor.tell(cmd, ActorRef.noSender());
         mockedActor.tell(cmd, ActorRef.noSender());
@@ -91,16 +105,16 @@ public class ActorBehaviorTest {
     }
 
     @Test
-    public void testRecoveryAfterRestart() {
+    void testRecoveryAfterRestart() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
         final MockedSnapshotStore.SaveRequest newSaveRequest =
                 handleRecovery(new SelectedSnapshot(saveRequest.getMetadata(), saveRequest.getSnapshot()));
-        Assert.assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId());
+        assertEquals(MEMBER_1_FRONTEND_TYPE_1, newSaveRequest.getMetadata().persistenceId());
     }
 
     @Test
-    public void testRecoveryAfterRestartFrontendIdMismatch() {
+    void testRecoveryAfterRestartFrontendIdMismatch() {
         system.stop(mockedActor);
         //start actor again
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
@@ -117,7 +131,7 @@ public class ActorBehaviorTest {
     }
 
     @Test
-    public void testRecoveryAfterRestartSaveSnapshotFail() {
+    void testRecoveryAfterRestartSaveSnapshotFail() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
         probe.watch(mockedActor);
@@ -130,7 +144,7 @@ public class ActorBehaviorTest {
     }
 
     @Test
-    public void testRecoveryAfterRestartDeleteSnapshotsFail() {
+    void testRecoveryAfterRestartDeleteSnapshotsFail() {
         system.stop(mockedActor);
         mockedActor = system.actorOf(MockedActor.props(id, initialBehavior));
         probe.watch(mockedActor);
@@ -144,21 +158,6 @@ public class ActorBehaviorTest {
         probe.expectNoMessage();
     }
 
-    @SuppressWarnings("unchecked")
-    private static ClientActorBehavior<BackendInfo> createInitialBehaviorMock() throws Exception {
-        final ClientActorBehavior<BackendInfo> initialBehavior = mock(ClientActorBehavior.class);
-        //persistenceId() in AbstractClientActorBehavior is final and can't be mocked
-        //use reflection to work around this
-        final Field context = AbstractClientActorBehavior.class.getDeclaredField("context");
-        context.setAccessible(true);
-        final AbstractClientActorContext ctx = mock(AbstractClientActorContext.class);
-        context.set(initialBehavior, ctx);
-        final Field persistenceId = AbstractClientActorContext.class.getDeclaredField("persistenceId");
-        persistenceId.setAccessible(true);
-        persistenceId.set(ctx, MEMBER_1_FRONTEND_TYPE_1);
-        return initialBehavior;
-    }
-
     private MockedSnapshotStore.SaveRequest handleRecovery(final SelectedSnapshot savedState) {
         probe.expectMsgClass(MockedSnapshotStore.LoadRequest.class);
         //offer snapshot
@@ -173,7 +172,6 @@ public class ActorBehaviorTest {
     }
 
     private static class MockedActor extends AbstractClientActor {
-
         private final ClientActorBehavior<?> initialBehavior;
         private final ClientActorConfig mockConfig = AccessClientUtil.newMockClientActorConfig();
 
@@ -196,5 +194,4 @@ public class ActorBehaviorTest {
             return mockConfig;
         }
     }
-
 }
index 7df1c2f045d98fcdfa87f37c71165462d0e2d5eb..1eca0b563d2c47a240a6fd8a620c7f195c5d2568 100644 (file)
@@ -17,14 +17,16 @@ import java.util.concurrent.TimeUnit;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import scala.concurrent.duration.FiniteDuration;
 
+@RunWith(MockitoJUnitRunner.class)
 public class ClientActorContextTest {
     private static final MemberName MEMBER_NAME = MemberName.forName("member-1");
     private static final FrontendType FRONTEND_TYPE =
@@ -41,7 +43,6 @@ public class ClientActorContextTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         probe = new TestProbe(system);
         ctx = new ClientActorContext(probe.ref(), PERSISTENCE_ID, system,
index 530ac5aaa978131945b364cf3488efe77f704419..819de8b8b4a43bc4927bea3454688a4fbeded39f 100644 (file)
@@ -7,15 +7,14 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static org.junit.Assert.assertNull;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.same;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 
-import java.util.Optional;
 import java.util.function.Consumer;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder;
@@ -28,7 +27,8 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 import org.opendaylight.controller.cluster.messaging.MessageSlice;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public class ConnectedClientConnectionTest
         extends AbstractClientConnectionTest<ConnectedClientConnection<BackendInfo>, BackendInfo> {
@@ -38,16 +38,15 @@ public class ConnectedClientConnectionTest
         final Consumer<Response<?, ?>> callback = mock(Consumer.class);
         connection.sendRequest(createRequest(replyToProbe.ref()), callback);
         final long now = context.ticker().read() + ConnectedClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS;
-        final Optional<Long> timeout = connection.checkTimeout(now);
-        Assert.assertNull(timeout);
+        assertNull(connection.checkTimeout(now));
     }
 
     @Override
     protected ConnectedClientConnection<BackendInfo> createConnection() {
-        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10);
+        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10);
         final ConnectingClientConnection<BackendInfo> connectingConn = new ConnectingClientConnection<>(context, 0L,
                 backend.getName());
-        return  new ConnectedClientConnection<>(connectingConn, backend);
+        return new ConnectedClientConnection<>(connectingConn, backend);
     }
 
     @Override
@@ -72,9 +71,10 @@ public class ConnectedClientConnectionTest
                 new TransactionIdentifier(new LocalHistoryIdentifier(CLIENT_ID, 0L), 0L);
         ModifyTransactionRequestBuilder reqBuilder =
                 new ModifyTransactionRequestBuilder(identifier, replyToProbe.ref());
-        reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.EMPTY, Builders.containerBuilder()
-                .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(
-                        QName.create("namespace", "localName"))).build()));
+        reqBuilder.addModification(new TransactionWrite(YangInstanceIdentifier.of(),
+            ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(QName.create("namespace", "localName")))
+                .build()));
         reqBuilder.setSequence(0L);
         final Request<?, ?> request = reqBuilder.build();
         connection.sendRequest(request, callback);
index a3c4b5c4e269c38d8a231b379304670545390a0e..f77500328d06a01ae1482c1a0fb39ebd1085e2d9 100644 (file)
@@ -24,7 +24,7 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.testkit.TestProbe;
 import com.google.common.testing.FakeTicker;
-import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
@@ -33,12 +33,11 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
 import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
@@ -52,9 +51,8 @@ import scala.concurrent.duration.FiniteDuration;
 /**
  * Test suite covering logic contained in {@link ConnectingClientConnection}. It assumes {@link ConnectionEntryTest}
  * passes.
- *
- * @author Robert Varga
  */
+@RunWith(MockitoJUnitRunner.class)
 public class ConnectingClientConnectionTest {
     private static class MockFailure extends RequestFailure<WritableIdentifier, MockFailure> {
         private static final long serialVersionUID = 1L;
@@ -64,8 +62,7 @@ public class ConnectingClientConnectionTest {
         }
 
         @Override
-        protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
-                final ABIVersion version) {
+        protected SerialForm<WritableIdentifier, MockFailure> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
@@ -88,7 +85,7 @@ public class ConnectingClientConnectionTest {
         }
 
         @Override
-        protected AbstractRequestProxy<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
+        protected Request.SerialForm<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
@@ -136,8 +133,6 @@ public class ConnectingClientConnectionTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
-
         doNothing().when(mockCallback).accept(any(MockFailure.class));
 
         ticker = new FakeTicker();
@@ -198,7 +193,7 @@ public class ConnectingClientConnectionTest {
     @Test
     public void testSendRequestNeedsBackend() {
         queue.sendRequest(mockRequest, mockCallback);
-        final Optional<Long> ret = queue.checkTimeout(ticker.read());
+        final OptionalLong ret = queue.checkTimeout(ticker.read());
         assertNotNull(ret);
         assertTrue(ret.isPresent());
     }
@@ -214,7 +209,7 @@ public class ConnectingClientConnectionTest {
         setupBackend();
 
         queue.sendRequest(mockRequest, mockCallback);
-        final Optional<Long> ret = queue.checkTimeout(ticker.read());
+        final OptionalLong ret = queue.checkTimeout(ticker.read());
         assertNotNull(ret);
         assertTrue(ret.isPresent());
         assertTransmit(mockRequest, 0);
@@ -222,7 +217,7 @@ public class ConnectingClientConnectionTest {
 
     @Test
     public void testRunTimeoutEmpty() {
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
+        OptionalLong ret = queue.checkTimeout(ticker.read());
         assertNotNull(ret);
         assertFalse(ret.isPresent());
     }
@@ -230,7 +225,7 @@ public class ConnectingClientConnectionTest {
     @Test
     public void testRunTimeoutWithoutShift() {
         queue.sendRequest(mockRequest, mockCallback);
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
+        OptionalLong ret = queue.checkTimeout(ticker.read());
         assertNotNull(ret);
         assertTrue(ret.isPresent());
     }
@@ -241,7 +236,7 @@ public class ConnectingClientConnectionTest {
 
         ticker.advance(AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS - 1);
 
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
+        OptionalLong ret = queue.checkTimeout(ticker.read());
         assertNotNull(ret);
         assertTrue(ret.isPresent());
     }
@@ -254,7 +249,7 @@ public class ConnectingClientConnectionTest {
 
         ticker.advance(AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS);
 
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
+        OptionalLong ret = queue.checkTimeout(ticker.read());
         assertNull(ret);
     }
 
@@ -266,8 +261,7 @@ public class ConnectingClientConnectionTest {
 
         ticker.advance(AbstractClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS + 1);
 
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
-        assertNull(ret);
+        assertNull(queue.checkTimeout(ticker.read()));
     }
 
     @SuppressWarnings({ "rawtypes", "unchecked" })
@@ -297,9 +291,7 @@ public class ConnectingClientConnectionTest {
         ticker.advance(AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS);
 
         // No problem
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
-        assertNotNull(ret);
-        assertFalse(ret.isPresent());
+        assertEquals(OptionalLong.empty(), queue.checkTimeout(ticker.read()));
     }
 
     @Test
@@ -307,9 +299,7 @@ public class ConnectingClientConnectionTest {
         ticker.advance(AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS + 1);
 
         // No problem
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
-        assertNotNull(ret);
-        assertFalse(ret.isPresent());
+        assertEquals(OptionalLong.empty(), queue.checkTimeout(ticker.read()));
     }
 
     @Test
@@ -355,8 +345,7 @@ public class ConnectingClientConnectionTest {
 
         ticker.advance(AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS - 11);
 
-        Optional<Long> ret = queue.checkTimeout(ticker.read());
-        assertNull(ret);
+        assertNull(queue.checkTimeout(ticker.read()));
     }
 
     private void setupBackend() {
index a76a36950a3d9a1e1e7b2e5f8a77c346bff8e290..b3bfdec66e9b0c184daeb7e4127495e59c66006d 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
-import static org.mockito.Matchers.any;
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.verify;
 
@@ -22,11 +22,10 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.ABIVersion;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestProxy;
 import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
@@ -35,9 +34,8 @@ import org.opendaylight.yangtools.concepts.WritableIdentifier;
 
 /**
  * Test suite covering logic contained in {@link ConnectionEntry}.
- *
- * @author Robert Varga
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ConnectionEntryTest {
     private static class MockFailure extends RequestFailure<WritableIdentifier, MockFailure> {
         private static final long serialVersionUID = 1L;
@@ -47,8 +45,7 @@ public class ConnectionEntryTest {
         }
 
         @Override
-        protected AbstractRequestFailureProxy<WritableIdentifier, MockFailure> externalizableProxy(
-                final ABIVersion version) {
+        protected SerialForm<WritableIdentifier, MockFailure> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
@@ -71,7 +68,7 @@ public class ConnectionEntryTest {
         }
 
         @Override
-        protected AbstractRequestProxy<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
+        protected Request.SerialForm<WritableIdentifier, MockRequest> externalizableProxy(final ABIVersion version) {
             return null;
         }
 
@@ -113,8 +110,6 @@ public class ConnectionEntryTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
-
         doNothing().when(mockCallback).accept(any(MockFailure.class));
 
         ticker = new FakeTicker();
index 3e26ae92cdc50ad1f5d0e6b540780e17d6696bc5..e0dd69ed432125c2322a819ae2dc93e39fb00d3b 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
@@ -14,7 +16,6 @@ import akka.persistence.SelectedSnapshot;
 import akka.persistence.SnapshotMetadata;
 import akka.persistence.SnapshotSelectionCriteria;
 import akka.persistence.snapshot.japi.SnapshotStore;
-import com.google.common.base.Preconditions;
 import java.util.Optional;
 import scala.concurrent.Future;
 import scala.concurrent.Promise;
@@ -67,9 +68,7 @@ class MockedSnapshotStore extends SnapshotStore {
     }
 
     private <T> Future<T> askDelegate(final MockedSnapshotStoreMessage message) {
-        Preconditions.checkNotNull(delegate, "Delegate ref wasn't sent");
-        final Future<Object> ask = Patterns.ask(delegate, message, TIMEOUT);
-        return transform(ask);
+        return transform(Patterns.ask(requireNonNull(delegate, "Delegate ref was not sent"), message, TIMEOUT));
     }
 
     private <T> Future<T> transform(final Future<Object> future) {
index 0b7d0c26559d999be4a9fdaaec2d4b2d733dcb4f..367acb3b6d74f068c0caf3d26ce2db583432d9d8 100644 (file)
@@ -7,14 +7,16 @@
  */
 package org.opendaylight.controller.cluster.access.client;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.after;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 
-import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.function.Consumer;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.commands.TransactionAbortSuccess;
@@ -35,14 +37,14 @@ public class ReconnectingClientConnectionTest
         final Consumer<Response<?, ?>> callback = mock(Consumer.class);
         connection.sendRequest(createRequest(replyToProbe.ref()), callback);
         final long now = context.ticker().read() + ConnectedClientConnection.DEFAULT_BACKEND_ALIVE_TIMEOUT_NANOS;
-        final Optional<Long> timeout = connection.checkTimeout(now);
-        Assert.assertNotNull(timeout);
-        Assert.assertTrue(timeout.isPresent());
+        final OptionalLong timeout = connection.checkTimeout(now);
+        assertNotNull(timeout);
+        assertTrue(timeout.isPresent());
     }
 
     @Override
     protected ReconnectingClientConnection<BackendInfo> createConnection() {
-        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.BORON, 10);
+        final BackendInfo backend = new BackendInfo(backendProbe.ref(), "test", 0L, ABIVersion.current(), 10);
         final ConnectingClientConnection<BackendInfo> connectingConn = new ConnectingClientConnection<>(context, 0L,
                 backend.getName());
         final ConnectedClientConnection<BackendInfo> connectedConn =
@@ -54,7 +56,7 @@ public class ReconnectingClientConnectionTest
     @Test
     public void testReconnectConnection() {
         final ClientActorBehavior<BackendInfo> behavior = mock(ClientActorBehavior.class);
-        Assert.assertSame(behavior, connection.lockedReconnect(behavior, mock(RequestException.class)));
+        assertSame(behavior, connection.lockedReconnect(behavior, mock(RequestException.class)));
     }
 
     @Override
index d6e9f5cec5065e52953f9ce0ea396cb29c8b2cdc..9974e1b1cd88b6e60cffac28f7bbbc5aa65a98d0 100644 (file)
@@ -8,9 +8,9 @@
 package org.opendaylight.controller.cluster.access.client;
 
 import static org.hamcrest.CoreMatchers.everyItem;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
@@ -64,7 +64,7 @@ public class TransmittingTransmitQueueTest extends AbstractTransmitQueueTest<Tra
     @Override
     protected TransmitQueue.Transmitting createQueue() {
         doReturn(false).when(mockMessageSlicer).slice(any());
-        backendInfo = new BackendInfo(probe.ref(), "test", 0L, ABIVersion.BORON, 3);
+        backendInfo = new BackendInfo(probe.ref(), "test", 0L, ABIVersion.current(), 3);
         return new TransmitQueue.Transmitting(new TransmitQueue.Halted(0), 0, backendInfo, now(), mockMessageSlicer);
     }
 
@@ -146,8 +146,8 @@ public class TransmittingTransmitQueueTest extends AbstractTransmitQueueTest<Tra
 
         Optional<TransmittedConnectionEntry> transmitted = queue.transmit(entry, now);
         assertTrue(transmitted.isPresent());
-        assertEquals(request, transmitted.get().getRequest());
-        assertEquals(callback, transmitted.get().getCallback());
+        assertEquals(request, transmitted.orElseThrow().getRequest());
+        assertEquals(callback, transmitted.orElseThrow().getCallback());
 
         final RequestEnvelope requestEnvelope = probe.expectMsgClass(RequestEnvelope.class);
         assertEquals(request, requestEnvelope.getMessage());
index c6ab4a17ffa93056a1ac4c156ab7df734463f58c..a28781c07d2c2b8e703ed6de09672338bf718566 100644 (file)
@@ -1,35 +1,41 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
-
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>cds-dom-api</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
     <packaging>bundle</packaging>
 
     <dependencies>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-actor_2.12</artifactId>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
         </dependency>
-
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>concepts</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>mdsal-dom-api</artifactId>
-        </dependency>
     </dependencies>
 
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <configuration>
+                    <instructions>
+                        <Automatic-Module-Name>org.opendaylight.controller.cluster.dom.api</Automatic-Module-Name>
+                    </instructions>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
     <scm>
         <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
         <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
diff --git a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/CDSDataTreeProducer.java b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/CDSDataTreeProducer.java
deleted file mode 100644 (file)
index 5aa88a2..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.dom.api;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-
-/**
- * An extension to {@link DOMDataTreeProducer}, which allows users access
- * to information about the backing shard.
- *
- * @author Robert Varga
- */
-@Beta
-public interface CDSDataTreeProducer extends DOMDataTreeProducer {
-    /**
-     * Return a {@link CDSShardAccess} handle. This handle will remain valid
-     * as long as this producer is operational. Returned handle can be accessed
-     * independently from this producer and is not subject to the usual access
-     * restrictions imposed on DOMDataTreeProducer state.
-     *
-     * @param subtree One of the subtrees to which are currently under control of this producer
-     * @return A shard access handle.
-     * @throws NullPointerException when subtree is null
-     * @throws IllegalArgumentException if the specified subtree is not controlled by this producer
-     * @throws IllegalStateException if this producer is no longer operational
-     * @throws IllegalThreadStateException if the access rules to this producer
-     *         are violated, for example if this producer is bound and this thread
-     *         is currently not executing from a listener context.
-     */
-    @NonNull CDSShardAccess getShardAccess(@NonNull DOMDataTreeIdentifier subtree);
-}
-
diff --git a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/CDSShardAccess.java b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/CDSShardAccess.java
deleted file mode 100644 (file)
index 2051e21..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.dom.api;
-
-import com.google.common.annotations.Beta;
-import java.util.concurrent.CompletionStage;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Unprivileged access interface to shard information. Provides read-only access to operational details about a CDS
- * shard.
- *
- * @author Robert Varga
- */
-@Beta
-public interface CDSShardAccess {
-    /**
-     * Return the shard identifier.
-     *
-     * @return Shard identifier.
-     * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which this
-     *         {@link CDSShardAccess} was obtained is no longer valid.
-     */
-    @NonNull DOMDataTreeIdentifier getShardIdentifier();
-
-    /**
-     * Return the shard leader location relative to the local node.
-     *
-     * @return Shard leader location.
-     * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which this
-     *         {@link CDSShardAccess} was obtained is no longer valid.
-     */
-    @NonNull LeaderLocation getLeaderLocation();
-
-    /**
-     * Request the shard leader to be moved to the local node. The request will be evaluated against shard state and
-     * satisfied if leader movement is possible. If current shard policy or state prevents the movement from happening,
-     * the returned {@link CompletionStage} will report an exception.
-     *
-     * <p>
-     * This is a one-time operation, which does not prevent further movement happening in future. Even if this request
-     * succeeds, there is no guarantee that the leader will remain local in face of failures, shutdown or any future
-     * movement requests from other nodes.
-     *
-     * <p>
-     * Note that due to the asynchronous nature of CDS, the leader may no longer be local by the time the returned
-     * {@link CompletionStage} reports success.
-     *
-     * @return A {@link CompletionStage} representing the request.
-     * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which this
-     *         {@link CDSShardAccess} was obtained is no longer valid.
-     */
-    @NonNull CompletionStage<Void> makeLeaderLocal();
-
-    /**
-     * Register a listener to shard location changes. Each listener object can be registered at most once.
-     *
-     * @param listener Listener object
-     * @return A {@link LeaderLocationListenerRegistration} for the listener.
-     * @throws IllegalArgumentException if the specified listener is already registered.
-     * @throws IllegalStateException if the {@link CDSDataTreeProducer} from which this
-     *         {@link CDSShardAccess} was obtained is no longer valid.
-     * @throws NullPointerException if listener is null.
-     */
-    @NonNull <L extends LeaderLocationListener> LeaderLocationListenerRegistration<L> registerLeaderLocationListener(
-            @NonNull L listener);
-}
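
For reference, a minimal sketch (not part of this change) of how the removed CDSDataTreeProducer/CDSShardAccess pair was typically consumed. The producer and subtree are assumed to be obtained elsewhere; the helper class and its output are purely illustrative.

package org.opendaylight.controller.cluster.dom.api;

import java.util.concurrent.CompletionStage;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;

// Illustrative helper only; not part of this commit.
final class ShardAccessExample {
    private ShardAccessExample() {
        // utility class
    }

    static CompletionStage<Void> moveLeaderHere(final CDSDataTreeProducer producer,
            final DOMDataTreeIdentifier subtree) {
        // The handle remains valid for as long as the producer is operational.
        final CDSShardAccess access = producer.getShardAccess(subtree);
        // Report where the shard leader currently sits relative to this node.
        System.out.println("Leader location for " + access.getShardIdentifier()
            + ": " + access.getLeaderLocation());
        // Best-effort, one-time request to move the shard leader to this node.
        return access.makeLeaderLocal();
    }
}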
index 696fec275316b7f231babae454f0b5df80c85a23..cd20186349fce062f4a4bd65d00398104274ae87 100644 (file)
@@ -7,14 +7,9 @@
  */
 package org.opendaylight.controller.cluster.dom.api;
 
-import com.google.common.annotations.Beta;
-
 /**
  * Enumeration of possible shard leader locations relative to the local node.
- *
- * @author Robert Varga
  */
-@Beta
 public enum LeaderLocation {
     /**
      * The leader is co-located on this node.
index 4f5fd4e83e1bac0da0021b98eb81578dfacab7b4..69e34ca44cdb64f10e41ea1e1a7d52facfbdebaf 100644 (file)
@@ -7,17 +7,12 @@
  */
 package org.opendaylight.controller.cluster.dom.api;
 
-import com.google.common.annotations.Beta;
-import java.util.EventListener;
 import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Listener for shard leader location changes.
- *
- * @author Robert Varga
  */
-@Beta
-public interface LeaderLocationListener extends EventListener {
+public interface LeaderLocationListener {
     /**
      * Invoked when shard leader location changes.
      *
diff --git a/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java b/opendaylight/md-sal/cds-dom-api/src/main/java/org/opendaylight/controller/cluster/dom/api/LeaderLocationListenerRegistration.java
deleted file mode 100644 (file)
index 61f6426..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.dom.api;
-
-import com.google.common.annotations.Beta;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-/**
- * Registration of a {@link LeaderLocationListener}.
- *
- * @author Robert Varga
- *
- * @param <T> Listener type
- */
-@Beta
-public interface LeaderLocationListenerRegistration<T extends LeaderLocationListener> extends ListenerRegistration<T> {
-
-}
diff --git a/opendaylight/md-sal/cds-mgmt-api/pom.xml b/opendaylight/md-sal/cds-mgmt-api/pom.xml
new file mode 100644 (file)
index 0000000..2ea3c28
--- /dev/null
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>mdsal-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../parent</relativePath>
+    </parent>
+
+    <artifactId>cds-mgmt-api</artifactId>
+    <packaging>bundle</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <configuration>
+                    <instructions>
+                        <Import-Package>
+                            javax.management;resolution:=optional,
+                            *
+                        </Import-Package>
+                    </instructions>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <scm>
+        <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
+        <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+        <tag>HEAD</tag>
+        <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
+    </scm>
+</project>
diff --git a/opendaylight/md-sal/cds-mgmt-api/src/main/java/module-info.java b/opendaylight/md-sal/cds-mgmt-api/src/main/java/module-info.java
new file mode 100644 (file)
index 0000000..bba909b
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+module org.opendaylight.controller.cluster.mgmt.api {
+    exports org.opendaylight.controller.cluster.mgmt.api;
+    // FIXME: 4.0.0: collapse these packages
+    exports org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+    exports org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+
+    // Annotation-only dependencies
+    requires static transitive java.management;
+    requires static transitive org.eclipse.jdt.annotation;
+}
@@ -5,16 +5,17 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
 
-package org.opendaylight.controller.md.sal.dom.broker.impl.jmx;
+import javax.management.MXBean;
 
 /**
  * MXBean interface for retrieving write Tx commit statistics.
  *
  * @author Thomas Pantelis
  */
+@MXBean
 public interface CommitStatsMXBean {
-
     /**
      * Returns the total number of commits that have occurred.
      *
@@ -7,12 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
 
+import javax.management.MXBean;
 
 /**
  * MXBean interface for data store configuration.
  *
  * @author Thomas Pantelis
  */
+@MXBean
 public interface DatastoreConfigurationMXBean {
     long getShardTransactionIdleTimeoutInSeconds();
 
@@ -28,6 +30,8 @@ public interface DatastoreConfigurationMXBean {
 
     int getShardSnapshotDataThresholdPercentage();
 
+    int getShardSnapshotDataThreshold();
+
     long getShardSnapshotBatchCount();
 
     long getShardTransactionCommitTimeoutInSeconds();
@@ -46,13 +50,5 @@ public interface DatastoreConfigurationMXBean {
 
     boolean getTransactionContextDebugEnabled();
 
-    int getMaxShardDataChangeExecutorPoolSize();
-
-    int getMaxShardDataChangeExecutorQueueSize();
-
-    int getMaxShardDataChangeListenerQueueSize();
-
-    int getMaxShardDataStoreExecutorQueueSize();
-
     int getMaximumMessageSliceSize();
 }
@@ -7,11 +7,27 @@
  */
 package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
 
+import javax.management.MXBean;
+
 /**
  * JMX bean for general datastore info.
  *
  * @author Thomas Pantelis
  */
+@MXBean
 public interface DatastoreInfoMXBean {
+
     double getTransactionCreationRateLimit();
+
+    /**
+     * Return the number of {@code AskTimeoutException}s encountered by the datastore.
+     *
+     * @return Number of exceptions encountered
+     */
+    long getAskTimeoutExceptionCount();
+
+    /**
+     * Reset the number of {@code AskTimeoutException}s encountered by the datastore.
+     */
+    void resetAskTimeoutExceptionCount();
 }
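
A hedged sketch of how the new ask-timeout counter could be read over JMX. The ObjectName used below is a placeholder, not the name the datastore actually registers under.

import java.lang.management.ManagementFactory;
import javax.management.JMX;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBean;

// Hypothetical JMX client; the ObjectName is illustrative only.
final class DatastoreInfoReader {
    private DatastoreInfoReader() {
        // utility class
    }

    static long readAskTimeoutCount() throws Exception {
        final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        final ObjectName name = new ObjectName("org.example:type=DatastoreInfo"); // placeholder name
        final DatastoreInfoMXBean proxy = JMX.newMXBeanProxy(server, name, DatastoreInfoMXBean.class);
        final long count = proxy.getAskTimeoutExceptionCount();
        // The counter can be cleared once it has been sampled.
        proxy.resetAskTimeoutExceptionCount();
        return count;
    }
}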
@@ -8,13 +8,18 @@
 package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
 
 import java.util.List;
-import org.opendaylight.controller.cluster.datastore.messages.DataTreeListenerInfo;
+import javax.management.MXBean;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
 
 /**
  * MXBean interface for reporting shard data tree change listener information.
  *
  * @author Thomas Pantelis
  */
+@MXBean
+@NonNullByDefault
 public interface ShardDataTreeListenerInfoMXBean {
+
     List<DataTreeListenerInfo> getDataTreeChangeListenerInfo();
 }
@@ -5,17 +5,18 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
 
 import java.util.List;
-import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
+import javax.management.MXBean;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
 
 /**
  * MXBean interface for shard stats.
  *
  * @author syedbahm
  */
+@MXBean
 public interface ShardStatsMXBean {
 
     String getShardName();
@@ -28,8 +29,6 @@ public interface ShardStatsMXBean {
 
     long getReadOnlyTransactionCount();
 
-    long getWriteOnlyTransactionCount();
-
     long getReadWriteTransactionCount();
 
     long getLastLogIndex();
@@ -5,27 +5,30 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.datastore.messages;
+package org.opendaylight.controller.cluster.mgmt.api;
 
-import com.google.common.base.Preconditions;
-import java.beans.ConstructorProperties;
+import static java.util.Objects.requireNonNull;
+
+import javax.management.ConstructorParameters;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 
 /**
- * Response to a {@link GetInfo} query from a data tree listener actor.
+ * Information about a registered listener.
  *
  * @author Thomas Pantelis
  */
-public class DataTreeListenerInfo {
+@NonNullByDefault
+public final class DataTreeListenerInfo {
     private final String listener;
     private final String registeredPath;
     private final boolean isEnabled;
     private final long notificationCount;
 
-    @ConstructorProperties({"listener","registeredPath", "isEnabled", "notificationCount"})
+    @ConstructorParameters({"listener","registeredPath", "isEnabled", "notificationCount"})
     public DataTreeListenerInfo(final String listener, final String registeredPath, final boolean isEnabled,
             final long notificationCount) {
-        this.listener = Preconditions.checkNotNull(listener);
-        this.registeredPath = Preconditions.checkNotNull(registeredPath);
+        this.listener = requireNonNull(listener);
+        this.registeredPath = requireNonNull(registeredPath);
         this.isEnabled = isEnabled;
         this.notificationCount = notificationCount;
     }
@@ -5,16 +5,20 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.raft.client.messages;
+package org.opendaylight.controller.cluster.mgmt.api;
 
-import java.beans.ConstructorProperties;
+import static java.util.Objects.requireNonNull;
+
+import javax.management.ConstructorParameters;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 
 /**
  * A bean class containing a snapshot of information for a follower returned from GetOnDemandRaftStats.
  *
  * @author Thomas Pantelis
  */
-public class FollowerInfo {
+@NonNullByDefault
+public final class FollowerInfo {
     private final String id;
     private final long nextIndex;
     private final long matchIndex;
@@ -22,10 +26,10 @@ public class FollowerInfo {
     private final String timeSinceLastActivity;
     private final boolean isVoting;
 
-    @ConstructorProperties({"id","nextIndex", "matchIndex", "active", "timeSinceLastActivity", "voting"})
-    public FollowerInfo(String id, long nextIndex, long matchIndex, boolean active, String timeSinceLastActivity,
-            boolean voting) {
-        this.id = id;
+    @ConstructorParameters({"id","nextIndex", "matchIndex", "active", "timeSinceLastActivity", "voting"})
+    public FollowerInfo(final String id, final long nextIndex, final long matchIndex, final boolean active,
+            final String timeSinceLastActivity, final boolean voting) {
+        this.id = requireNonNull(id);
         this.nextIndex = nextIndex;
         this.matchIndex = matchIndex;
         this.isActive = active;
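
The two hunks above replace java.beans.ConstructorProperties with javax.management.ConstructorParameters, so MXBean open-type reconstruction of these immutable beans no longer pulls in the java.desktop module. A self-contained sketch of that pattern (names are illustrative, not taken from this change):

import javax.management.ConstructorParameters;
import javax.management.MXBean;

// Illustrative sketch: an @MXBean interface whose attribute type is an immutable bean
// reconstructed via javax.management.ConstructorParameters.
public final class ConstructorParametersExample {
    @MXBean
    public interface ExampleStatsMXBean {
        ExampleInfo getInfo();
    }

    public static final class ExampleInfo {
        private final String name;
        private final long count;

        @ConstructorParameters({"name", "count"})
        public ExampleInfo(final String name, final long count) {
            this.name = name;
            this.count = count;
        }

        public String getName() {
            return name;
        }

        public long getCount() {
            return count;
        }
    }

    private ConstructorParametersExample() {
        // utility holder
    }
}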
diff --git a/opendaylight/md-sal/eos-dom-akka/pom.xml b/opendaylight/md-sal/eos-dom-akka/pom.xml
new file mode 100644 (file)
index 0000000..dce797c
--- /dev/null
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+  ~
+  ~ This program and the accompanying materials are made available under the
+  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>mdsal-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../parent</relativePath>
+    </parent>
+
+    <artifactId>eos-dom-akka</artifactId>
+    <packaging>bundle</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-eos-common-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-eos-dom-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-dom-codec-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal.model</groupId>
+            <artifactId>general-entity</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>concepts</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.guicedee.services</groupId>
+            <artifactId>javax.inject</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
+            <scope>provided</scope>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.scala-lang</groupId>
+            <artifactId>scala-library</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-testkit_2.13</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-actor-testkit-typed_2.13</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.awaitility</groupId>
+            <artifactId>awaitility</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-dom-codec</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-generator</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-runtime-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-runtime-spi</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-singleton-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-singleton-impl</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.opendaylight.mdsal.model</groupId>
+            <artifactId>ietf-topology</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+    </dependencies>
+</project>
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipService.java
new file mode 100644 (file)
index 0000000..332fb44
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.ActorSystem;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Scheduler;
+import akka.actor.typed.javadsl.Adapter;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.cluster.typed.Cluster;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.function.Function;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.opendaylight.controller.cluster.ActorSystemProvider;
+import org.opendaylight.controller.eos.akka.bootstrap.EOSMain;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.Terminate;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipState;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipStateReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.UnregisterListener;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTree;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntities;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntity;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwner;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.binding.RpcOutput;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * DOMEntityOwnershipService implementation backed by native Akka clustering constructs. We use distributed-data
+ * to track all registered candidates and cluster-singleton to maintain a single cluster-wide authority which selects
+ * the appropriate owners.
+ */
+@Singleton
+@Component(immediate = true, service = { DOMEntityOwnershipService.class, DataCenterControl.class })
+public class AkkaEntityOwnershipService implements DOMEntityOwnershipService, DataCenterControl, AutoCloseable {
+    private static final Logger LOG = LoggerFactory.getLogger(AkkaEntityOwnershipService.class);
+    private static final String DATACENTER_PREFIX = "dc";
+    private static final Duration DATACENTER_OP_TIMEOUT = Duration.ofSeconds(20);
+    private static final Duration QUERY_TIMEOUT = Duration.ofSeconds(10);
+
+    private final Set<DOMEntity> registeredEntities = ConcurrentHashMap.newKeySet();
+    private final String localCandidate;
+    private final Scheduler scheduler;
+    private final String datacenter;
+
+    private final ActorRef<BootstrapCommand> bootstrap;
+    private final RunningContext runningContext;
+    private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+    private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+    private final ActorRef<StateCheckerCommand> ownerStateChecker;
+    protected final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+    private final BindingInstanceIdentifierCodec iidCodec;
+
+    private Registration reg;
+
+    @VisibleForTesting
+    protected AkkaEntityOwnershipService(final ActorSystem actorSystem, final BindingCodecTree codecTree)
+            throws ExecutionException, InterruptedException {
+        final var typedActorSystem = Adapter.toTyped(actorSystem);
+        scheduler = typedActorSystem.scheduler();
+
+        final Cluster cluster = Cluster.get(typedActorSystem);
+        datacenter = cluster.selfMember().dataCenter();
+
+        localCandidate = cluster.selfMember().getRoles().stream()
+            .filter(role -> !role.contains(DATACENTER_PREFIX))
+            .findFirst()
+            .orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+
+        iidCodec = codecTree.getInstanceIdentifierCodec();
+        bootstrap = Adapter.spawn(actorSystem, Behaviors.setup(
+                context -> EOSMain.create(iidCodec)), "EOSBootstrap");
+
+        final CompletionStage<RunningContext> ask = AskPattern.ask(bootstrap,
+                GetRunningContext::new, Duration.ofSeconds(5), scheduler);
+        runningContext = ask.toCompletableFuture().get();
+
+        candidateRegistry = runningContext.getCandidateRegistry();
+        listenerRegistry = runningContext.getListenerRegistry();
+        ownerStateChecker = runningContext.getOwnerStateChecker();
+        ownerSupervisor = runningContext.getOwnerSupervisor();
+    }
+
+    @Inject
+    @Activate
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "Non-final for testing 'this' reference is expected to be stable at registration time")
+    public AkkaEntityOwnershipService(@Reference final ActorSystemProvider actorProvider,
+            @Reference final RpcProviderService rpcProvider, @Reference final BindingCodecTree codecTree)
+            throws ExecutionException, InterruptedException {
+        this(actorProvider.getActorSystem(), codecTree);
+
+        reg = rpcProvider.registerRpcImplementations(
+            (GetEntity) this::getEntity,
+            (GetEntities) this::getEntities,
+            (GetEntityOwner) this::getEntityOwner);
+    }
+
+    @PreDestroy
+    @Deactivate
+    @Override
+    public void close() throws InterruptedException, ExecutionException {
+        if (reg != null) {
+            reg.close();
+            reg = null;
+        }
+        AskPattern.ask(bootstrap, Terminate::new, Duration.ofSeconds(5), scheduler).toCompletableFuture().get();
+    }
+
+    @Override
+    public Registration registerCandidate(final DOMEntity entity)
+            throws CandidateAlreadyRegisteredException {
+        if (!registeredEntities.add(entity)) {
+            throw new CandidateAlreadyRegisteredException(entity);
+        }
+
+        final RegisterCandidate msg = new RegisterCandidate(entity, localCandidate);
+        LOG.debug("Registering candidate with message: {}", msg);
+        candidateRegistry.tell(msg);
+
+        return new CandidateRegistration(entity, this);
+    }
+
+    @Override
+    public Registration registerListener(final String entityType, final DOMEntityOwnershipListener listener) {
+        LOG.debug("Registering listener {} for type {}", listener, entityType);
+        listenerRegistry.tell(new RegisterListener(entityType, listener));
+
+        return new ListenerRegistration(listener, entityType, this);
+    }
+
+    @Override
+    public Optional<EntityOwnershipState> getOwnershipState(final DOMEntity entity) {
+        LOG.debug("Retrieving ownership state for {}", entity);
+
+        final CompletionStage<GetOwnershipStateReply> result = AskPattern.ask(ownerStateChecker,
+            replyTo -> new GetOwnershipState(entity, replyTo),
+            Duration.ofSeconds(5), scheduler);
+
+        final GetOwnershipStateReply reply;
+        try {
+            reply = result.toCompletableFuture().get();
+        } catch (final InterruptedException | ExecutionException exception) {
+            LOG.warn("Failed to retrieve ownership state for {}", entity, exception);
+            return Optional.empty();
+        }
+
+        return Optional.ofNullable(reply.getOwnershipState());
+    }
+
+    @Override
+    public boolean isCandidateRegistered(final DOMEntity forEntity) {
+        return registeredEntities.contains(forEntity);
+    }
+
+    @Override
+    public ListenableFuture<Empty> activateDataCenter() {
+        LOG.debug("Activating datacenter: {}", datacenter);
+
+        return toListenableFuture("Activate",
+            AskPattern.ask(ownerSupervisor, ActivateDataCenter::new, DATACENTER_OP_TIMEOUT, scheduler));
+    }
+
+    @Override
+    public ListenableFuture<Empty> deactivateDataCenter() {
+        LOG.debug("Deactivating datacenter: {}", datacenter);
+        return toListenableFuture("Deactivate",
+            AskPattern.ask(ownerSupervisor, DeactivateDataCenter::new, DATACENTER_OP_TIMEOUT, scheduler));
+    }
+
+    @VisibleForTesting
+    final ListenableFuture<RpcResult<GetEntitiesOutput>> getEntities(final GetEntitiesInput input) {
+        return toRpcFuture(AskPattern.ask(ownerStateChecker, GetEntitiesRequest::new, QUERY_TIMEOUT, scheduler),
+                reply -> reply.toOutput(iidCodec));
+    }
+
+    @VisibleForTesting
+    final ListenableFuture<RpcResult<GetEntityOutput>> getEntity(final GetEntityInput input) {
+        return toRpcFuture(AskPattern.ask(ownerStateChecker,
+            (final ActorRef<GetEntityReply> replyTo) -> new GetEntityRequest(replyTo, input), QUERY_TIMEOUT, scheduler),
+            GetEntityReply::toOutput);
+    }
+
+    @VisibleForTesting
+    final ListenableFuture<RpcResult<GetEntityOwnerOutput>> getEntityOwner(final GetEntityOwnerInput input) {
+        return toRpcFuture(AskPattern.ask(ownerStateChecker,
+            (final ActorRef<GetEntityOwnerReply> replyTo) -> new GetEntityOwnerRequest(replyTo, input), QUERY_TIMEOUT,
+            scheduler), GetEntityOwnerReply::toOutput);
+    }
+
+    void unregisterCandidate(final DOMEntity entity) {
+        LOG.debug("Unregistering candidate for {}", entity);
+
+        if (registeredEntities.remove(entity)) {
+            candidateRegistry.tell(new UnregisterCandidate(entity, localCandidate));
+        }
+    }
+
+    void unregisterListener(final String entityType, final DOMEntityOwnershipListener listener) {
+        LOG.debug("Unregistering listener {} for type {}", listener, entityType);
+
+        listenerRegistry.tell(new UnregisterListener(entityType, listener));
+    }
+
+    @VisibleForTesting
+    RunningContext getRunningContext() {
+        return runningContext;
+    }
+
+    private static <R extends StateCheckerReply, O extends RpcOutput> ListenableFuture<RpcResult<O>> toRpcFuture(
+            final CompletionStage<R> stage, final Function<R, O> outputFunction) {
+
+        final SettableFuture<RpcResult<O>> future = SettableFuture.create();
+        stage.whenComplete((reply, failure) -> {
+            if (failure != null) {
+                future.setException(failure);
+            } else {
+                future.set(RpcResultBuilder.success(outputFunction.apply(reply)).build());
+            }
+        });
+        return future;
+    }
+
+    private static ListenableFuture<Empty> toListenableFuture(final String op, final CompletionStage<?> stage) {
+        final SettableFuture<Empty> future = SettableFuture.create();
+        stage.whenComplete((reply, failure) -> {
+            if (failure != null) {
+                LOG.warn("{} DataCenter failed", op, failure);
+                future.setException(failure);
+            } else {
+                LOG.debug("{} DataCenter successful", op);
+                future.set(Empty.value());
+            }
+        });
+        return future;
+    }
+}
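
A minimal, hypothetical consumer of this service through its DOMEntityOwnershipService contract. The entity type and name are placeholders, and the two-argument DOMEntity(String, String) convenience constructor is assumed.

import java.util.Optional;
import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
import org.opendaylight.yangtools.concepts.Registration;

// Illustrative consumer; not part of this commit.
final class EosUsageExample {
    private EosUsageExample() {
        // utility class
    }

    static void runOnce(final DOMEntityOwnershipService eos) throws CandidateAlreadyRegisteredException {
        final DOMEntity entity = new DOMEntity("example-type", "example-name");

        // Volunteer this node as a candidate owner for the entity.
        final Registration candidate = eos.registerCandidate(entity);

        // Poll the current ownership state; registerListener() is the push-based alternative.
        final Optional<EntityOwnershipState> state = eos.getOwnershipState(entity);
        state.ifPresent(s -> System.out.println("Ownership state: " + s));

        // Closing the registration retracts the candidacy.
        candidate.close();
    }
}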
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/CandidateRegistration.java
new file mode 100644 (file)
index 0000000..fd80ee1
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static java.util.Objects.requireNonNull;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+
+final class CandidateRegistration extends AbstractObjectRegistration<DOMEntity> {
+    private final AkkaEntityOwnershipService service;
+
+    CandidateRegistration(final DOMEntity instance, final AkkaEntityOwnershipService service) {
+        super(instance);
+        this.service = requireNonNull(service);
+    }
+
+    @Override
+    protected void removeRegistration() {
+        service.unregisterCandidate(getInstance());
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/DataCenterControl.java
new file mode 100644 (file)
index 0000000..f70abc9
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.Empty;
+
+/**
+ * Service used to bring up/down the Entity Ownership Service in individual datacenters.
+ * An active datacenter, in native EOS terms, is one whose candidates can become owners of entities.
+ * Additionally, the singleton component making ownership decisions runs only in an active datacenter.
+ *
+ * <p>
+ * The caller must make sure that only one datacenter is active at a time, otherwise the singleton actors
+ * in each datacenter will interfere with each other. The methods provided by this service can be called
+ * on any node of the datacenter to be activated. Datacenters only need to be brought up when using a
+ * non-default datacenter or multiple datacenters.
+ */
+public interface DataCenterControl {
+    /**
+     * Activates the Entity Ownership Service in the datacenter in which this method is called.
+     *
+     * @return Completion future
+     */
+    @NonNull ListenableFuture<Empty> activateDataCenter();
+
+    /**
+     * Deactivates the Entity Ownership Service in the datacenter in which this method is called.
+     *
+     * @return Completion future
+     */
+    @NonNull ListenableFuture<Empty> deactivateDataCenter();
+}
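
A hedged sketch of invoking DataCenterControl from a node in the datacenter being activated, using a plain Guava callback; the DataCenterControl instance is assumed to be injected from elsewhere.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import org.opendaylight.controller.eos.akka.DataCenterControl;
import org.opendaylight.yangtools.yang.common.Empty;

// Illustrative only; not part of this commit.
final class DataCenterActivationExample {
    private DataCenterActivationExample() {
        // utility class
    }

    static void activate(final DataCenterControl control) {
        Futures.addCallback(control.activateDataCenter(), new FutureCallback<Empty>() {
            @Override
            public void onSuccess(final Empty result) {
                // Candidates in this datacenter may now become entity owners.
                System.out.println("Datacenter activated");
            }

            @Override
            public void onFailure(final Throwable cause) {
                System.err.println("Datacenter activation failed: " + cause);
            }
        }, MoreExecutors.directExecutor());
    }
}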
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/ListenerRegistration.java
new file mode 100644 (file)
index 0000000..8d101c2
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+
+final class ListenerRegistration extends AbstractObjectRegistration<DOMEntityOwnershipListener> {
+    private final AkkaEntityOwnershipService service;
+    private final @NonNull String entityType;
+
+    ListenerRegistration(final DOMEntityOwnershipListener listener, final String entityType,
+            final AkkaEntityOwnershipService service) {
+        super(listener);
+        this.entityType = requireNonNull(entityType);
+        this.service = requireNonNull(service);
+    }
+
+    public String entityType() {
+        return entityType;
+    }
+
+    @Override
+    protected void removeRegistration() {
+        service.unregisterListener(entityType, getInstance());
+    }
+
+    @Override
+    protected MoreObjects.ToStringHelper addToStringAttributes(final MoreObjects.ToStringHelper toStringHelper) {
+        return toStringHelper.add("entityType", entityType);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/EOSMain.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/EOSMain.java
new file mode 100644 (file)
index 0000000..dab699e
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.SupervisorStrategy;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.typed.Cluster;
+import akka.cluster.typed.ClusterSingleton;
+import akka.cluster.typed.SingletonActor;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.Terminate;
+import org.opendaylight.controller.eos.akka.owner.checker.OwnerStateChecker;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.IdleSupervisor;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistryInit;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerRegistry;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.yangtools.yang.common.Empty;
+
+public final class EOSMain extends AbstractBehavior<BootstrapCommand> {
+    private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+    private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+    private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+    private final ActorRef<StateCheckerCommand> ownerStateChecker;
+
+    private EOSMain(final ActorContext<BootstrapCommand> context, final BindingInstanceIdentifierCodec iidCodec) {
+        super(context);
+
+        final String role = Cluster.get(context.getSystem()).selfMember().getRoles().iterator().next();
+
+        listenerRegistry = context.spawn(EntityTypeListenerRegistry.create(role), "ListenerRegistry");
+
+        final ClusterSingleton clusterSingleton = ClusterSingleton.get(context.getSystem());
+        // start the initial sync behavior that switches to the regular one after syncing
+        ownerSupervisor = clusterSingleton.init(
+                SingletonActor.of(Behaviors.supervise(IdleSupervisor.create(iidCodec))
+                        .onFailure(SupervisorStrategy.restart()), "OwnerSupervisor"));
+        candidateRegistry = context.spawn(CandidateRegistryInit.create(ownerSupervisor), "CandidateRegistry");
+
+        ownerStateChecker = context.spawn(OwnerStateChecker.create(role, ownerSupervisor, iidCodec),
+                "OwnerStateChecker");
+    }
+
+    public static Behavior<BootstrapCommand> create(final BindingInstanceIdentifierCodec iidCodec) {
+        return Behaviors.setup(context -> new EOSMain(context, iidCodec));
+    }
+
+    @Override
+    public Receive<BootstrapCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(GetRunningContext.class, this::onGetRunningContext)
+                .onMessage(Terminate.class, this::onTerminate)
+                .build();
+    }
+
+    private Behavior<BootstrapCommand> onGetRunningContext(final GetRunningContext request) {
+        request.getReplyTo().tell(
+                new RunningContext(listenerRegistry, candidateRegistry, ownerStateChecker, ownerSupervisor));
+        return this;
+    }
+
+    private Behavior<BootstrapCommand> onTerminate(final Terminate request) {
+        request.getReplyTo().tell(Empty.value());
+        return Behaviors.stopped();
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/BootstrapCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/BootstrapCommand.java
new file mode 100644 (file)
index 0000000..122a53f
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+public abstract class BootstrapCommand {
+    BootstrapCommand() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/GetRunningContext.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/GetRunningContext.java
new file mode 100644 (file)
index 0000000..6804dcb
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+
+public final class GetRunningContext extends BootstrapCommand {
+    private final ActorRef<RunningContext> replyTo;
+
+    public GetRunningContext(final ActorRef<RunningContext> replyTo) {
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public ActorRef<RunningContext> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/RunningContext.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/RunningContext.java
new file mode 100644 (file)
index 0000000..6bbffca
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+
+public final class RunningContext extends BootstrapCommand {
+    private final @NonNull ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+    private final @NonNull ActorRef<CandidateRegistryCommand> candidateRegistry;
+    private final @NonNull ActorRef<StateCheckerCommand> ownerStateChecker;
+    private final @NonNull ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+    public RunningContext(final ActorRef<TypeListenerRegistryCommand> listenerRegistry,
+                          final ActorRef<CandidateRegistryCommand> candidateRegistry,
+                          final ActorRef<StateCheckerCommand> ownerStateChecker,
+                          final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+        this.listenerRegistry = requireNonNull(listenerRegistry);
+        this.candidateRegistry = requireNonNull(candidateRegistry);
+        this.ownerStateChecker = requireNonNull(ownerStateChecker);
+        this.ownerSupervisor = requireNonNull(ownerSupervisor);
+    }
+
+    public @NonNull ActorRef<TypeListenerRegistryCommand> getListenerRegistry() {
+        return listenerRegistry;
+    }
+
+    public @NonNull ActorRef<CandidateRegistryCommand> getCandidateRegistry() {
+        return candidateRegistry;
+    }
+
+    public @NonNull ActorRef<StateCheckerCommand> getOwnerStateChecker() {
+        return ownerStateChecker;
+    }
+
+    public @NonNull ActorRef<OwnerSupervisorCommand> getOwnerSupervisor() {
+        return ownerSupervisor;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/Terminate.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/bootstrap/command/Terminate.java
new file mode 100644 (file)
index 0000000..116b5e4
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.bootstrap.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.common.Empty;
+
+public final class Terminate extends BootstrapCommand {
+    private final @NonNull ActorRef<Empty> replyTo;
+
+    public Terminate(final ActorRef<Empty> replyTo) {
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public @NonNull ActorRef<Empty> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/EntityRpcHandler.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/EntityRpcHandler.java
new file mode 100644 (file)
index 0000000..6828009
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker;
+
+import static com.google.common.base.Verify.verifyNotNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletionStage;
+import org.opendaylight.controller.eos.akka.owner.checker.command.AbstractEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetCandidates;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetCandidatesForEntity;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnerForEntity;
+import org.opendaylight.controller.eos.akka.owner.checker.command.OwnerDataResponse;
+import org.opendaylight.controller.eos.akka.owner.checker.command.SingleEntityOwnerDataResponse;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Short-lived actor that is spawned purely to execute RPCs from the entity-owners model.
+ */
+public final class EntityRpcHandler extends AbstractBehavior<StateCheckerCommand> {
+    private static final Logger LOG = LoggerFactory.getLogger(EntityRpcHandler.class);
+    private static final Duration ASK_TIMEOUT = Duration.ofSeconds(5);
+
+    private final ReplicatorMessageAdapter<StateCheckerCommand, LWWRegister<String>> ownerReplicator;
+    private final ReplicatorMessageAdapter<StateCheckerCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+
+    private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+    private final ActorRef<Replicator.Command> replicator;
+
+    private final BindingInstanceIdentifierCodec iidCodec;
+
+    private final Map<DOMEntity, Set<String>> currentCandidates = new HashMap<>();
+    private final Map<DOMEntity, String> currentOwners = new HashMap<>();
+    private final Map<String, DOMEntity> entityLookup = new HashMap<>();
+    private int toSync = -1;
+
+    public EntityRpcHandler(final ActorContext<StateCheckerCommand> context,
+                            final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+                            final BindingInstanceIdentifierCodec iidCodec) {
+        super(context);
+
+        replicator = DistributedData.get(context.getSystem()).replicator();
+        ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, ASK_TIMEOUT);
+        candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, ASK_TIMEOUT);
+        this.ownerSupervisor = ownerSupervisor;
+
+        this.iidCodec = iidCodec;
+    }
+
+    public static Behavior<StateCheckerCommand> create(final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+                                                       final BindingInstanceIdentifierCodec iidCodec) {
+        return Behaviors.setup(ctx -> new EntityRpcHandler(ctx, ownerSupervisor, iidCodec));
+    }
+
+    @Override
+    public Receive<StateCheckerCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(GetEntitiesRequest.class, this::onGetEntities)
+                .onMessage(GetEntityRequest.class, this::onGetEntity)
+                .onMessage(GetEntityOwnerRequest.class, this::onGetEntityOwner)
+                .onMessage(GetCandidates.class, this::onCandidatesReceived)
+                .onMessage(GetCandidatesForEntity.class, this::onCandidatesForEntityReceived)
+                .onMessage(OwnerDataResponse.class, this::onOwnerDataReceived)
+                .onMessage(SingleEntityOwnerDataResponse.class, this::onSingleOwnerReceived)
+                .onMessage(GetOwnerForEntity.class, this::onReplyWithOwner)
+                .build();
+    }
+
+    private Behavior<StateCheckerCommand> onGetEntities(final GetEntitiesRequest request) {
+        LOG.debug("{} : Executing get-entities rpc.", getContext().getSelf());
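+        // Ask the owner supervisor first; if that fails, fall back to reading local distributed-data.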
+        final CompletionStage<GetEntitiesBackendReply> result = AskPattern.askWithStatus(
+                ownerSupervisor,
+                GetEntitiesBackendRequest::new,
+                ASK_TIMEOUT,
+                getContext().getSystem().scheduler()
+        );
+
+        result.whenComplete((response, throwable) -> {
+            if (response != null) {
+                request.getReplyTo().tell(new GetEntitiesReply(response));
+            } else {
+                // fall back to distributed-data
+                LOG.debug("{} : Get-entities failed with owner supervisor, falling back to distributed-data.",
+                        getContext().getSelf(), throwable);
+                getCandidates(request.getReplyTo());
+            }
+        });
+        return this;
+    }
+
+    private Behavior<StateCheckerCommand> onGetEntity(final GetEntityRequest request) {
+        LOG.debug("{} : Executing get-entity rpc.", getContext().getSelf());
+        final CompletionStage<GetEntityBackendReply> result = AskPattern.askWithStatus(
+                ownerSupervisor,
+                replyTo -> new GetEntityBackendRequest(replyTo, request.getEntity()),
+                ASK_TIMEOUT,
+                getContext().getSystem().scheduler()
+        );
+
+        result.whenComplete((response, throwable) -> {
+            if (response != null) {
+                request.getReplyTo().tell(new GetEntityReply(response));
+            } else {
+                // fall back to distributed-data
+                LOG.debug("{} : Get-entity failed with owner supervisor, falling back to distributed-data.",
+                        getContext().getSelf(), throwable);
+                getCandidatesForEntity(extractEntity(request), request.getReplyTo());
+            }
+        });
+        return this;
+    }
+
+    private Behavior<StateCheckerCommand> onGetEntityOwner(final GetEntityOwnerRequest request) {
+        LOG.debug("{} : Executing get-entity-owner rpc.", getContext().getSelf());
+        final CompletionStage<GetEntityOwnerBackendReply> result = AskPattern.askWithStatus(
+                ownerSupervisor,
+                replyTo -> new GetEntityOwnerBackendRequest(replyTo, request.getEntity()),
+                ASK_TIMEOUT,
+                getContext().getSystem().scheduler()
+        );
+
+        result.whenComplete((response, throwable) -> {
+            if (response != null) {
+                request.getReplyTo().tell(new GetEntityOwnerReply(response.getOwner()));
+            } else {
+                // fall back to distributed-data
+                LOG.debug("{} : Get-entity-owner failed with owner supervisor, falling back to distributed-data.",
+                        getContext().getSelf(), throwable);
+                getOwnerForEntity(extractEntity(request), request.getReplyTo());
+            }
+        });
+        return this;
+    }
+
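+    // The fallbacks below use Replicator.readLocal(), i.e. only the local replica is consulted when the
+    // owner supervisor cannot be reached.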
+    private void getCandidates(final ActorRef<GetEntitiesReply> replyTo) {
+        candidateReplicator.askGet(
+                askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo),
+                replicatorResponse -> new GetCandidates(replicatorResponse, replyTo));
+    }
+
+    private void getCandidatesForEntity(final DOMEntity entity, final ActorRef<GetEntityReply> replyTo) {
+        candidateReplicator.askGet(
+                askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo),
+                replicatorResponse -> new GetCandidatesForEntity(replicatorResponse, entity, replyTo));
+    }
+
+    private void getOwnerForEntity(final DOMEntity entity, final ActorRef<GetEntityOwnerReply> replyTo) {
+        ownerReplicator.askGet(
+                askReplyTo -> new Replicator.Get<>(
+                        new LWWRegisterKey<>(entity.toString()), Replicator.readLocal(), askReplyTo),
+                replicatorResponse -> new GetOwnerForEntity(replicatorResponse, entity, replyTo));
+    }
+
+    private Behavior<StateCheckerCommand> onReplyWithOwner(final GetOwnerForEntity message) {
+        final Replicator.GetResponse<LWWRegister<String>> response = message.getResponse();
+        if (response instanceof Replicator.GetSuccess) {
+            message.getReplyTo().tell(new GetEntityOwnerReply(
+                    ((Replicator.GetSuccess<LWWRegister<String>>) response).dataValue().getValue()));
+        } else {
+            LOG.debug("Unable to retrieve owner for entity: {}, response: {}", message.getEntity(), response);
+            message.getReplyTo().tell(new GetEntityOwnerReply(""));
+        }
+
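+        // Each handler instance serves a single RPC invocation, so terminate once the reply is out.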
+        return Behaviors.stopped();
+    }
+
+    private Behavior<StateCheckerCommand> onCandidatesReceived(final GetCandidates message) {
+        final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = message.getResponse();
+        if (response instanceof Replicator.GetSuccess) {
+            return extractCandidates((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response,
+                    message.getReplyTo());
+        }
+
+        LOG.debug("Unable to retrieve candidates from distributed-data. Response: {}", response);
+        message.getReplyTo().tell(new GetEntitiesReply(Collections.emptyMap(), Collections.emptyMap()));
+        return Behaviors.stopped();
+    }
+
+    private Behavior<StateCheckerCommand> extractCandidates(
+            final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> response,
+            final ActorRef<GetEntitiesReply> replyTo) {
+        final ORMap<DOMEntity, ORSet<String>> candidates = response.get(CandidateRegistry.KEY);
+        candidates.getEntries().forEach((key, value) -> currentCandidates.put(key, new HashSet<>(value.getElements())));
+
+        toSync = candidates.keys().size();
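+        // Issue one owner lookup per entity; onOwnerDataReceived() counts them down and replies when done.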
+        for (final DOMEntity entity : candidates.keys().getElements()) {
+            entityLookup.put(entity.toString(), entity);
+
+            ownerReplicator.askGet(
+                    askReplyTo -> new Replicator.Get<>(
+                            new LWWRegisterKey<>(entity.toString()),
+                            Replicator.readLocal(),
+                            askReplyTo),
+                    replicatorResponse -> new OwnerDataResponse(replicatorResponse, replyTo));
+        }
+
+        return this;
+    }
+
+    private Behavior<StateCheckerCommand> onOwnerDataReceived(final OwnerDataResponse message) {
+        final Replicator.GetResponse<LWWRegister<String>> response = message.getResponse();
+        if (response instanceof Replicator.GetSuccess) {
+            handleOwnerRsp((Replicator.GetSuccess<LWWRegister<String>>) response);
+        } else if (response instanceof Replicator.NotFound) {
+            handleNotFoundOwnerRsp((Replicator.NotFound<LWWRegister<String>>) response);
+        } else {
+            LOG.debug("Owner retrieval failed, response: {}", response);
+        }
+
+        // count down the responses; after the last one, reply to the RPC and shut down
+        toSync--;
+        if (toSync == 0) {
+            final GetEntitiesReply getEntitiesReply = new GetEntitiesReply(currentCandidates, currentOwners);
+            message.getReplyTo().tell(getEntitiesReply);
+            return Behaviors.stopped();
+        }
+
+        return this;
+    }
+
+    private Behavior<StateCheckerCommand> onCandidatesForEntityReceived(final GetCandidatesForEntity message) {
+        LOG.debug("Received CandidatesForEntity: {}", message);
+        final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = message.getResponse();
+        if (response instanceof Replicator.GetSuccess) {
+            return extractCandidatesForEntity((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response,
+                    message.getEntity(), message.getReplyTo());
+        } else {
+            LOG.debug("Unable to retrieve candidates for entity: {}. Response: {}", message.getEntity(), response);
+            message.getReplyTo().tell(new GetEntityReply(null, Collections.emptySet()));
+            return this;
+        }
+    }
+
+    private Behavior<StateCheckerCommand> extractCandidatesForEntity(
+            final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> response, final DOMEntity entity,
+            final ActorRef<GetEntityReply> replyTo) {
+        final Map<DOMEntity, ORSet<String>> entries = response.get(CandidateRegistry.KEY).getEntries();
+        currentCandidates.put(entity, entries.get(entity).getElements());
+
+        entityLookup.put(entity.toString(), entity);
+        ownerReplicator.askGet(
+                askReplyTo -> new Replicator.Get<>(
+                        new LWWRegisterKey<>(entity.toString()),
+                        Replicator.readLocal(),
+                        askReplyTo),
+                replicatorResponse -> new SingleEntityOwnerDataResponse(replicatorResponse, entity, replyTo));
+
+        return this;
+    }
+
+    private void handleOwnerRsp(final Replicator.GetSuccess<LWWRegister<String>> rsp) {
+        final DOMEntity entity = entityLookup.get(rsp.key().id());
+        final String owner = rsp.get(rsp.key()).getValue();
+
+        currentOwners.put(entity, owner);
+    }
+
+    private static void handleNotFoundOwnerRsp(final Replicator.NotFound<LWWRegister<String>> rsp) {
+        LOG.debug("Owner not found. {}", rsp);
+    }
+
+    private Behavior<StateCheckerCommand> onSingleOwnerReceived(final SingleEntityOwnerDataResponse message) {
+        LOG.debug("Received owner for single entity: {}", message);
+        final Replicator.GetResponse<LWWRegister<String>> response = message.getResponse();
+        final GetEntityReply reply;
+        if (response instanceof Replicator.GetSuccess) {
+            reply = new GetEntityReply(((Replicator.GetSuccess<LWWRegister<String>>) response).dataValue().getValue(),
+                    currentCandidates.get(message.getEntity()));
+        } else {
+            reply = new GetEntityReply(null, currentCandidates.get(message.getEntity()));
+        }
+
+        message.getReplyTo().tell(reply);
+        return Behaviors.stopped();
+    }
+
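+    // The entity name in the request carries either a binding InstanceIdentifier or a plain string;
+    // the former is converted through the codec, the latter is used as-is.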
+    private DOMEntity extractEntity(final AbstractEntityRequest<?> request) {
+        final var name = request.getName();
+        final var iid = name.getInstanceIdentifier();
+        if (iid != null) {
+            return new DOMEntity(request.getType().getValue(), iidCodec.fromBinding(iid));
+        }
+        final var str = verifyNotNull(name.getString(), "Unhandled entity name %s", name);
+        return new DOMEntity(request.getType().getValue(), str);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/OwnerStateChecker.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/OwnerStateChecker.java
new file mode 100644 (file)
index 0000000..6d418a8
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.Replicator.Get;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetFailure;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetSuccess;
+import akka.cluster.ddata.typed.javadsl.Replicator.NotFound;
+import akka.cluster.ddata.typed.javadsl.Replicator.ReadMajority;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntitiesRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityOwnerRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipState;
+import org.opendaylight.controller.eos.akka.owner.checker.command.GetOwnershipStateReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.InternalGetReply;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class OwnerStateChecker extends AbstractBehavior<StateCheckerCommand> {
+    private static final Logger LOG = LoggerFactory.getLogger(OwnerStateChecker.class);
+    private static final Duration GET_OWNERSHIP_TIMEOUT = Duration.ofSeconds(5);
+    private static final Duration UNEXPECTED_ASK_TIMEOUT = Duration.ofSeconds(5);
+
+    private final ReplicatorMessageAdapter<StateCheckerCommand, LWWRegister<String>> ownerReplicator;
+    private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+    private final BindingInstanceIdentifierCodec iidCodec;
+    private final ActorRef<Replicator.Command> replicator;
+    private final String localMember;
+
+    private OwnerStateChecker(final ActorContext<StateCheckerCommand> context,
+                              final String localMember,
+                              final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+                              final BindingInstanceIdentifierCodec iidCodec) {
+        super(context);
+        this.localMember = requireNonNull(localMember);
+        this.ownerSupervisor = requireNonNull(ownerSupervisor);
+        this.iidCodec = requireNonNull(iidCodec);
+        replicator = DistributedData.get(context.getSystem()).replicator();
+        ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, UNEXPECTED_ASK_TIMEOUT);
+    }
+
+    public static Behavior<StateCheckerCommand> create(final String localMember,
+                                                       final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+                                                       final BindingInstanceIdentifierCodec iidCodec) {
+        return Behaviors.setup(ctx -> new OwnerStateChecker(ctx, localMember, ownerSupervisor, iidCodec));
+    }
+
+    @Override
+    public Receive<StateCheckerCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(GetOwnershipState.class, this::onGetOwnershipState)
+                .onMessage(InternalGetReply.class, this::respondWithState)
+                .onMessage(GetEntitiesRequest.class, this::executeEntityRpc)
+                .onMessage(GetEntityRequest.class, this::executeEntityRpc)
+                .onMessage(GetEntityOwnerRequest.class, this::executeEntityRpc)
+                .build();
+    }
+
+    private Behavior<StateCheckerCommand> onGetOwnershipState(final GetOwnershipState message) {
+        ownerReplicator.askGet(
+                askReplyTo -> new Get<>(
+                        new LWWRegisterKey<>(message.getEntity().toString()),
+                        new ReadMajority(GET_OWNERSHIP_TIMEOUT),
+                        askReplyTo),
+                reply -> new InternalGetReply(reply, message.getEntity(), message.getReplyTo()));
+        return this;
+    }
+
+    private Behavior<StateCheckerCommand> respondWithState(final InternalGetReply reply) {
+        final GetResponse<LWWRegister<String>> response = reply.getResponse();
+        if (response instanceof NotFound) {
+            LOG.debug("Data for owner not found, most likely no owner has been picked for entity: {}",
+                    reply.getEntity());
+            reply.getReplyTo().tell(new GetOwnershipStateReply(null));
+        } else if (response instanceof GetFailure) {
+            LOG.warn("Failure retrieving data for entity: {}", reply.getEntity());
+            reply.getReplyTo().tell(new GetOwnershipStateReply(null));
+        } else if (response instanceof GetSuccess) {
+            final String owner = ((GetSuccess<LWWRegister<String>>) response).get(response.key()).getValue();
+            LOG.debug("Data for owner received: {}, owner: {}", response, owner);
+
+            final boolean isOwner = localMember.equals(owner);
+            final boolean hasOwner = !owner.isEmpty();
+
+            reply.getReplyTo().tell(new GetOwnershipStateReply(EntityOwnershipState.from(isOwner, hasOwner)));
+        }
+        return this;
+    }
+
+    private Behavior<StateCheckerCommand> executeEntityRpc(final StateCheckerRequest request) {
+        final ActorRef<StateCheckerCommand> rpcHandler =
+                getContext().spawnAnonymous(EntityRpcHandler.create(ownerSupervisor, iidCodec));
+
+        LOG.debug("Executing entity rpc: {} in actor: {}", request, rpcHandler);
+        rpcHandler.tell(request);
+        return this;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/AbstractEntityRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/AbstractEntityRequest.java
new file mode 100644 (file)
index 0000000..ae1dfc2
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesBuilder;
+
+public abstract class AbstractEntityRequest<T extends StateCheckerReply> extends StateCheckerRequest<T> {
+    private static final long serialVersionUID = 1L;
+
+    private final @NonNull EntityType type;
+    private final @NonNull EntityName name;
+
+    AbstractEntityRequest(final ActorRef<T> replyTo, final EntityId entity) {
+        super(replyTo);
+        type = entity.requireType();
+        name = entity.requireName();
+    }
+
+    public final @NonNull EntityId getEntity() {
+        return new EntitiesBuilder().setType(type).setName(name).build();
+    }
+
+    public final @NonNull EntityType getType() {
+        return type;
+    }
+
+    public final @NonNull EntityName getName() {
+        return name;
+    }
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("type", type).add("name", name).toString();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetCandidates.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetCandidates.java
new file mode 100644 (file)
index 0000000..50dddb3
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetCandidates extends StateCheckerCommand {
+    private final @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+    private final @NonNull ActorRef<GetEntitiesReply> replyTo;
+
+    public GetCandidates(final GetResponse<ORMap<DOMEntity, ORSet<String>>> response,
+                         final ActorRef<GetEntitiesReply> replyTo) {
+        this.response = response;
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+
+    public @NonNull ActorRef<GetEntitiesReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetCandidatesForEntity.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetCandidatesForEntity.java
new file mode 100644 (file)
index 0000000..14a545b
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetCandidatesForEntity extends StateCheckerCommand {
+    private final @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+    private final @NonNull DOMEntity entity;
+    private final @NonNull ActorRef<GetEntityReply> replyTo;
+
+    public GetCandidatesForEntity(final GetResponse<ORMap<DOMEntity, ORSet<String>>> response,
+                                  final DOMEntity entity, final ActorRef<GetEntityReply> replyTo) {
+        this.response = response;
+        this.entity = requireNonNull(entity);
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+
+    public @NonNull DOMEntity getEntity() {
+        return entity;
+    }
+
+    public @NonNull ActorRef<GetEntityReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntitiesReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntitiesReply.java
new file mode 100644 (file)
index 0000000..c66f84d
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static com.google.common.base.Verify.verify;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSetMultimap;
+import com.google.common.collect.Iterables;
+import java.io.Serializable;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsal.core.general.entity.rev150930.Entity;
+import org.opendaylight.yangtools.yang.binding.util.BindingMap;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+
+public final class GetEntitiesReply extends StateCheckerReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ImmutableSetMultimap<DOMEntity, String> candidates;
+    private final ImmutableMap<DOMEntity, String> owners;
+
+    public GetEntitiesReply(final GetEntitiesBackendReply response) {
+        this.owners = response.getOwners();
+        this.candidates = response.getCandidates();
+    }
+
+    public GetEntitiesReply(final Map<DOMEntity, Set<String>> candidates, final Map<DOMEntity, String> owners) {
+        final ImmutableSetMultimap.Builder<DOMEntity, String> builder = ImmutableSetMultimap.builder();
+        for (Map.Entry<DOMEntity, Set<String>> entry : candidates.entrySet()) {
+            builder.putAll(entry.getKey(), entry.getValue());
+        }
+        this.candidates = builder.build();
+        this.owners = ImmutableMap.copyOf(owners);
+    }
+
+    public @NonNull GetEntitiesOutput toOutput(final BindingInstanceIdentifierCodec iidCodec) {
+        final Set<DOMEntity> entities = new HashSet<>();
+        entities.addAll(owners.keySet());
+        entities.addAll(candidates.keySet());
+
+        return new GetEntitiesOutputBuilder()
+            .setEntities(entities.stream()
+                .map(entity -> {
+                    final EntitiesBuilder eb = new EntitiesBuilder()
+                        .setType(new EntityType(entity.getType()))
+                        .setName(extractName(entity, iidCodec))
+                        .setCandidateNodes(candidates.get(entity).stream()
+                            .map(NodeName::new).collect(Collectors.toUnmodifiableList()));
+
+                    final String owner = owners.get(entity);
+                    if (owner != null) {
+                        eb.setOwnerNode(new NodeName(owner));
+                    }
+                    return eb.build();
+                })
+                .collect(BindingMap.toMap()))
+            .build();
+    }
+
+    /**
+     * If the entity is a general entity, shorten the name to only the last path argument, otherwise return the
+     * full YIID path encoded as string.
+     *
+     * @param entity Entity to extract the name from
+     * @param iidCodec codec used to encode the entity name back to an InstanceIdentifier if needed
+     * @return Extracted name
+     */
+    private static EntityName extractName(final DOMEntity entity, final BindingInstanceIdentifierCodec iidCodec) {
+        final var id = entity.getIdentifier();
+        if (id.isEmpty() || !id.getPathArguments().get(0).getNodeType().equals(Entity.QNAME)) {
+            return new EntityName(iidCodec.toBinding(id));
+        }
+
+        final PathArgument last = id.getLastPathArgument();
+        verify(last instanceof NodeIdentifierWithPredicates, "Unexpected last argument %s", last);
+        final Object value = Iterables.getOnlyElement(((NodeIdentifierWithPredicates) last).values());
+        verify(value instanceof String, "Unexpected predicate value %s", value);
+        return new EntityName((String) value);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntitiesRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntitiesRequest.java
new file mode 100644 (file)
index 0000000..8894c27
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+
+public final class GetEntitiesRequest extends StateCheckerRequest<GetEntitiesReply> {
+    private static final long serialVersionUID = 1L;
+
+    public GetEntitiesRequest(final ActorRef<GetEntitiesReply> replyTo) {
+        super(replyTo);
+    }
+
+    @Override
+    public String toString() {
+        return "GetEntitiesRequest{} " + super.toString();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityOwnerReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityOwnerReply.java
new file mode 100644 (file)
index 0000000..10ccde7
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+
+public final class GetEntityOwnerReply extends StateCheckerReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final String owner;
+
+    public GetEntityOwnerReply(final @Nullable String owner) {
+        this.owner = owner;
+    }
+
+    public @NonNull GetEntityOwnerOutput toOutput() {
+        final GetEntityOwnerOutputBuilder builder = new GetEntityOwnerOutputBuilder();
+        if (owner != null) {
+            builder.setOwnerNode(new NodeName(owner));
+        }
+        return builder.build();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityOwnerRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityOwnerRequest.java
new file mode 100644 (file)
index 0000000..dedccee
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityOwnerRequest extends AbstractEntityRequest<GetEntityOwnerReply> {
+    private static final long serialVersionUID = 1L;
+
+    public GetEntityOwnerRequest(final ActorRef<GetEntityOwnerReply> replyTo, final EntityId entity) {
+        super(replyTo, entity);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityReply.java
new file mode 100644 (file)
index 0000000..2185a86
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import com.google.common.collect.ImmutableSet;
+import java.io.Serializable;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendReply;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+
+public final class GetEntityReply extends StateCheckerReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ImmutableSet<String> candidates;
+    private final String owner;
+
+    public GetEntityReply(final GetEntityBackendReply backendReply) {
+        candidates = backendReply.getCandidates();
+        owner = backendReply.getOwner();
+    }
+
+    public GetEntityReply(final @Nullable String owner, final @Nullable Set<String> candidates) {
+        this.owner = owner;
+        this.candidates = candidates == null ? ImmutableSet.of() : ImmutableSet.copyOf(candidates);
+    }
+
+    public @NonNull GetEntityOutput toOutput() {
+        final GetEntityOutputBuilder builder = new GetEntityOutputBuilder();
+        if (owner != null) {
+            builder.setOwnerNode(new NodeName(owner));
+        }
+        return builder
+            .setCandidateNodes(candidates.stream().map(NodeName::new).collect(Collectors.toUnmodifiableList()))
+            .build();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetEntityRequest.java
new file mode 100644 (file)
index 0000000..2d9d46b
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityRequest extends AbstractEntityRequest<GetEntityReply> {
+    private static final long serialVersionUID = 1L;
+
+    public GetEntityRequest(final ActorRef<GetEntityReply> replyTo, final EntityId entity) {
+        super(replyTo, entity);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnerForEntity.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnerForEntity.java
new file mode 100644 (file)
index 0000000..71105cd
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class GetOwnerForEntity extends StateCheckerCommand {
+    private final @NonNull GetResponse<LWWRegister<String>> response;
+    private final DOMEntity entity;
+    private final ActorRef<GetEntityOwnerReply> replyTo;
+
+    public GetOwnerForEntity(final @NonNull GetResponse<LWWRegister<String>> response,
+                             final DOMEntity entity, final ActorRef<GetEntityOwnerReply> replyTo) {
+        this.response = response;
+        this.entity = entity;
+        this.replyTo = replyTo;
+    }
+
+    public GetResponse<LWWRegister<String>> getResponse() {
+        return response;
+    }
+
+    public DOMEntity getEntity() {
+        return entity;
+    }
+
+    public ActorRef<GetEntityOwnerReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnershipState.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnershipState.java
new file mode 100644 (file)
index 0000000..617b65e
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetOwnershipState extends StateCheckerCommand {
+    private final @NonNull DOMEntity entity;
+    private final @NonNull ActorRef<GetOwnershipStateReply> replyTo;
+
+    public GetOwnershipState(final DOMEntity entity, final ActorRef<GetOwnershipStateReply> replyTo) {
+        this.entity = requireNonNull(entity);
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public @NonNull DOMEntity getEntity() {
+        return entity;
+    }
+
+    public @NonNull ActorRef<GetOwnershipStateReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnershipStateReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/GetOwnershipStateReply.java
new file mode 100644 (file)
index 0000000..58fb5a0
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+
+public final class GetOwnershipStateReply extends StateCheckerReply {
+    private final @Nullable EntityOwnershipState ownershipState;
+
+    public GetOwnershipStateReply(final EntityOwnershipState ownershipState) {
+        this.ownershipState = ownershipState;
+    }
+
+    public @Nullable EntityOwnershipState getOwnershipState() {
+        return ownershipState;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/InternalGetReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/InternalGetReply.java
new file mode 100644 (file)
index 0000000..1a8f305
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class InternalGetReply extends StateCheckerCommand {
+    private final @NonNull GetResponse<LWWRegister<String>> response;
+    private final @NonNull ActorRef<GetOwnershipStateReply> replyTo;
+    private final @NonNull DOMEntity entity;
+
+    public InternalGetReply(final GetResponse<LWWRegister<String>> response, final DOMEntity entity,
+                            final ActorRef<GetOwnershipStateReply> replyTo) {
+        this.response = requireNonNull(response);
+        this.entity = requireNonNull(entity);
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+        return response;
+    }
+
+    public @NonNull DOMEntity getEntity() {
+        return entity;
+    }
+
+    public @NonNull ActorRef<GetOwnershipStateReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/OwnerDataResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/OwnerDataResponse.java
new file mode 100644 (file)
index 0000000..b7b612f
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public class OwnerDataResponse extends StateCheckerCommand {
+    private final @NonNull GetResponse<LWWRegister<String>> response;
+    private final ActorRef<GetEntitiesReply> replyTo;
+
+    public OwnerDataResponse(final GetResponse<LWWRegister<String>> response,
+                             final ActorRef<GetEntitiesReply> replyTo) {
+        this.response = requireNonNull(response);
+        this.replyTo = replyTo;
+    }
+
+    public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+        return response;
+    }
+
+    public ActorRef<GetEntitiesReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/SingleEntityOwnerDataResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/SingleEntityOwnerDataResponse.java
new file mode 100644 (file)
index 0000000..9453995
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class SingleEntityOwnerDataResponse extends StateCheckerCommand {
+    private final @NonNull GetResponse<LWWRegister<String>> response;
+    private final DOMEntity entity;
+    private final ActorRef<GetEntityReply> replyTo;
+
+    public SingleEntityOwnerDataResponse(final @NonNull GetResponse<LWWRegister<String>> response,
+                                         final DOMEntity entity,
+                                         final ActorRef<GetEntityReply> replyTo) {
+        this.response = requireNonNull(response);
+        this.entity = requireNonNull(entity);
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+        return response;
+    }
+
+    public DOMEntity getEntity() {
+        return entity;
+    }
+
+    public ActorRef<GetEntityReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerCommand.java
new file mode 100644 (file)
index 0000000..e6b5412
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+public abstract class StateCheckerCommand {
+    StateCheckerCommand() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerReply.java
new file mode 100644 (file)
index 0000000..39347cc
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+public abstract class StateCheckerReply {
+    StateCheckerReply() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/checker/command/StateCheckerRequest.java
new file mode 100644 (file)
index 0000000..62be328
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.checker.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+
+public abstract class StateCheckerRequest<T extends StateCheckerReply> extends StateCheckerCommand
+        implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final @NonNull ActorRef<T> replyTo;
+
+    StateCheckerRequest(final ActorRef<T> replyTo) {
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public final @NonNull ActorRef<T> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/AbstractSupervisor.java
new file mode 100644 (file)
index 0000000..a4366b5
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.time.Duration;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+
+abstract class AbstractSupervisor extends AbstractBehavior<OwnerSupervisorCommand> {
+
+    final ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "getContext() is non-final")
+    AbstractSupervisor(final ActorContext<OwnerSupervisorCommand> context) {
+        super(context);
+
+        final ActorRef<Replicator.Command> replicator = DistributedData.get(getContext().getSystem()).replicator();
+        candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5));
+    }
+
+    Behavior<OwnerSupervisorCommand> onClearCandidatesForMember(final ClearCandidatesForMember command) {
+        getLogger().debug("Clearing candidates for member: {}", command.getCandidate());
+
+        candidateReplicator.askGet(
+                askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY,
+                        new Replicator.ReadMajority(Duration.ofSeconds(15)), askReplyTo),
+                response -> new ClearCandidates(response, command));
+
+        return this;
+    }
+
+    Behavior<OwnerSupervisorCommand> finishClearCandidates(final ClearCandidates command) {
+        if (command.getResponse() instanceof Replicator.GetSuccess) {
+            getLogger().debug("Retrieved candidate data, clearing candidates for {}",
+                    command.getOriginalMessage().getCandidate());
+
+            getContext().spawnAnonymous(CandidateCleaner.create()).tell(command);
+        } else {
+            getLogger().debug("Unable to retrieve candidate data for {}, no candidates present, sending empty reply",
+                    command.getOriginalMessage().getCandidate());
+            command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse());
+        }
+
+        return this;
+    }
+
+    abstract Logger getLogger();
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/CandidateCleaner.java
new file mode 100644 (file)
index 0000000..8ce9adb
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.SelfUniqueAddress;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import java.util.Map;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesUpdateResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Actor that any of the supervisor implementations can spawn to clear a member's candidates once candidate
+ * retrieval succeeds. Once the candidates for the member are cleared (or immediately, if none need to be cleared),
+ * the actor stops itself.
+ */
+public final class CandidateCleaner extends AbstractBehavior<OwnerSupervisorCommand> {
+    private static final Logger LOG = LoggerFactory.getLogger(CandidateCleaner.class);
+
+    private final ReplicatorMessageAdapter<OwnerSupervisorCommand, ORMap<DOMEntity, ORSet<String>>> candidateReplicator;
+    private final SelfUniqueAddress node;
+
+    private int remaining = 0;
+
+    private CandidateCleaner(final ActorContext<OwnerSupervisorCommand> context) {
+        super(context);
+
+        final ActorRef<Replicator.Command> replicator = DistributedData.get(getContext().getSystem()).replicator();
+        candidateReplicator = new ReplicatorMessageAdapter<>(getContext(), replicator, Duration.ofSeconds(5));
+        node = DistributedData.get(context.getSystem()).selfUniqueAddress();
+    }
+
+    public static Behavior<OwnerSupervisorCommand> create() {
+        return Behaviors.setup(CandidateCleaner::new);
+    }
+
+    @Override
+    public Receive<OwnerSupervisorCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(ClearCandidates.class, this::onClearCandidates)
+                .onMessage(ClearCandidatesUpdateResponse.class, this::onClearCandidatesUpdateResponse)
+                .build();
+    }
+
+    private Behavior<OwnerSupervisorCommand> onClearCandidates(final ClearCandidates command) {
+        LOG.debug("Clearing candidates for member: {}", command.getOriginalMessage().getCandidate());
+
+        final ORMap<DOMEntity, ORSet<String>> candidates =
+                ((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) command.getResponse())
+                        .get(CandidateRegistry.KEY);
+
+        for (final Map.Entry<DOMEntity, ORSet<String>> entry : candidates.getEntries().entrySet()) {
+            if (entry.getValue().contains(command.getOriginalMessage().getCandidate())) {
+                LOG.debug("Removing {} from {}", command.getOriginalMessage().getCandidate(), entry.getKey());
+
+                remaining++;
+                candidateReplicator.askUpdate(
+                        askReplyTo -> new Replicator.Update<>(
+                                CandidateRegistry.KEY,
+                                ORMap.empty(),
+                                new Replicator.WriteMajority(Duration.ofSeconds(10)),
+                                askReplyTo,
+                                map -> map.update(node, entry.getKey(), ORSet.empty(),
+                                        value -> value.remove(node, command.getOriginalMessage().getCandidate()))),
+                        updateResponse -> new ClearCandidatesUpdateResponse(updateResponse,
+                                command.getOriginalMessage().getReplyTo()));
+            }
+        }
+
+        if (remaining == 0) {
+            LOG.debug("Did not clear any candidates for {}", command.getOriginalMessage().getCandidate());
+            command.getOriginalMessage().getReplyTo().tell(new ClearCandidatesResponse());
+            return Behaviors.stopped();
+        }
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onClearCandidatesUpdateResponse(
+            final ClearCandidatesUpdateResponse command) {
+        remaining--;
+        if (remaining == 0) {
+            LOG.debug("Last update response for candidate removal received, replying to: {}", command.getReplyTo());
+            command.getReplyTo().tell(new ClearCandidatesResponse());
+            return Behaviors.stopped();
+        } else {
+            LOG.debug("Have still {} outstanding requests after {}", remaining, command.getResponse());
+        }
+        return this;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/IdleSupervisor.java
new file mode 100644 (file)
index 0000000..3028552
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.Member;
+import akka.cluster.typed.Cluster;
+import akka.pattern.StatusReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorRequest;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Initial supervisor behavior that stays idle and only switches itself to the active behavior when it is running
+ * in the primary datacenter, or when it is activated on demand. Once the supervisor instance is no longer needed in
+ * a secondary datacenter, it needs to be deactivated manually.
+ */
+public final class IdleSupervisor extends AbstractSupervisor {
+    private static final Logger LOG = LoggerFactory.getLogger(IdleSupervisor.class);
+
+    private static final String DATACENTER_PREFIX = "dc-";
+    private static final String DEFAULT_DATACENTER = "dc-default";
+
+    private final BindingInstanceIdentifierCodec iidCodec;
+
+    private IdleSupervisor(final ActorContext<OwnerSupervisorCommand> context,
+                           final BindingInstanceIdentifierCodec iidCodec) {
+        super(context);
+        this.iidCodec = requireNonNull(iidCodec);
+        final Cluster cluster = Cluster.get(context.getSystem());
+
+        final String datacenterRole = extractDatacenterRole(cluster.selfMember());
+        if (datacenterRole.equals(DEFAULT_DATACENTER)) {
+            LOG.debug("No datacenter configured, activating default data center");
+            context.getSelf().tell(new ActivateDataCenter(null));
+        }
+
+        LOG.debug("Idle supervisor started on {}.", cluster.selfMember());
+    }
+
+    public static Behavior<OwnerSupervisorCommand> create(final BindingInstanceIdentifierCodec iidCodec) {
+        return Behaviors.setup(context -> new IdleSupervisor(context, iidCodec));
+    }
+
+    @Override
+    public Receive<OwnerSupervisorCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(ActivateDataCenter.class, this::onActivateDataCenter)
+                .onMessage(GetEntitiesBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(GetEntityBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(GetEntityOwnerBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+                .onMessage(ClearCandidates.class, this::finishClearCandidates)
+                .build();
+    }
+
+    private Behavior<OwnerSupervisorCommand> onFailEntityRpc(final OwnerSupervisorRequest message) {
+        LOG.debug("Failing rpc request. {}", message);
+        message.getReplyTo().tell(StatusReply.error("OwnerSupervisor is inactive so it"
+                + " cannot handle entity rpc requests."));
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onActivateDataCenter(final ActivateDataCenter message) {
+        LOG.debug("Received ActivateDataCenter command switching to syncer behavior,");
+        return OwnerSyncer.create(message.getReplyTo(), iidCodec);
+    }
+
+    private static String extractDatacenterRole(final Member selfMember) {
+        return selfMember.getRoles().stream()
+                .filter(role -> role.startsWith(DATACENTER_PREFIX))
+                .findFirst()
+                .orElseThrow(() -> new IllegalArgumentException(selfMember + " does not have a valid role"));
+    }
+
+    @Override
+    Logger getLogger() {
+        return LOG;
+    }
+}
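
A minimal sketch of how the on-demand activation mentioned in the IdleSupervisor javadoc might be driven from the caller side, assuming the supervisor behavior has already been spawned as an ActorRef<OwnerSupervisorCommand>; the helper class name and timeout value are illustrative assumptions, not part of this change.

import akka.actor.typed.ActorRef;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.javadsl.AskPattern;
import java.time.Duration;
import java.util.concurrent.CompletionStage;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorReply;

final class ActivationSketch {
    private ActivationSketch() {
        // hypothetical helper, illustration only
    }

    // Ask the idle supervisor to activate this (secondary) datacenter and complete once it replies.
    static CompletionStage<OwnerSupervisorReply> activate(final ActorSystem<?> system,
            final ActorRef<OwnerSupervisorCommand> supervisor) {
        return AskPattern.ask(
            supervisor,
            ActivateDataCenter::new,    // the ask pattern supplies the replyTo reference
            Duration.ofSeconds(20),     // illustrative timeout
            system.scheduler());
    }
}
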
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisor.java
new file mode 100644 (file)
index 0000000..1e2a41b
--- /dev/null
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ClusterEvent;
+import akka.cluster.ClusterEvent.CurrentClusterState;
+import akka.cluster.Member;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.SelfUniqueAddress;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import akka.cluster.typed.Cluster;
+import akka.cluster.typed.Subscribe;
+import akka.pattern.StatusReply;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.BiPredicate;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.AbstractEntityRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.CandidatesChanged;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterDeactivated;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberDownEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberReachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUnreachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUpEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerChanged;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.collection.JavaConverters;
+
+/**
+ * Responsible for tracking candidates and assigning ownership of entities. This behavior is subscribed to the
+ * candidate registry in distributed-data and picks entity owners based on the current cluster state and registered
+ * candidates. On cluster membership and reachability events, owners are reassigned where possible.
+ */
+public final class OwnerSupervisor extends AbstractSupervisor {
+
+    private static final Logger LOG = LoggerFactory.getLogger(OwnerSupervisor.class);
+    private static final String DATACENTER_PREFIX = "dc-";
+
+    private final ReplicatorMessageAdapter<OwnerSupervisorCommand, LWWRegister<String>> ownerReplicator;
+
+    // Our own clock implementation so we do not have to rely on synchronized clocks. This basically functions as an
+    // increasing counter, which is fine for our needs, as we only ever have a single writer since the supervisor is
+    // running in a cluster-singleton
+    private static final LWWRegister.Clock<String> CLOCK = (currentTimestamp, value) -> currentTimestamp + 1;
+
+    private final Cluster cluster;
+    private final SelfUniqueAddress node;
+    private final String dataCenter;
+
+    private final Set<String> activeMembers;
+
+    // currently registered candidates
+    private final Map<DOMEntity, Set<String>> currentCandidates;
+    // current owners
+    private final Map<DOMEntity, String> currentOwners;
+    // reverse lookup of owner to entity
+    private final Multimap<String, DOMEntity> ownerToEntity = HashMultimap.create();
+
+    // only reassign the owner for entities whose owner is unreachable or no longer a candidate for the entity
+    private final BiPredicate<DOMEntity, String> reassignPredicate = (entity, candidate) ->
+            !isActiveCandidate(candidate) || !isCandidateFor(entity, candidate);
+
+    private final BindingInstanceIdentifierCodec iidCodec;
+
+    private OwnerSupervisor(final ActorContext<OwnerSupervisorCommand> context,
+                            final Map<DOMEntity, Set<String>> currentCandidates,
+                            final Map<DOMEntity, String> currentOwners,
+                            final BindingInstanceIdentifierCodec iidCodec) {
+        super(context);
+        this.iidCodec = requireNonNull(iidCodec);
+
+        final DistributedData distributedData = DistributedData.get(context.getSystem());
+        final ActorRef<Replicator.Command> replicator = distributedData.replicator();
+
+        cluster = Cluster.get(context.getSystem());
+        ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5));
+        dataCenter = extractDatacenterRole(cluster.selfMember());
+
+        node = distributedData.selfUniqueAddress();
+        activeMembers = getActiveMembers();
+
+        this.currentCandidates = currentCandidates;
+        this.currentOwners = currentOwners;
+
+        for (final Map.Entry<DOMEntity, String> entry : currentOwners.entrySet()) {
+            ownerToEntity.put(entry.getValue(), entry.getKey());
+        }
+
+        // check whether we have any unreachable/missing owners
+        reassignUnreachableOwners();
+        assignMissingOwners();
+
+        final ActorRef<ClusterEvent.MemberEvent> memberEventAdapter =
+                context.messageAdapter(ClusterEvent.MemberEvent.class, event -> {
+                    if (event instanceof ClusterEvent.MemberUp) {
+                        return new MemberUpEvent(event.member().address(), event.member().getRoles());
+                    } else {
+                        return new MemberDownEvent(event.member().address(), event.member().getRoles());
+                    }
+                });
+        cluster.subscriptions().tell(Subscribe.create(memberEventAdapter, ClusterEvent.MemberEvent.class));
+
+        final ActorRef<ClusterEvent.ReachabilityEvent> reachabilityEventAdapter =
+                context.messageAdapter(ClusterEvent.ReachabilityEvent.class, event -> {
+                    if (event instanceof ClusterEvent.ReachableMember) {
+                        return new MemberReachableEvent(event.member().address(), event.member().getRoles());
+                    } else {
+                        return new MemberUnreachableEvent(event.member().address(), event.member().getRoles());
+                    }
+                });
+        cluster.subscriptions().tell(Subscribe.create(reachabilityEventAdapter, ClusterEvent.ReachabilityEvent.class));
+
+        candidateReplicator.subscribe(CandidateRegistry.KEY, CandidatesChanged::new);
+
+        LOG.debug("Owner Supervisor started");
+    }
+
+    public static Behavior<OwnerSupervisorCommand> create(final Map<DOMEntity, Set<String>> currentCandidates,
+            final Map<DOMEntity, String> currentOwners, final BindingInstanceIdentifierCodec iidCodec) {
+        return Behaviors.setup(ctx -> new OwnerSupervisor(ctx, currentCandidates, currentOwners, iidCodec));
+    }
+
+    @Override
+    public Receive<OwnerSupervisorCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(CandidatesChanged.class, this::onCandidatesChanged)
+                .onMessage(DeactivateDataCenter.class, this::onDeactivateDatacenter)
+                .onMessage(OwnerChanged.class, this::onOwnerChanged)
+                .onMessage(MemberUpEvent.class, this::onPeerUp)
+                .onMessage(MemberDownEvent.class, this::onPeerDown)
+                .onMessage(MemberReachableEvent.class, this::onPeerReachable)
+                .onMessage(MemberUnreachableEvent.class, this::onPeerUnreachable)
+                .onMessage(GetEntitiesBackendRequest.class, this::onGetEntities)
+                .onMessage(GetEntityBackendRequest.class, this::onGetEntity)
+                .onMessage(GetEntityOwnerBackendRequest.class, this::onGetEntityOwner)
+                .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+                .onMessage(ClearCandidates.class, this::finishClearCandidates)
+                .build();
+    }
+
+    private Behavior<OwnerSupervisorCommand> onDeactivateDatacenter(final DeactivateDataCenter command) {
+        LOG.debug("Deactivating Owner Supervisor on {}", cluster.selfMember());
+        command.getReplyTo().tell(DataCenterDeactivated.INSTANCE);
+        return IdleSupervisor.create(iidCodec);
+    }
+
+    private Behavior<OwnerSupervisorCommand> onOwnerChanged(final OwnerChanged command) {
+        LOG.debug("Owner has changed for {}", command.getResponse().key());
+        return this;
+    }
+
+    private void reassignUnreachableOwners() {
+        final Set<String> ownersToReassign = new HashSet<>();
+        for (final String owner : ownerToEntity.keys()) {
+            if (!isActiveCandidate(owner)) {
+                ownersToReassign.add(owner);
+            }
+        }
+
+        for (final String owner : ownersToReassign) {
+            reassignCandidatesFor(owner, ImmutableList.copyOf(ownerToEntity.get(owner)), reassignPredicate);
+        }
+    }
+
+    private void assignMissingOwners() {
+        for (final Map.Entry<DOMEntity, Set<String>> entry : currentCandidates.entrySet()) {
+            if (!currentOwners.containsKey(entry.getKey())) {
+                assignOwnerFor(entry.getKey());
+            }
+        }
+    }
+
+    private Behavior<OwnerSupervisorCommand> onCandidatesChanged(final CandidatesChanged message) {
+        LOG.debug("onCandidatesChanged {}", message.getResponse());
+        if (message.getResponse() instanceof Replicator.Changed) {
+            final Replicator.Changed<ORMap<DOMEntity, ORSet<String>>> changed =
+                    (Replicator.Changed<ORMap<DOMEntity, ORSet<String>>>) message.getResponse();
+            processCandidateChanges(changed.get(CandidateRegistry.KEY));
+        }
+        return this;
+    }
+
+    private void processCandidateChanges(final ORMap<DOMEntity, ORSet<String>> candidates) {
+        final Map<DOMEntity, ORSet<String>> entries = candidates.getEntries();
+        for (final Map.Entry<DOMEntity, ORSet<String>> entry : entries.entrySet()) {
+            processCandidatesFor(entry.getKey(), entry.getValue());
+        }
+    }
+
+    private void processCandidatesFor(final DOMEntity entity, final ORSet<String> receivedCandidates) {
+        LOG.debug("Processing candidates for : {}, new value: {}", entity, receivedCandidates.elements());
+
+        final Set<String> candidates = JavaConverters.asJava(receivedCandidates.elements());
+        // only insert candidates if there are any to insert, otherwise we would generate an unnecessary notification
+        // with no owner
+        if (!currentCandidates.containsKey(entity) && !candidates.isEmpty()) {
+            LOG.debug("Candidates missing for entity: {} adding all candidates", entity);
+            currentCandidates.put(entity, new HashSet<>(candidates));
+
+            LOG.debug("Current state for {} : {}", entity, currentCandidates.get(entity).toString());
+            assignOwnerFor(entity);
+
+            return;
+        }
+
+        final Set<String> currentlyPresent = currentCandidates.getOrDefault(entity, Set.of());
+        final Set<String> difference = ImmutableSet.copyOf(Sets.symmetricDifference(currentlyPresent, candidates));
+
+        LOG.debug("currently present candidates: {}", currentlyPresent);
+        LOG.debug("difference: {}", difference);
+
+        final List<String> ownersToReassign = new ArrayList<>();
+
+        // first add/remove candidates from entities
+        for (final String toCheck : difference) {
+            if (!currentlyPresent.contains(toCheck)) {
+                // add new candidate
+                LOG.debug("Adding new candidate for entity: {} : {}", entity, toCheck);
+                currentCandidates.get(entity).add(toCheck);
+
+                final String currentOwner = currentOwners.get(entity);
+
+                if (currentOwner == null || !activeMembers.contains(currentOwner)) {
+                    // might as well assign right away when we do not have an owner or the owner is unreachable
+                    assignOwnerFor(entity);
+                }
+
+                LOG.debug("Current state for entity: {} : {}", entity, currentCandidates.get(entity).toString());
+                continue;
+            }
+
+            if (!candidates.contains(toCheck)) {
+                // remove candidate
+                LOG.debug("Removing candidate from entity: {} - {}", entity, toCheck);
+                currentCandidates.get(entity).remove(toCheck);
+                if (ownerToEntity.containsKey(toCheck)) {
+                    ownersToReassign.add(toCheck);
+                }
+            }
+        }
+
+        // then reassign those that need new owners
+        for (final String toReassign : ownersToReassign) {
+            reassignCandidatesFor(toReassign, ImmutableList.copyOf(ownerToEntity.get(toReassign)),
+                    reassignPredicate);
+        }
+
+        if (currentCandidates.get(entity) == null) {
+            LOG.debug("Last candidate removed for {}", entity);
+        } else {
+            LOG.debug("Current state for entity: {} : {}", entity, currentCandidates.get(entity).toString());
+        }
+    }
+
+    private void reassignCandidatesFor(final String oldOwner, final Collection<DOMEntity> entities,
+                                       final BiPredicate<DOMEntity, String> predicate) {
+        LOG.debug("Reassigning owners for {}", entities);
+        for (final DOMEntity entity : entities) {
+            if (predicate.test(entity, oldOwner)) {
+
+                if (!isActiveCandidate(oldOwner) && isCandidateFor(entity, oldOwner) && hasSingleCandidate(entity)) {
+                    // skip new owner assignment only if the old owner is unreachable, is still a candidate and is
+                    // the ONLY candidate
+                    LOG.debug("{} is the only candidate for {}. Skipping reassignment.", oldOwner, entity);
+                    continue;
+                }
+                ownerToEntity.remove(oldOwner, entity);
+                assignOwnerFor(entity);
+            }
+        }
+    }
+
+    private boolean isActiveCandidate(final String candidate) {
+        return activeMembers.contains(candidate);
+    }
+
+    private boolean isCandidateFor(final DOMEntity entity, final String candidate) {
+        return currentCandidates.getOrDefault(entity, Set.of()).contains(candidate);
+    }
+
+    private boolean hasSingleCandidate(final DOMEntity entity) {
+        return currentCandidates.getOrDefault(entity, Set.of()).size() == 1;
+    }
+
+    private void assignOwnerFor(final DOMEntity entity) {
+        final Set<String> candidatesForEntity = currentCandidates.get(entity);
+        if (candidatesForEntity.isEmpty()) {
+            LOG.debug("No candidates present for entity: {}", entity);
+            removeOwner(entity);
+            return;
+        }
+
+        String pickedCandidate = null;
+        for (final String candidate : candidatesForEntity) {
+            if (activeMembers.contains(candidate)) {
+                pickedCandidate = candidate;
+                break;
+            }
+        }
+        if (pickedCandidate == null) {
+            LOG.debug("No candidate is reachable for {}, activeMembers: {}, currentCandidates: {}",
+                    entity, activeMembers, currentCandidates.get(entity));
+            // no candidate is reachable so only remove owner if necessary
+            removeOwner(entity);
+            return;
+        }
+        ownerToEntity.put(pickedCandidate, entity);
+
+        LOG.debug("Entity {} new owner: {}", entity, pickedCandidate);
+        currentOwners.put(entity, pickedCandidate);
+        writeNewOwner(entity, pickedCandidate);
+    }
+
+    private void removeOwner(final DOMEntity entity) {
+        if (currentOwners.containsKey(entity)) {
+            // assign an empty owner in distributed-data, as we cannot delete data for a key, since that would
+            // prevent subsequent writes for the same key
+            currentOwners.remove(entity);
+
+            writeNewOwner(entity, "");
+        }
+    }
+
+    private void writeNewOwner(final DOMEntity entity, final String candidate) {
+        ownerReplicator.askUpdate(
+                askReplyTo -> new Replicator.Update<>(
+                        new LWWRegisterKey<>(entity.toString()),
+                        new LWWRegister<>(node.uniqueAddress(), candidate, 0),
+                        Replicator.writeLocal(),
+                        askReplyTo,
+                        register -> register.withValue(node, candidate, CLOCK)),
+                OwnerChanged::new);
+    }
+
+    private Behavior<OwnerSupervisorCommand> onPeerUp(final MemberUpEvent event) {
+        LOG.debug("Received MemberUp : {}", event);
+
+        handleReachableEvent(event.getRoles());
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onPeerReachable(final MemberReachableEvent event) {
+        LOG.debug("Received MemberReachable : {}", event);
+
+        handleReachableEvent(event.getRoles());
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onGetEntities(final GetEntitiesBackendRequest request) {
+        request.getReplyTo().tell(StatusReply.success(new GetEntitiesBackendReply(currentOwners, currentCandidates)));
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onGetEntity(final GetEntityBackendRequest request) {
+        final DOMEntity entity = extractEntity(request);
+        request.getReplyTo().tell(StatusReply.success(
+                new GetEntityBackendReply(currentOwners.get(entity), currentCandidates.get(entity))));
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onGetEntityOwner(final GetEntityOwnerBackendRequest request) {
+        request.getReplyTo().tell(
+                StatusReply.success(new GetEntityOwnerBackendReply(currentOwners.get(extractEntity(request)))));
+        return this;
+    }
+
+    private void handleReachableEvent(final Set<String> roles) {
+        if (roles.contains(dataCenter)) {
+            activeMembers.add(extractRole(roles));
+            assignMissingOwners();
+        } else {
+            LOG.debug("Received reachable event from a foreign datacenter, Ignoring... Roles: {}", roles);
+        }
+    }
+
+    private Behavior<OwnerSupervisorCommand> onPeerDown(final MemberDownEvent event) {
+        LOG.debug("Received MemberDown : {}", event);
+
+        handleUnreachableEvent(event.getRoles());
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onPeerUnreachable(final MemberUnreachableEvent event) {
+        LOG.debug("Received MemberUnreachable : {}", event);
+
+        handleUnreachableEvent(event.getRoles());
+        return this;
+    }
+
+    private void handleUnreachableEvent(final Set<String> roles) {
+        if (roles.contains(dataCenter)) {
+            activeMembers.remove(extractRole(roles));
+            reassignUnreachableOwners();
+        } else {
+            LOG.debug("Received unreachable event from a foreign datacenter, Ignoring... Roles: {}", roles);
+        }
+    }
+
+    private Set<String> getActiveMembers() {
+        final CurrentClusterState clusterState = cluster.state();
+        final Set<String> unreachableRoles = clusterState.getUnreachable().stream()
+            .map(OwnerSupervisor::extractRole)
+            .collect(Collectors.toSet());
+
+        return StreamSupport.stream(clusterState.getMembers().spliterator(), false)
+            // We are evaluating the set of roles for each member
+            .map(Member::getRoles)
+            // Filter out any members which do not share our dataCenter
+            .filter(roles -> roles.contains(dataCenter))
+            // Find first legal role
+            .map(OwnerSupervisor::extractRole)
+            // filter out unreachable roles
+            .filter(role -> !unreachableRoles.contains(role))
+            .collect(Collectors.toSet());
+    }
+
+    private DOMEntity extractEntity(final AbstractEntityRequest<?> request) {
+        final var name = request.getName();
+        final var iid = name.getInstanceIdentifier();
+        if (iid != null) {
+            return new DOMEntity(request.getType().getValue(), iidCodec.fromBinding(iid));
+        }
+        final var str = verifyNotNull(name.getString(), "Unhandled entity name %s", name);
+        return new DOMEntity(request.getType().getValue(), str);
+    }
+
+    private static String extractRole(final Member member) {
+        return extractRole(member.getRoles());
+    }
+
+    private static String extractRole(final Set<String> roles) {
+        return roles.stream().filter(role -> !role.startsWith(DATACENTER_PREFIX))
+                .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+    }
+
+    private static String extractDatacenterRole(final Member member) {
+        return member.getRoles().stream().filter(role -> role.startsWith(DATACENTER_PREFIX))
+                .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+    }
+
+    @Override
+    Logger getLogger() {
+        return LOG;
+    }
+}
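
A hedged sketch of how the owners and candidates tracked by this supervisor could be queried from outside, using the GetEntitiesBackendRequest defined later in this change together with Akka's ask-with-status pattern; the supervisor reference, timeout and helper class name are illustrative assumptions.

import akka.actor.typed.ActorRef;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.javadsl.AskPattern;
import java.time.Duration;
import java.util.concurrent.CompletionStage;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;

final class EntityQuerySketch {
    private EntityQuerySketch() {
        // hypothetical helper, illustration only
    }

    // Ask the active OwnerSupervisor for its current owners and candidates. If an IdleSupervisor or
    // OwnerSyncer is in place instead, the StatusReply error makes the returned stage complete exceptionally.
    static CompletionStage<GetEntitiesBackendReply> queryEntities(final ActorSystem<?> system,
            final ActorRef<OwnerSupervisorCommand> supervisor) {
        return AskPattern.askWithStatus(
            supervisor,
            GetEntitiesBackendRequest::new,
            Duration.ofSeconds(5),      // illustrative timeout
            system.scheduler());
    }
}
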
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSyncer.java
new file mode 100644 (file)
index 0000000..32a0a64
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import akka.pattern.StatusReply;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidates;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DataCenterActivated;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntityOwnerBackendRequest;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.InitialCandidateSync;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.InitialOwnerSync;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorReply;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorRequest;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Behavior that retrieves current candidates/owners from distributed-data and switches to OwnerSupervisor when the
+ * sync has finished.
+ */
+public final class OwnerSyncer extends AbstractSupervisor {
+    private static final Logger LOG = LoggerFactory.getLogger(OwnerSyncer.class);
+
+    private final ReplicatorMessageAdapter<OwnerSupervisorCommand, LWWRegister<String>> ownerReplicator;
+    private final Map<DOMEntity, Set<String>> currentCandidates = new HashMap<>();
+    private final Map<DOMEntity, String> currentOwners = new HashMap<>();
+
+    // Lookup from the String representation of an entity to the corresponding DOMEntity
+    private final Map<String, DOMEntity> entityLookup = new HashMap<>();
+    private final BindingInstanceIdentifierCodec iidCodec;
+
+    private int toSync = -1;
+
+    private OwnerSyncer(final ActorContext<OwnerSupervisorCommand> context,
+                        final @Nullable ActorRef<OwnerSupervisorReply> notifyDatacenterStarted,
+                        final BindingInstanceIdentifierCodec iidCodec) {
+        super(context);
+        this.iidCodec = requireNonNull(iidCodec);
+        LOG.debug("Starting candidate and owner sync");
+
+        final ActorRef<Replicator.Command> replicator = DistributedData.get(context.getSystem()).replicator();
+
+        ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5));
+
+        candidateReplicator.askGet(
+                askReplyTo -> new Replicator.Get<>(CandidateRegistry.KEY, Replicator.readLocal(), askReplyTo),
+                InitialCandidateSync::new);
+
+        if (notifyDatacenterStarted != null) {
+            notifyDatacenterStarted.tell(DataCenterActivated.INSTANCE);
+        }
+    }
+
+    public static Behavior<OwnerSupervisorCommand> create(final ActorRef<OwnerSupervisorReply> notifyDatacenterStarted,
+            final BindingInstanceIdentifierCodec iidCodec) {
+        return Behaviors.setup(ctx -> new OwnerSyncer(ctx, notifyDatacenterStarted, iidCodec));
+    }
+
+    @Override
+    public Receive<OwnerSupervisorCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(InitialCandidateSync.class, this::onInitialCandidateSync)
+                .onMessage(InitialOwnerSync.class, this::onInitialOwnerSync)
+                .onMessage(GetEntitiesBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(GetEntityBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(GetEntityOwnerBackendRequest.class, this::onFailEntityRpc)
+                .onMessage(ClearCandidatesForMember.class, this::onClearCandidatesForMember)
+                .onMessage(ClearCandidates.class, this::finishClearCandidates)
+                .build();
+    }
+
+    private Behavior<OwnerSupervisorCommand> onFailEntityRpc(final OwnerSupervisorRequest message) {
+        LOG.debug("Failing rpc request. {}", message);
+        message.getReplyTo().tell(StatusReply.error(
+            "OwnerSupervisor is inactive so it cannot handle entity rpc requests."));
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onInitialCandidateSync(final InitialCandidateSync rsp) {
+        final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = rsp.getResponse();
+        if (response instanceof Replicator.GetSuccess) {
+            return doInitialSync((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response);
+        } else if (response instanceof Replicator.NotFound) {
+            LOG.debug("No candidates found switching to supervisor");
+            return switchToSupervisor();
+        } else {
+            LOG.debug("Initial candidate sync failed, switching to supervisor. Sync reply: {}", response);
+            return switchToSupervisor();
+        }
+    }
+
+    private Behavior<OwnerSupervisorCommand> doInitialSync(
+            final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> response) {
+
+        final ORMap<DOMEntity, ORSet<String>> candidates = response.get(CandidateRegistry.KEY);
+        candidates.getEntries().entrySet().forEach(entry -> {
+            currentCandidates.put(entry.getKey(), new HashSet<>(entry.getValue().getElements()));
+        });
+
+        toSync = candidates.keys().size();
+        for (final DOMEntity entity : candidates.keys().getElements()) {
+            entityLookup.put(entity.toString(), entity);
+
+            ownerReplicator.askGet(
+                    askReplyTo -> new Replicator.Get<>(
+                            new LWWRegisterKey<>(entity.toString()),
+                            Replicator.readLocal(),
+                            askReplyTo),
+                    InitialOwnerSync::new);
+        }
+
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> onInitialOwnerSync(final InitialOwnerSync rsp) {
+        final Replicator.GetResponse<LWWRegister<String>> response = rsp.getResponse();
+        if (response instanceof Replicator.GetSuccess) {
+            handleOwnerRsp((Replicator.GetSuccess<LWWRegister<String>>) response);
+        } else if (response instanceof Replicator.NotFound) {
+            handleNotFoundOwnerRsp((Replicator.NotFound<LWWRegister<String>>) response);
+        } else {
+            LOG.debug("Initial sync failed response: {}", response);
+        }
+
+        // count the responses; switch behaviors once the last one arrives
+        toSync--;
+        if (toSync == 0) {
+            return switchToSupervisor();
+        }
+
+        return this;
+    }
+
+    private Behavior<OwnerSupervisorCommand> switchToSupervisor() {
+        LOG.debug("Initial sync done, switching to supervisor. candidates: {}, owners: {}",
+                currentCandidates, currentOwners);
+        return Behaviors.setup(ctx -> OwnerSupervisor.create(currentCandidates, currentOwners, iidCodec));
+    }
+
+    private void handleOwnerRsp(final Replicator.GetSuccess<LWWRegister<String>> rsp) {
+        final DOMEntity entity = entityLookup.get(rsp.key().id());
+        final String owner = rsp.get(rsp.key()).getValue();
+
+        currentOwners.put(entity, owner);
+    }
+
+    private static void handleNotFoundOwnerRsp(final Replicator.NotFound<LWWRegister<String>> rsp) {
+        LOG.debug("Owner not found. {}", rsp);
+    }
+
+    @Override
+    Logger getLogger() {
+        return LOG;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/AbstractEntityRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/AbstractEntityRequest.java
new file mode 100644 (file)
index 0000000..5919a6e
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+
+public abstract class AbstractEntityRequest<T extends OwnerSupervisorReply> extends OwnerSupervisorRequest<T> {
+    private static final long serialVersionUID = 1L;
+
+    private final @NonNull EntityType type;
+    private final @NonNull EntityName name;
+
+    AbstractEntityRequest(final ActorRef<StatusReply<T>> replyTo, final EntityId entity) {
+        super(replyTo);
+        this.type = entity.requireType();
+        this.name = entity.requireName();
+    }
+
+    public final @NonNull EntityType getType() {
+        return type;
+    }
+
+    public final @NonNull EntityName getName() {
+        return name;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ActivateDataCenter.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ActivateDataCenter.java
new file mode 100644 (file)
index 0000000..29e8502
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class ActivateDataCenter extends OwnerSupervisorCommand implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ActorRef<OwnerSupervisorReply> replyTo;
+
+    public ActivateDataCenter(final @Nullable ActorRef<OwnerSupervisorReply> replyTo) {
+        this.replyTo = replyTo;
+    }
+
+    public ActorRef<OwnerSupervisorReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/CandidatesChanged.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/CandidatesChanged.java
new file mode 100644 (file)
index 0000000..6334d97
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class CandidatesChanged extends OwnerSupervisorCommand {
+    private final @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response;
+
+    public CandidatesChanged(final SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> subscribeResponse) {
+        this.response = requireNonNull(subscribeResponse);
+    }
+
+    public @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).add("response", response).toString();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidates.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidates.java
new file mode 100644 (file)
index 0000000..64971c8
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ClearCandidates extends OwnerSupervisorCommand {
+
+    private final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+    private final ClearCandidatesForMember originalMessage;
+
+    public ClearCandidates(final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response,
+                           final ClearCandidatesForMember originalMessage) {
+        this.response = response;
+        this.originalMessage = originalMessage;
+    }
+
+    public Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+
+    public ClearCandidatesForMember getOriginalMessage() {
+        return originalMessage;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesForMember.java
new file mode 100644 (file)
index 0000000..1e27cb5
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+
+/**
+ * Request sent from candidate registration actors to clear the candidate from all entities. Issued at startup to
+ * clear candidates left over from a previous incarnation of the node. The owner supervisor responds to this request
+ * to notify the registration actor that it can start up and process candidate requests.
+ */
+public class ClearCandidatesForMember extends OwnerSupervisorCommand implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ActorRef<ClearCandidatesResponse> replyTo;
+    private final String candidate;
+
+    public ClearCandidatesForMember(final ActorRef<ClearCandidatesResponse> replyTo, final String candidate) {
+        this.replyTo = replyTo;
+        this.candidate = candidate;
+    }
+
+    public ActorRef<ClearCandidatesResponse> getReplyTo() {
+        return replyTo;
+    }
+
+    public String getCandidate() {
+        return candidate;
+    }
+}
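
A minimal sketch of the startup protocol described above, seen from the registration actor's side: ask the supervisor to clear any stale candidates for the local member and continue only once the ClearCandidatesResponse arrives. The member name, timeout and helper class name are illustrative assumptions.

import akka.actor.typed.ActorRef;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.javadsl.AskPattern;
import java.time.Duration;
import java.util.concurrent.CompletionStage;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;

final class ClearCandidatesSketch {
    private ClearCandidatesSketch() {
        // hypothetical helper, illustration only
    }

    // Clear candidates left behind by a previous incarnation of "member-1" before registering new ones.
    static CompletionStage<ClearCandidatesResponse> clearStaleCandidates(final ActorSystem<?> system,
            final ActorRef<OwnerSupervisorCommand> supervisor) {
        return AskPattern.ask(
            supervisor,
            replyTo -> new ClearCandidatesForMember(replyTo, "member-1"),  // illustrative member name
            Duration.ofSeconds(20),                                        // illustrative timeout
            system.scheduler());
    }
}
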
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesResponse.java
new file mode 100644 (file)
index 0000000..7399bd8
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+
+/**
+ * Response sent from OwnerSupervisor to the ClearCandidatesForMember request, notifying the caller that removal has
+ * finished.
+ */
+public class ClearCandidatesResponse implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/ClearCandidatesUpdateResponse.java
new file mode 100644 (file)
index 0000000..9f48323
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ClearCandidatesUpdateResponse extends OwnerSupervisorCommand {
+    private final Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> response;
+    private final ActorRef<ClearCandidatesResponse> replyTo;
+
+    public ClearCandidatesUpdateResponse(final Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> response,
+                                         final ActorRef<ClearCandidatesResponse> replyTo) {
+        this.response = response;
+        this.replyTo = replyTo;
+    }
+
+    public Replicator.UpdateResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+
+    public ActorRef<ClearCandidatesResponse> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DataCenterActivated.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DataCenterActivated.java
new file mode 100644 (file)
index 0000000..a82bf22
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+
+public final class DataCenterActivated extends OwnerSupervisorReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    public static final DataCenterActivated INSTANCE = new DataCenterActivated();
+
+    private DataCenterActivated() {
+        // NOOP
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DataCenterDeactivated.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DataCenterDeactivated.java
new file mode 100644 (file)
index 0000000..4879fc7
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+
+public final class DataCenterDeactivated extends OwnerSupervisorReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    public static final DataCenterDeactivated INSTANCE = new DataCenterDeactivated();
+
+    private DataCenterDeactivated() {
+        // NOOP
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DeactivateDataCenter.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/DeactivateDataCenter.java
new file mode 100644 (file)
index 0000000..039bfc5
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class DeactivateDataCenter extends OwnerSupervisorCommand implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ActorRef<OwnerSupervisorReply> replyTo;
+
+    public DeactivateDataCenter(final @Nullable ActorRef<OwnerSupervisorReply> replyTo) {
+        this.replyTo = replyTo;
+    }
+
+    public ActorRef<OwnerSupervisorReply> getReplyTo() {
+        return replyTo;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntitiesBackendReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntitiesBackendReply.java
new file mode 100644 (file)
index 0000000..beb858c
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSetMultimap;
+import java.io.Serializable;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class GetEntitiesBackendReply extends OwnerSupervisorReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ImmutableSetMultimap<DOMEntity, String> candidates;
+    private final ImmutableMap<DOMEntity, String> owners;
+
+    public GetEntitiesBackendReply(final Map<DOMEntity, String> owners, final Map<DOMEntity, Set<String>> candidates) {
+        final ImmutableSetMultimap.Builder<DOMEntity, String> builder = ImmutableSetMultimap.builder();
+        for (Map.Entry<DOMEntity, Set<String>> entry : candidates.entrySet()) {
+            builder.putAll(entry.getKey(), entry.getValue());
+        }
+        this.candidates = builder.build();
+        this.owners = ImmutableMap.copyOf(owners);
+    }
+
+    public ImmutableSetMultimap<DOMEntity, String> getCandidates() {
+        return candidates;
+    }
+
+    public ImmutableMap<DOMEntity, String> getOwners() {
+        return owners;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntitiesBackendRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntitiesBackendRequest.java
new file mode 100644 (file)
index 0000000..69ea7df
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+
+public final class GetEntitiesBackendRequest extends OwnerSupervisorRequest<GetEntitiesBackendReply> {
+    private static final long serialVersionUID = 1L;
+
+    public GetEntitiesBackendRequest(final ActorRef<StatusReply<GetEntitiesBackendReply>> replyTo) {
+        super(replyTo);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityBackendReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityBackendReply.java
new file mode 100644 (file)
index 0000000..6130603
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import com.google.common.collect.ImmutableSet;
+import java.io.Serializable;
+import java.util.Set;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class GetEntityBackendReply extends OwnerSupervisorReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final ImmutableSet<String> candidates;
+    private final String owner;
+
+    public GetEntityBackendReply(final @Nullable String owner, final @Nullable Set<String> candidates) {
+        this.owner = owner;
+        this.candidates = candidates == null ? ImmutableSet.of() : ImmutableSet.copyOf(candidates);
+    }
+
+    public ImmutableSet<String> getCandidates() {
+        return candidates;
+    }
+
+    public String getOwner() {
+        return owner;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityBackendRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityBackendRequest.java
new file mode 100644 (file)
index 0000000..0fa7842
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityBackendRequest extends AbstractEntityRequest<GetEntityBackendReply> {
+    private static final long serialVersionUID = 1L;
+
+    public GetEntityBackendRequest(final ActorRef<StatusReply<GetEntityBackendReply>> replyTo, final EntityId entity) {
+        super(replyTo, entity);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityOwnerBackendReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityOwnerBackendReply.java
new file mode 100644 (file)
index 0000000..d41185a
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.Nullable;
+
+public final class GetEntityOwnerBackendReply extends OwnerSupervisorReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final String owner;
+
+    public GetEntityOwnerBackendReply(final @Nullable String owner) {
+        this.owner = owner;
+    }
+
+    public String getOwner() {
+        return owner;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityOwnerBackendRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/GetEntityOwnerBackendRequest.java
new file mode 100644 (file)
index 0000000..11802ce
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityId;
+
+public final class GetEntityOwnerBackendRequest extends AbstractEntityRequest<GetEntityOwnerBackendReply> {
+    private static final long serialVersionUID = 1L;
+
+    public GetEntityOwnerBackendRequest(final ActorRef<StatusReply<GetEntityOwnerBackendReply>> replyTo,
+                                        final EntityId entity) {
+        super(replyTo, entity);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InitialCandidateSync.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InitialCandidateSync.java
new file mode 100644 (file)
index 0000000..53bf10a
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class InitialCandidateSync extends OwnerSupervisorCommand {
+    private final @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> response;
+
+    public InitialCandidateSync(final GetResponse<ORMap<DOMEntity, ORSet<String>>> response) {
+        this.response = response;
+    }
+
+    public @Nullable GetResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InitialOwnerSync.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InitialOwnerSync.java
new file mode 100644 (file)
index 0000000..e734c66
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public final class InitialOwnerSync extends OwnerSupervisorCommand {
+    private final @NonNull GetResponse<LWWRegister<String>> response;
+
+    public InitialOwnerSync(final GetResponse<LWWRegister<String>> response) {
+        this.response = requireNonNull(response);
+    }
+
+    public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+        return response;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InternalClusterEvent.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/InternalClusterEvent.java
new file mode 100644 (file)
index 0000000..0825c32
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.Address;
+import com.google.common.base.MoreObjects;
+import java.util.Set;
+import org.eclipse.jdt.annotation.NonNull;
+
+public abstract class InternalClusterEvent extends OwnerSupervisorCommand {
+    private final @NonNull Set<String> roles;
+    private final @NonNull Address address;
+
+    InternalClusterEvent(final Address address, final Set<String> roles) {
+        this.address = requireNonNull(address);
+        this.roles = Set.copyOf(roles);
+    }
+
+    public final @NonNull Address getAddress() {
+        return address;
+    }
+
+    public final @NonNull Set<String> getRoles() {
+        return roles;
+    }
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("address", address).add("roles", roles).toString();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberDownEvent.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberDownEvent.java
new file mode 100644 (file)
index 0000000..9ff7789
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberDownEvent extends InternalClusterEvent {
+    public MemberDownEvent(final Address address, final Set<String> roles) {
+        super(address, roles);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberReachableEvent.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberReachableEvent.java
new file mode 100644 (file)
index 0000000..dc6d798
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberReachableEvent extends InternalClusterEvent {
+    public MemberReachableEvent(final Address address, final Set<String> roles) {
+        super(address, roles);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberUnreachableEvent.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberUnreachableEvent.java
new file mode 100644 (file)
index 0000000..24999fb
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberUnreachableEvent extends InternalClusterEvent {
+    public MemberUnreachableEvent(final Address address, final Set<String> roles) {
+        super(address, roles);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberUpEvent.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/MemberUpEvent.java
new file mode 100644 (file)
index 0000000..18eb765
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import akka.actor.Address;
+import java.util.Set;
+
+public final class MemberUpEvent extends InternalClusterEvent {
+    public MemberUpEvent(final Address address, final Set<String> roles) {
+        super(address, roles);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerChanged.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerChanged.java
new file mode 100644 (file)
index 0000000..b7ce5b2
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.UpdateResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public final class OwnerChanged extends OwnerSupervisorCommand {
+    private final @NonNull UpdateResponse<LWWRegister<String>> rsp;
+
+    public OwnerChanged(final UpdateResponse<LWWRegister<String>> rsp) {
+        this.rsp = requireNonNull(rsp);
+    }
+
+    public @NonNull UpdateResponse<LWWRegister<String>> getResponse() {
+        return rsp;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorCommand.java
new file mode 100644 (file)
index 0000000..ba6ca1d
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+public abstract class OwnerSupervisorCommand {
+    OwnerSupervisorCommand() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorReply.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorReply.java
new file mode 100644 (file)
index 0000000..c34d441
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+public abstract class OwnerSupervisorReply {
+    OwnerSupervisorReply() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorRequest.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/owner/supervisor/command/OwnerSupervisorRequest.java
new file mode 100644 (file)
index 0000000..c451681
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.pattern.StatusReply;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+
+public abstract class OwnerSupervisorRequest<T extends OwnerSupervisorReply> extends OwnerSupervisorCommand
+        implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final @NonNull ActorRef<StatusReply<T>> replyTo;
+
+    OwnerSupervisorRequest(final ActorRef<StatusReply<T>> replyTo) {
+        this.replyTo = requireNonNull(replyTo);
+    }
+
+    public final @NonNull ActorRef<StatusReply<T>> getReplyTo() {
+        return replyTo;
+    }
+}
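
OwnerSupervisorRequest subclasses wrap their reply in akka.pattern.StatusReply, which pairs naturally with AskPattern.askWithStatus. The sketch below is illustrative only and assumes an already-running typed ActorSystem, a reference to the owner supervisor and a made-up timeout.

import java.time.Duration;
import java.util.concurrent.CompletionStage;

import akka.actor.typed.ActorRef;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.javadsl.AskPattern;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendReply;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.GetEntitiesBackendRequest;
import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;

final class OwnerSupervisorRequestExample {
    // Sends GetEntitiesBackendRequest and unwraps the StatusReply: the returned
    // stage completes with the reply on success or fails with the carried error.
    static CompletionStage<GetEntitiesBackendReply> queryEntities(
            final ActorSystem<Void> system, final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
        return AskPattern.askWithStatus(
            ownerSupervisor,
            replyTo -> new GetEntitiesBackendRequest(replyTo),
            Duration.ofSeconds(5),
            system.scheduler());
    }
}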
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistry.java
new file mode 100644 (file)
index 0000000..03ecbae
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate;
+
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.Cluster;
+import akka.cluster.ddata.Key;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORMapKey;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.SelfUniqueAddress;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.util.Set;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.InternalUpdateResponse;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Actor responsible for handling registrations of candidates into distributed-data.
+ */
+public final class CandidateRegistry extends AbstractBehavior<CandidateRegistryCommand> {
+
+    private static final Logger LOG = LoggerFactory.getLogger(CandidateRegistry.class);
+
+    private static final String DATACENTER_PREFIX = "dc-";
+
+    public static final Key<ORMap<DOMEntity, ORSet<String>>> KEY = new ORMapKey<>("candidateRegistry");
+
+    private final ReplicatorMessageAdapter<CandidateRegistryCommand, ORMap<DOMEntity, ORSet<String>>> replicatorAdapter;
+    private final SelfUniqueAddress node;
+    private final String selfRole;
+
+    private CandidateRegistry(final ActorContext<CandidateRegistryCommand> context,
+                              final ReplicatorMessageAdapter<CandidateRegistryCommand,
+                                      ORMap<DOMEntity, ORSet<String>>> replicatorAdapter) {
+        super(context);
+        this.replicatorAdapter = replicatorAdapter;
+
+        this.node = DistributedData.get(context.getSystem()).selfUniqueAddress();
+        this.selfRole = extractRole(Cluster.get(context.getSystem()).selfMember().getRoles());
+
+        LOG.debug("{} : Candidate registry started", selfRole);
+    }
+
+    public static Behavior<CandidateRegistryCommand> create() {
+        return Behaviors.setup(ctx ->
+                DistributedData.withReplicatorMessageAdapter(
+                        (ReplicatorMessageAdapter<CandidateRegistryCommand,
+                                ORMap<DOMEntity,ORSet<String>>> replicatorAdapter) ->
+                                        new CandidateRegistry(ctx, replicatorAdapter)));
+    }
+
+    @Override
+    public Receive<CandidateRegistryCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(RegisterCandidate.class, this::onRegisterCandidate)
+                .onMessage(UnregisterCandidate.class, this::onUnregisterCandidate)
+                .onMessage(InternalUpdateResponse.class, this::onInternalUpdateResponse)
+                .build();
+    }
+
+    private Behavior<CandidateRegistryCommand> onRegisterCandidate(final RegisterCandidate registerCandidate) {
+        LOG.debug("{} - Registering candidate({}) for entity: {}", selfRole,
+                registerCandidate.getCandidate(), registerCandidate.getEntity());
+        replicatorAdapter.askUpdate(
+                askReplyTo -> new Replicator.Update<>(
+                        KEY,
+                        ORMap.empty(),
+                        Replicator.writeLocal(),
+                        askReplyTo,
+                        map -> map.update(node, registerCandidate.getEntity(), ORSet.empty(),
+                                value -> value.add(node, registerCandidate.getCandidate()))),
+                InternalUpdateResponse::new);
+        return this;
+    }
+
+    private Behavior<CandidateRegistryCommand> onUnregisterCandidate(final UnregisterCandidate unregisterCandidate) {
+        LOG.debug("{} - Removing candidate({}) from entity: {}", selfRole,
+                unregisterCandidate.getCandidate(), unregisterCandidate.getEntity());
+        replicatorAdapter.askUpdate(
+                askReplyTo -> new Replicator.Update<>(
+                        KEY,
+                        ORMap.empty(),
+                        Replicator.writeLocal(),
+                        askReplyTo,
+                        map -> map.update(node, unregisterCandidate.getEntity(), ORSet.empty(),
+                                value -> value.remove(node, unregisterCandidate.getCandidate()))),
+                InternalUpdateResponse::new);
+        return this;
+    }
+
+    private Behavior<CandidateRegistryCommand> onInternalUpdateResponse(final InternalUpdateResponse updateResponse) {
+        LOG.debug("{} : Received update response: {}", selfRole, updateResponse.getRsp());
+        return this;
+    }
+
+    private static String extractRole(final Set<String> roles) {
+        return roles.stream().filter(role -> !role.contains(DATACENTER_PREFIX))
+                .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+    }
+}
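
As a rough illustration of the registration flow handled by this actor, the sketch below spawns the registry behavior and registers the local member as a candidate for one entity. The entity type, entity name and member name are invented, and a properly configured Akka cluster with distributed-data is assumed; in the real service the behavior is started by its parent actor hierarchy rather than wired up like this.

import akka.actor.typed.Behavior;
import akka.actor.typed.javadsl.Behaviors;
import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
import org.opendaylight.mdsal.eos.dom.api.DOMEntity;

final class CandidateRegistryWiring {
    static Behavior<Void> root() {
        return Behaviors.setup(ctx -> {
            final var registry = ctx.spawn(CandidateRegistry.create(), "CandidateRegistry");
            // Register "member-1" as a candidate for the example entity; the actor
            // translates this into an ORMap/ORSet update in distributed-data.
            registry.tell(new RegisterCandidate(
                new DOMEntity("example-entity-type", "example-entity"), "member-1"));
            return Behaviors.empty();
        });
    }
}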
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/CandidateRegistryInit.java
new file mode 100644 (file)
index 0000000..f9ca068
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.actor.typed.javadsl.StashBuffer;
+import akka.cluster.Cluster;
+import java.time.Duration;
+import java.util.Set;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesForMember;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ClearCandidatesResponse;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFailed;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRemovalFinished;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RemovePreviousCandidates;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CandidateRegistryInit extends AbstractBehavior<CandidateRegistryCommand> {
+
+    private static final Logger LOG = LoggerFactory.getLogger(CandidateRegistryInit.class);
+
+    private static final String DATACENTER_PREFIX = "dc-";
+
+    private final StashBuffer<CandidateRegistryCommand> stash;
+    private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+    private final String selfRole;
+
+    public CandidateRegistryInit(final ActorContext<CandidateRegistryCommand> ctx,
+                                 final StashBuffer<CandidateRegistryCommand> stash,
+                                 final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+        super(ctx);
+        this.stash = stash;
+        this.ownerSupervisor = ownerSupervisor;
+        this.selfRole = extractRole(Cluster.get(ctx.getSystem()).selfMember().getRoles());
+
+        ctx.getSelf().tell(new RemovePreviousCandidates());
+
+        LOG.debug("{} : CandidateRegistry syncing behavior started.", selfRole);
+    }
+
+    public static Behavior<CandidateRegistryCommand> create(final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+        return Behaviors.withStash(100,
+                stash ->
+                        Behaviors.setup(ctx -> new CandidateRegistryInit(ctx, stash, ownerSupervisor)));
+    }
+
+    @Override
+    public Receive<CandidateRegistryCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(RemovePreviousCandidates.class, this::onRemoveCandidates)
+                .onMessage(CandidateRemovalFinished.class, command -> switchToCandidateRegistry())
+                .onMessage(CandidateRemovalFailed.class, this::candidateRemovalFailed)
+                .onMessage(RegisterCandidate.class, this::stashCommand)
+                .onMessage(UnregisterCandidate.class, this::stashCommand)
+                .build();
+    }
+
+    private Behavior<CandidateRegistryCommand> candidateRemovalFailed(final CandidateRemovalFailed command) {
+        LOG.warn("{} : Initial removal of candidates from previous iteration failed. Rescheduling.", selfRole,
+                command.getThrowable());
+        getContext().getSelf().tell(new RemovePreviousCandidates());
+        return this;
+    }
+
+    private Behavior<CandidateRegistryCommand> onRemoveCandidates(final RemovePreviousCandidates command) {
+        LOG.debug("Sending RemovePreviousCandidates.");
+        getContext().ask(ClearCandidatesResponse.class,
+                ownerSupervisor, Duration.ofSeconds(5),
+                ref -> new ClearCandidatesForMember(ref, selfRole),
+                (response, throwable) -> {
+                    if (response != null) {
+                        return new CandidateRemovalFinished();
+                    } else {
+                        return new CandidateRemovalFailed(throwable);
+                    }
+                });
+
+        return this;
+    }
+
+    private Behavior<CandidateRegistryCommand> stashCommand(final CandidateRegistryCommand command) {
+        LOG.debug("Stashing {}", command);
+        stash.stash(command);
+        return this;
+    }
+
+    private Behavior<CandidateRegistryCommand> switchToCandidateRegistry() {
+        LOG.debug("{} : Clearing of candidates from previous instance done, switching to CandidateRegistry.", selfRole);
+        return stash.unstashAll(CandidateRegistry.create());
+    }
+
+    private static String extractRole(final Set<String> roles) {
+        return roles.stream().filter(role -> !role.contains(DATACENTER_PREFIX))
+                .findFirst().orElseThrow(() -> new IllegalArgumentException("No valid role found."));
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/AbstractCandidateCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/AbstractCandidateCommand.java
new file mode 100644 (file)
index 0000000..5949f4d
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public abstract class AbstractCandidateCommand extends CandidateRegistryCommand {
+    private final @NonNull DOMEntity entity;
+    private final @NonNull String candidate;
+
+    AbstractCandidateCommand(final DOMEntity entity, final String candidate) {
+        this.entity = requireNonNull(entity);
+        this.candidate = requireNonNull(candidate);
+    }
+
+    public final @NonNull DOMEntity getEntity() {
+        return entity;
+    }
+
+    public final @NonNull String getCandidate() {
+        return candidate;
+    }
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("entity", entity).add("candidate", candidate).toString();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRegistryCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRegistryCommand.java
new file mode 100644 (file)
index 0000000..1cd96a4
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+public abstract class CandidateRegistryCommand {
+    CandidateRegistryCommand() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/CandidateRemovalFailed.java
new file mode 100644 (file)
index 0000000..0410942
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+public class CandidateRemovalFailed extends CandidateRegistryCommand {
+
+    private final Throwable throwable;
+
+    public CandidateRemovalFailed(final Throwable throwable) {
+        this.throwable = throwable;
+    }
+
+    public Throwable getThrowable() {
+        return throwable;
+    }
+}
@@ -1,12 +1,11 @@
 /*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.sal.binding.test;
-
-public class BindingTestUtilities {
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
 
+public class CandidateRemovalFinished extends CandidateRegistryCommand {
 }
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/InternalUpdateResponse.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/InternalUpdateResponse.java
new file mode 100644 (file)
index 0000000..1759615
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.UpdateResponse;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public final class InternalUpdateResponse extends CandidateRegistryCommand {
+    private final @NonNull UpdateResponse<ORMap<DOMEntity, ORSet<String>>> rsp;
+
+    public InternalUpdateResponse(final UpdateResponse<ORMap<DOMEntity, ORSet<String>>> rsp) {
+        this.rsp = requireNonNull(rsp);
+    }
+
+    public @NonNull UpdateResponse<ORMap<DOMEntity, ORSet<String>>> getRsp() {
+        return rsp;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RegisterCandidate.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RegisterCandidate.java
new file mode 100644 (file)
index 0000000..b76a203
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Sent to Candidate registry to register the candidate for a given entity.
+ */
+public final class RegisterCandidate extends AbstractCandidateCommand {
+    public RegisterCandidate(final DOMEntity entity, final String candidate) {
+        super(entity, candidate);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/RemovePreviousCandidates.java
new file mode 100644 (file)
index 0000000..9e1da1e
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+/**
+ * Message sent by the candidate registry's initial behavior to itself to trigger, and re-trigger in case of
+ * failures, the removal of candidates registered by the previous iteration of this node.
+ */
+public class RemovePreviousCandidates extends CandidateRegistryCommand {
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/UnregisterCandidate.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/candidate/command/UnregisterCandidate.java
new file mode 100644 (file)
index 0000000..a39f3d2
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.candidate.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Sent to CandidateRegistry to unregister the candidate for a given entity.
+ */
+public final class UnregisterCandidate extends AbstractCandidateCommand {
+    public UnregisterCandidate(final DOMEntity entity, final String candidate) {
+        super(entity, candidate);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/SingleEntityListenerActor.java
new file mode 100644 (file)
index 0000000..4419fdf
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import java.time.Duration;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.InitialOwnerSync;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.ListenerCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.OwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Keeps track of owners for a single entity, which is mapped to a single LWWRegister in distributed-data.
+ * Notifies the listener responsible for the whole entity type about owner changes.
+ */
+public class SingleEntityListenerActor extends AbstractBehavior<ListenerCommand> {
+    private static final Logger LOG = LoggerFactory.getLogger(SingleEntityListenerActor.class);
+
+    private final String localMember;
+    private final DOMEntity entity;
+    private final ActorRef<TypeListenerCommand> toNotify;
+    private final ReplicatorMessageAdapter<ListenerCommand, LWWRegister<String>> ownerReplicator;
+
+    private String currentOwner = "";
+
+    public SingleEntityListenerActor(final ActorContext<ListenerCommand> context, final String localMember,
+                                     final DOMEntity entity, final ActorRef<TypeListenerCommand> toNotify) {
+        super(context);
+        this.localMember = localMember;
+        this.entity = entity;
+        this.toNotify = toNotify;
+
+        final ActorRef<Replicator.Command> replicator = DistributedData.get(context.getSystem()).replicator();
+        ownerReplicator = new ReplicatorMessageAdapter<>(context, replicator, Duration.ofSeconds(5));
+
+        ownerReplicator.askGet(
+            replyTo -> new Replicator.Get<>(new LWWRegisterKey<>(entity.toString()), Replicator.readLocal(), replyTo),
+            InitialOwnerSync::new);
+        LOG.debug("OwnerListenerActor for {} started", entity.toString());
+    }
+
+    public static Behavior<ListenerCommand> create(final String localMember, final DOMEntity entity,
+                                                   final ActorRef<TypeListenerCommand> toNotify) {
+        return Behaviors.setup(ctx -> new SingleEntityListenerActor(ctx, localMember, entity, toNotify));
+    }
+
+    @Override
+    public Receive<ListenerCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(OwnerChanged.class, this::onOwnerChanged)
+                .onMessage(InitialOwnerSync.class, this::onInitialOwnerSync)
+                .build();
+    }
+
+    private Behavior<ListenerCommand> onInitialOwnerSync(final InitialOwnerSync ownerSync) {
+        final Replicator.GetResponse<LWWRegister<String>> response = ownerSync.getResponse();
+        LOG.debug("Received initial sync response for: {}, response: {}", entity, response);
+
+        // only trigger the initial notification when there is no owner present, as we won't get a subscription
+        // callback when distributed-data does not have any data for a key
+        if (response instanceof Replicator.NotFound) {
+
+            // no data is present, trigger initial notification with no owner
+            triggerNoOwnerNotification();
+        } else if (response instanceof Replicator.GetSuccess) {
+
+            // when we get a success just let subscribe callback handle the initial notification
+            LOG.debug("Owner present for entity: {} at the time of initial sync.", entity);
+        } else {
+            LOG.warn("Get has failed for entity: {}", response);
+        }
+
+        // make sure to subscribe AFTER initial notification
+        ownerReplicator.subscribe(new LWWRegisterKey<>(entity.toString()), OwnerChanged::new);
+
+        return this;
+    }
+
+    private void triggerNoOwnerNotification() {
+        LOG.debug("Triggering initial notification without an owner for: {}", entity);
+        toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.REMOTE_OWNERSHIP_LOST_NO_OWNER, false));
+    }
+
+    private Behavior<ListenerCommand> onOwnerChanged(final OwnerChanged ownerChanged) {
+
+        final Replicator.SubscribeResponse<LWWRegister<String>> response = ownerChanged.getResponse();
+        if (response instanceof Replicator.Changed) {
+
+            final Replicator.Changed<LWWRegister<String>> registerChanged =
+                    (Replicator.Changed<LWWRegister<String>>) response;
+            LOG.debug("Owner changed for: {}, prevOwner: {}, newOwner: {}",
+                    entity, currentOwner, registerChanged.get(registerChanged.key()).getValue());
+            handleOwnerChange(registerChanged);
+        } else if (response instanceof Replicator.Deleted) {
+            handleOwnerLost((Replicator.Deleted<LWWRegister<String>>) response);
+        }
+
+        return this;
+    }
+
+    private void handleOwnerChange(final Replicator.Changed<LWWRegister<String>> changed) {
+        final String newOwner = changed.get(changed.key()).getValue();
+
+        final boolean wasOwner = currentOwner.equals(localMember);
+        final boolean isOwner = newOwner.equals(localMember);
+        final boolean hasOwner = !newOwner.equals("");
+
+        LOG.debug("Owner changed for entity:{}, currentOwner: {}, wasOwner: {}, isOwner: {}, hasOwner:{}",
+                entity, currentOwner, wasOwner, isOwner, hasOwner);
+
+        currentOwner = newOwner;
+
+        toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, isOwner, hasOwner),
+            false));
+    }
+
+    private void handleOwnerLost(final Replicator.Deleted<LWWRegister<String>> changed) {
+        final boolean wasOwner = currentOwner.equals(localMember);
+
+        LOG.debug("Owner lost for entity:{}, currentOwner: {}, wasOwner: {}", entity, currentOwner, wasOwner);
+
+        currentOwner = "";
+        toNotify.tell(new EntityOwnerChanged(entity, EntityOwnershipStateChange.from(wasOwner, false, false), false));
+    }
+}
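
To make the notification logic in handleOwnerChange() concrete, here is a small worked example of the booleans it derives before calling EntityOwnershipStateChange.from(); the member and owner names are invented.

import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;

final class OwnershipChangeExample {
    public static void main(final String[] args) {
        final String localMember = "member-1";   // the member this listener runs on
        final String currentOwner = "member-1";  // owner recorded from the previous update
        final String newOwner = "member-2";      // owner carried by the incoming LWWRegister

        final boolean wasOwner = currentOwner.equals(localMember); // true: we held ownership
        final boolean isOwner = newOwner.equals(localMember);      // false: someone else owns it now
        final boolean hasOwner = !newOwner.isEmpty();              // true: the entity still has an owner

        // The listener reports this transition as a local-ownership-lost change
        System.out.println(EntityOwnershipStateChange.from(wasOwner, isOwner, hasOwner));
    }
}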
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/InitialOwnerSync.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/InitialOwnerSync.java
new file mode 100644 (file)
index 0000000..402389d
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.GetResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+public final class InitialOwnerSync extends ListenerCommand {
+    private final @NonNull GetResponse<LWWRegister<String>> response;
+
+    public InitialOwnerSync(final GetResponse<LWWRegister<String>> response) {
+        this.response = requireNonNull(response);
+    }
+
+    public @NonNull GetResponse<LWWRegister<String>> getResponse() {
+        return response;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/ListenerCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/ListenerCommand.java
new file mode 100644 (file)
index 0000000..b0502f5
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner.command;
+
+public abstract class ListenerCommand {
+    ListenerCommand() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/OwnerChanged.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/owner/command/OwnerChanged.java
new file mode 100644 (file)
index 0000000..4e1298a
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.owner.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Notification from distributed-data sent to the SingleEntityListenerActor when owner changes for the tracked entity.
+ */
+public final class OwnerChanged extends ListenerCommand {
+    private final @NonNull SubscribeResponse<LWWRegister<String>> response;
+
+    public OwnerChanged(final SubscribeResponse<LWWRegister<String>> response) {
+        this.response = requireNonNull(response);
+    }
+
+    public @NonNull SubscribeResponse<LWWRegister<String>> getResponse() {
+        return response;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerActor.java
new file mode 100644 (file)
index 0000000..7e445c5
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator.Changed;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import akka.cluster.ddata.typed.javadsl.ReplicatorMessageAdapter;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.SingleEntityListenerActor;
+import org.opendaylight.controller.eos.akka.registry.listener.owner.command.ListenerCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.CandidatesChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TerminateListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class EntityTypeListenerActor extends AbstractBehavior<TypeListenerCommand> {
+    private static final Logger LOG = LoggerFactory.getLogger(EntityTypeListenerActor.class);
+
+    private final Map<DOMEntity, ActorRef<ListenerCommand>> activeListeners = new HashMap<>();
+    private final String localMember;
+    private final String entityType;
+    private final DOMEntityOwnershipListener listener;
+
+    public EntityTypeListenerActor(final ActorContext<TypeListenerCommand> context, final String localMember,
+                                   final String entityType, final DOMEntityOwnershipListener listener) {
+        super(context);
+        this.localMember = localMember;
+        this.entityType = entityType;
+        this.listener = listener;
+
+        new ReplicatorMessageAdapter<TypeListenerCommand, ORMap<DOMEntity, ORSet<String>>>(context,
+            DistributedData.get(context.getSystem()).replicator(), Duration.ofSeconds(5))
+                .subscribe(CandidateRegistry.KEY, CandidatesChanged::new);
+    }
+
+    public static Behavior<TypeListenerCommand> create(final String localMember, final String entityType,
+                                                       final DOMEntityOwnershipListener listener) {
+        return Behaviors.setup(ctx -> new EntityTypeListenerActor(ctx, localMember, entityType, listener));
+    }
+
+    @Override
+    public Receive<TypeListenerCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(CandidatesChanged.class, this::onCandidatesChanged)
+                .onMessage(EntityOwnerChanged.class, this::onOwnerChanged)
+                .onMessage(TerminateListener.class, this::onTerminate)
+                .build();
+    }
+
+    private Behavior<TypeListenerCommand> onCandidatesChanged(final CandidatesChanged notification) {
+        final SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response = notification.getResponse();
+        if (response instanceof Changed) {
+            processCandidates(((Changed<ORMap<DOMEntity, ORSet<String>>>) response).get(response.key()).getEntries());
+        } else {
+            LOG.warn("Unexpected notification from replicator: {}", response);
+        }
+        return this;
+    }
+
+    private void processCandidates(final Map<DOMEntity, ORSet<String>> entries) {
+        final Map<DOMEntity, ORSet<String>> filteredCandidates = entries.entrySet().stream()
+            .filter(entry -> entry.getKey().getType().equals(entityType))
+            .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
+        LOG.debug("Entity-type: {} current candidates: {}", entityType, filteredCandidates);
+
+        final Set<DOMEntity> removed =
+                ImmutableSet.copyOf(Sets.difference(activeListeners.keySet(), filteredCandidates.keySet()));
+        if (!removed.isEmpty()) {
+            LOG.debug("Stopping listeners for {}", removed);
+            // kill actors for the removed
+            removed.forEach(removedEntity -> getContext().stop(activeListeners.remove(removedEntity)));
+        }
+
+        for (final Entry<DOMEntity, ORSet<String>> entry : filteredCandidates.entrySet()) {
+            activeListeners.computeIfAbsent(entry.getKey(), key -> {
+                // spawn actor for this entity
+                LOG.debug("Starting listener for {}", key);
+                return getContext().spawn(SingleEntityListenerActor.create(localMember, key, getContext().getSelf()),
+                    "SingleEntityListener-" + encodeEntityToActorName(key));
+            });
+        }
+    }
+
+    private Behavior<TypeListenerCommand> onOwnerChanged(final EntityOwnerChanged rsp) {
+        LOG.debug("{} : Entity-type: {} listener, owner change: {}", localMember, entityType, rsp);
+        listener.ownershipChanged(rsp.entity(), rsp.change(), false);
+        return this;
+    }
+
+    private Behavior<TypeListenerCommand> onTerminate(final TerminateListener command) {
+        LOG.debug("Terminating listener for type: {}, listener: {}", entityType, listener);
+        return Behaviors.stopped();
+    }
+
+    private static String encodeEntityToActorName(final DOMEntity entity) {
+        return "type=" + entity.getType() + ",entity="
+                + entity.getIdentifier().getLastPathArgument().getNodeType().getLocalName() + "-" + UUID.randomUUID();
+    }
+}
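A minimal sketch of this actor's lifecycle as seen from a parent typed behavior, assuming an ActorContext named context and a DOMEntityOwnershipListener named listener; the member name, entity type and actor name are illustrative:

    // spawn a per-type listener actor (normally done by EntityTypeListenerRegistry below)
    ActorRef<TypeListenerCommand> typeListener = context.spawn(
        EntityTypeListenerActor.create("member-1", "test-type", listener),
        "TypeListener-test-type");

    // stop it again by sending the singleton terminate command
    typeListener.tell(TerminateListener.INSTANCE);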
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerRegistry.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/EntityTypeListenerRegistry.java
new file mode 100644 (file)
index 0000000..a6183a8
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TerminateListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.UnregisterListener;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class EntityTypeListenerRegistry extends AbstractBehavior<TypeListenerRegistryCommand> {
+    private static final Logger LOG = LoggerFactory.getLogger(EntityTypeListenerRegistry.class);
+
+    private final Map<DOMEntityOwnershipListener, ActorRef<TypeListenerCommand>> spawnedListenerActors =
+            new HashMap<>();
+    private final String localMember;
+
+    public EntityTypeListenerRegistry(final ActorContext<TypeListenerRegistryCommand> context,
+                                      final String localMember) {
+        super(context);
+        this.localMember = requireNonNull(localMember);
+    }
+
+    public static Behavior<TypeListenerRegistryCommand> create(final String role) {
+        return Behaviors.setup(ctx -> new EntityTypeListenerRegistry(ctx, role));
+    }
+
+    @Override
+    public Receive<TypeListenerRegistryCommand> createReceive() {
+        return newReceiveBuilder()
+                .onMessage(RegisterListener.class, this::onRegisterListener)
+                .onMessage(UnregisterListener.class, this::onUnregisterListener)
+                .build();
+    }
+
+    private Behavior<TypeListenerRegistryCommand> onRegisterListener(final RegisterListener command) {
+        LOG.debug("Spawning entity type listener actor for: {}", command.getEntityType());
+
+        final ActorRef<TypeListenerCommand> listenerActor =
+                getContext().spawn(EntityTypeListenerActor.create(localMember,
+                        command.getEntityType(), command.getDelegateListener()),
+                        "TypeListener:" + encodeEntityToActorName(command.getEntityType()));
+        spawnedListenerActors.put(command.getDelegateListener(), listenerActor);
+        return this;
+    }
+
+    private Behavior<TypeListenerRegistryCommand> onUnregisterListener(final UnregisterListener command) {
+        LOG.debug("Stopping entity type listener actor for: {}", command.getEntityType());
+
+        final ActorRef<TypeListenerCommand> actor = spawnedListenerActors.remove(command.getDelegateListener());
+        if (actor != null) {
+            actor.tell(TerminateListener.INSTANCE);
+        }
+        return this;
+    }
+
+    private static String encodeEntityToActorName(final String entityType) {
+        return "type=" + entityType + "-" + UUID.randomUUID();
+    }
+}
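A minimal sketch of driving this registry, assuming an ActorContext named context and a DOMEntityOwnershipListener named listener; the role and entity-type values are illustrative:

    ActorRef<TypeListenerRegistryCommand> registry = context.spawn(
        EntityTypeListenerRegistry.create("member-1"), "ListenerRegistry");

    // spawns an EntityTypeListenerActor for the "test-type" entity type
    registry.tell(new RegisterListener("test-type", listener));

    // stops that actor again by sending it TerminateListener.INSTANCE
    registry.tell(new UnregisterListener("test-type", listener));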
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/CandidatesChanged.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/CandidatesChanged.java
new file mode 100644 (file)
index 0000000..07a4994
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.Replicator.SubscribeResponse;
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Notification adapted from distributed-data, sent to the EntityTypeListenerActor when entity candidates change.
+ */
+public final class CandidatesChanged extends TypeListenerCommand {
+    private final @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response;
+
+    public CandidatesChanged(final SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> response) {
+        this.response = requireNonNull(response);
+    }
+
+    public @NonNull SubscribeResponse<ORMap<DOMEntity, ORSet<String>>> getResponse() {
+        return response;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).add("response", response).toString();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/EntityOwnerChanged.java
new file mode 100644 (file)
index 0000000..ee0f54f
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerActor;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+/**
+ * Notification sent to {@link EntityTypeListenerActor} when there is an owner change for an Entity of a given type.
+ */
+@NonNullByDefault
+public final class EntityOwnerChanged extends TypeListenerCommand {
+    private final DOMEntity entity;
+    private final EntityOwnershipStateChange change;
+    private final boolean inJeopardy;
+
+    public EntityOwnerChanged(final DOMEntity entity, final EntityOwnershipStateChange change,
+            final boolean inJeopardy) {
+        this.entity = requireNonNull(entity);
+        this.change = requireNonNull(change);
+        this.inJeopardy = inJeopardy;
+    }
+
+    public DOMEntity entity() {
+        return entity;
+    }
+
+    public EntityOwnershipStateChange change() {
+        return change;
+    }
+
+    public boolean inJeopardy() {
+        return inJeopardy;
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this)
+            .add("entity", entity)
+            .add("change", change)
+            .add("inJeopardy", inJeopardy)
+            .toString();
+    }
+}
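A minimal sketch of how a consumer might interpret the state change carried by this command, assuming an SLF4J logger is available; the log messages are illustrative:

    private static final Logger LOG = LoggerFactory.getLogger("eos-example");

    static void describe(final EntityOwnerChanged rsp) {
        final EntityOwnershipStateChange change = rsp.change();
        if (change.isOwner() && !change.wasOwner()) {
            LOG.info("{}: ownership acquired", rsp.entity());
        } else if (change.wasOwner() && !change.isOwner()) {
            LOG.info("{}: ownership lost", rsp.entity());
        } else if (!change.hasOwner()) {
            LOG.info("{}: no owner at the moment", rsp.entity());
        }
    }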
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/RegisterListener.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/RegisterListener.java
new file mode 100644 (file)
index 0000000..ffa3d47
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+
+/**
+ * Register a DOMEntityOwnershipListener for a given entity-type.
+ */
+public final class RegisterListener extends TypeListenerRegistryCommand {
+    public RegisterListener(final String entityType, final DOMEntityOwnershipListener delegateListener) {
+        super(entityType, delegateListener);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TerminateListener.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TerminateListener.java
new file mode 100644 (file)
index 0000000..a2627e7
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+/**
+ * Sent to the listener actor to stop it on demand, i.e. during listener unregistration.
+ */
+public final class TerminateListener extends TypeListenerCommand {
+
+    public static final TerminateListener INSTANCE = new TerminateListener();
+
+    private TerminateListener() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TypeListenerCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TypeListenerCommand.java
new file mode 100644 (file)
index 0000000..939d4a2
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+public abstract class TypeListenerCommand {
+    TypeListenerCommand() {
+        // Hidden on purpose
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TypeListenerRegistryCommand.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/TypeListenerRegistryCommand.java
new file mode 100644 (file)
index 0000000..3bfce42
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+
+public abstract class TypeListenerRegistryCommand {
+    private final @NonNull String entityType;
+    private final @NonNull DOMEntityOwnershipListener delegateListener;
+
+    TypeListenerRegistryCommand(final String entityType, final DOMEntityOwnershipListener delegateListener) {
+        this.entityType = requireNonNull(entityType);
+        this.delegateListener = requireNonNull(delegateListener);
+    }
+
+    public final @NonNull String getEntityType() {
+        return entityType;
+    }
+
+    public final @NonNull DOMEntityOwnershipListener getDelegateListener() {
+        return delegateListener;
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/UnregisterListener.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/controller/eos/akka/registry/listener/type/command/UnregisterListener.java
new file mode 100644 (file)
index 0000000..4bd09c8
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.registry.listener.type.command;
+
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+
+/**
+ * Unregister a listener from the EntityTypeListenerRegistry.
+ */
+public final class UnregisterListener extends TypeListenerRegistryCommand {
+    public UnregisterListener(final String entityType, final DOMEntityOwnershipListener delegateListener) {
+        super(entityType, delegateListener);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/yang/gen/v1/urn/opendaylight/params/xml/ns/yang/controller/entity/owners/norev/EntityNameBuilder.java b/opendaylight/md-sal/eos-dom-akka/src/main/java/org/opendaylight/yang/gen/v1/urn/opendaylight/params/xml/ns/yang/controller/entity/owners/norev/EntityNameBuilder.java
new file mode 100644 (file)
index 0000000..f67d2cc
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev;
+
+public final class EntityNameBuilder {
+    private EntityNameBuilder() {
+        // Hidden on purpose
+    }
+
+    public static EntityName getDefaultInstance(final String defaultValue) {
+        throw new UnsupportedOperationException("Not yet implemented");
+    }
+}
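Since getDefaultInstance() is left unimplemented, a minimal sketch of how EntityName union values are constructed elsewhere in this patch (both variants appear in AkkaEntityOwnershipServiceTest); CODEC_CONTEXT and entityId are assumptions taken from that test:

    // instance-identifier variant, going through the binding codec
    EntityName byIid = new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId));

    // plain string variant
    EntityName byString = new EntityName(
        "/network-topology:network-topology/topology[topology-id='test']");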
diff --git a/opendaylight/md-sal/eos-dom-akka/src/main/yang/odl-akka-eos.yang b/opendaylight/md-sal/eos-dom-akka/src/main/yang/odl-akka-eos.yang
new file mode 100644 (file)
index 0000000..1aa2d41
--- /dev/null
@@ -0,0 +1,96 @@
+module odl-entity-owners {
+  namespace urn:opendaylight:params:xml:ns:yang:controller:entity-owners;
+  prefix entity-owners;
+
+  organization 'OpenDaylight Project';
+  description "An initial cut at modeling entity ownership status information
+               in a way which is not dependent on the datastore.
+
+               This model is considered experimental and
+               implementation-specific. It can change incompatibly between
+               OpenDaylight releases.";
+
+  typedef entity-type {
+    type string {
+      length 1..max;
+      // FIXME: it would be nice to have a pattern here, or even better
+      //        if we turn this into an extensible enum (i.e. identityref)
+    }
+  }
+
+  typedef entity-name {
+    type union {
+      type instance-identifier;
+      type string {
+        length 1..max;
+      }
+    }
+  }
+
+  typedef node-name {
+    type string {
+      length 1..max;
+    }
+  }
+
+  grouping entity-id {
+    leaf type {
+      type entity-type;
+      mandatory true;
+    }
+    leaf name {
+      type entity-name;
+      mandatory true;
+    }
+  }
+
+  grouping owner {
+    leaf owner-node {
+      type node-name;
+    }
+  }
+
+  grouping candidates {
+    leaf-list candidate-nodes {
+      type node-name;
+      ordered-by user;
+      min-elements 1;
+    }
+  }
+
+  grouping details {
+     uses owner;
+     uses candidates;
+  }
+
+  rpc get-entities {
+    output {
+      list entities {
+        key 'type name';
+        uses entity-id;
+        uses details;
+      }
+    }
+  }
+
+  rpc get-entity {
+    input {
+      uses entity-id;
+    }
+
+    output {
+      uses details;
+    }
+  }
+
+  rpc get-entity-owner {
+    input {
+      uses entity-id;
+    }
+
+    output {
+      uses owner;
+    }
+  }
+}
+
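A minimal sketch of invoking the get-entity-owner RPC through the generated Java bindings, mirroring the test usage further below; service, CODEC_CONTEXT and entityId are assumptions from AkkaEntityOwnershipServiceTest:

    var ownerNode = service.getEntityOwner(new GetEntityOwnerInputBuilder()
        .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
        .setType(new EntityType("test"))
        .build())
        .get()
        .getResult()
        .getOwnerNode();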
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AbstractNativeEosTest.java
new file mode 100644 (file)
index 0000000..6adba42
--- /dev/null
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+
+import akka.actor.ActorSystem;
+import akka.actor.Address;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.Adapter;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.cluster.ddata.LWWRegister;
+import akka.cluster.ddata.LWWRegisterKey;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+import org.opendaylight.controller.eos.akka.bootstrap.EOSMain;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.ActivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.DeactivateDataCenter;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberReachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.MemberUnreachableEvent;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorReply;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.RegisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.UnregisterCandidate;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.EntityOwnerChanged;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.RegisterListener;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.mdsal.binding.dom.codec.impl.BindingCodecContext;
+import org.opendaylight.mdsal.binding.generator.impl.DefaultBindingRuntimeGenerator;
+import org.opendaylight.mdsal.binding.runtime.api.BindingRuntimeGenerator;
+import org.opendaylight.mdsal.binding.runtime.spi.BindingRuntimeHelpers;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipStateChange;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class AbstractNativeEosTest {
+
+    public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+    public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+    protected static final String DEFAULT_DATACENTER = "dc-default";
+
+    protected static final List<String> TWO_NODE_SEED_NODES =
+            List.of("akka://ClusterSystem@127.0.0.1:2550",
+                    "akka://ClusterSystem@127.0.0.1:2551");
+
+    protected static final List<String> THREE_NODE_SEED_NODES =
+            List.of("akka://ClusterSystem@127.0.0.1:2550",
+                    "akka://ClusterSystem@127.0.0.1:2551",
+                    "akka://ClusterSystem@127.0.0.1:2552");
+
+    protected static final List<String> DATACENTER_SEED_NODES =
+            List.of("akka://ClusterSystem@127.0.0.1:2550",
+                    "akka://ClusterSystem@127.0.0.1:2551",
+                    "akka://ClusterSystem@127.0.0.1:2552",
+                    "akka://ClusterSystem@127.0.0.1:2553");
+
+    private static final BindingRuntimeGenerator BINDING_RUNTIME_GENERATOR = new DefaultBindingRuntimeGenerator();
+
+    protected static BindingCodecContext CODEC_CONTEXT
+            = new BindingCodecContext(BindingRuntimeHelpers.createRuntimeContext());
+
+    private static final String REMOTE_PROTOCOL = "akka";
+    private static final String PORT_PARAM = "akka.remote.artery.canonical.port";
+    private static final String ROLE_PARAM = "akka.cluster.roles";
+    private static final String SEED_NODES_PARAM = "akka.cluster.seed-nodes";
+    private static final String DATA_CENTER_PARAM = "akka.cluster.multi-data-center.self-data-center";
+
+    protected static MockNativeEntityOwnershipService startupNativeService(final int port, final List<String> roles,
+                                                                           final List<String> seedNodes)
+            throws ExecutionException, InterruptedException {
+        final Map<String, Object> overrides = new HashMap<>();
+        overrides.put(PORT_PARAM, port);
+        overrides.put(ROLE_PARAM, roles);
+        if (!seedNodes.isEmpty()) {
+            overrides.put(SEED_NODES_PARAM, seedNodes);
+        }
+
+        final Config config = ConfigFactory.parseMap(overrides)
+                .withFallback(ConfigFactory.load());
+
+        // Create a classic Akka system since that's what we will have in OSGi
+        final akka.actor.ActorSystem system = akka.actor.ActorSystem.create("ClusterSystem", config);
+
+        return new MockNativeEntityOwnershipService(system);
+    }
+
+    protected static ClusterNode startupRemote(final int port, final List<String> roles)
+            throws ExecutionException, InterruptedException {
+        return startup(port, roles, THREE_NODE_SEED_NODES);
+    }
+
+    protected static ClusterNode startupRemote(final int port, final List<String> roles, final List<String> seedNodes)
+            throws ExecutionException, InterruptedException {
+        return startup(port, roles, seedNodes);
+    }
+
+    protected static ClusterNode startup(final int port, final List<String> roles)
+            throws ExecutionException, InterruptedException {
+        return startup(port, roles, List.of());
+    }
+
+    protected static ClusterNode startup(final int port, final List<String> roles, final List<String> seedNodes)
+            throws ExecutionException, InterruptedException {
+
+        return startup(port, roles, seedNodes, AbstractNativeEosTest::rootBehavior);
+    }
+
+    protected static ClusterNode startup(final int port, final List<String> roles, final List<String> seedNodes,
+                                         final Supplier<Behavior<BootstrapCommand>> bootstrap)
+            throws ExecutionException, InterruptedException {
+        // Override the configuration
+        final Map<String, Object> overrides = new HashMap<>(4);
+        overrides.put(PORT_PARAM, port);
+        overrides.put(ROLE_PARAM, roles);
+        if (!seedNodes.isEmpty()) {
+            overrides.put(SEED_NODES_PARAM, seedNodes);
+        }
+
+        final Config config = ConfigFactory.parseMap(overrides).withFallback(ConfigFactory.load());
+
+        // Create a classic Akka system since that's what we will have in OSGi
+        final akka.actor.ActorSystem system = akka.actor.ActorSystem.create("ClusterSystem", config);
+        final ActorRef<BootstrapCommand> eosBootstrap =
+                Adapter.spawn(system, bootstrap.get(), "EOSBootstrap");
+
+        final CompletionStage<RunningContext> ask = AskPattern.ask(eosBootstrap,
+                GetRunningContext::new,
+                Duration.ofSeconds(5),
+                Adapter.toTyped(system.scheduler()));
+        final RunningContext runningContext = ask.toCompletableFuture().get();
+
+        return new ClusterNode(port, roles, system, eosBootstrap, runningContext.getListenerRegistry(),
+                runningContext.getCandidateRegistry(), runningContext.getOwnerSupervisor());
+    }
+
+    protected static ClusterNode startupWithDatacenter(final int port, final List<String> roles,
+                                                       final List<String> seedNodes, final String dataCenter)
+            throws ExecutionException, InterruptedException {
+        final akka.actor.ActorSystem system = startupActorSystem(port, roles, seedNodes, dataCenter);
+        final ActorRef<BootstrapCommand> eosBootstrap =
+                Adapter.spawn(system, EOSMain.create(CODEC_CONTEXT.getInstanceIdentifierCodec()), "EOSBootstrap");
+
+        final CompletionStage<RunningContext> ask = AskPattern.ask(eosBootstrap,
+                GetRunningContext::new,
+                Duration.ofSeconds(5),
+                Adapter.toTyped(system.scheduler()));
+        final RunningContext runningContext = ask.toCompletableFuture().get();
+
+        return new ClusterNode(port, roles, system, eosBootstrap, runningContext.getListenerRegistry(),
+                runningContext.getCandidateRegistry(), runningContext.getOwnerSupervisor());
+    }
+
+    protected static akka.actor.ActorSystem startupActorSystem(final int port, final List<String> roles,
+                                                               final List<String> seedNodes) {
+        final Map<String, Object> overrides = new HashMap<>();
+        overrides.put(PORT_PARAM, port);
+        overrides.put(ROLE_PARAM, roles);
+        if (!seedNodes.isEmpty()) {
+            overrides.put(SEED_NODES_PARAM, seedNodes);
+        }
+
+        final Config config = ConfigFactory.parseMap(overrides)
+                .withFallback(ConfigFactory.load());
+
+        // Create a classic Akka system since that's what we will have in OSGi
+        return akka.actor.ActorSystem.create("ClusterSystem", config);
+    }
+
+    protected static akka.actor.ActorSystem startupActorSystem(final int port, final List<String> roles,
+                                                               final List<String> seedNodes, final String dataCenter) {
+        final Map<String, Object> overrides = new HashMap<>();
+        overrides.put(PORT_PARAM, port);
+        overrides.put(ROLE_PARAM, roles);
+        if (!seedNodes.isEmpty()) {
+            overrides.put(SEED_NODES_PARAM, seedNodes);
+        }
+        overrides.put(DATA_CENTER_PARAM, dataCenter);
+
+        final Config config = ConfigFactory.parseMap(overrides)
+                .withFallback(ConfigFactory.load());
+
+        // Create a classic Akka system since that's what we will have in OSGi
+        return akka.actor.ActorSystem.create("ClusterSystem", config);
+    }
+
+    private static Behavior<BootstrapCommand> rootBehavior() {
+        return Behaviors.setup(context -> EOSMain.create(CODEC_CONTEXT.getInstanceIdentifierCodec()));
+    }
+
+    protected static void registerCandidates(final ClusterNode node, final DOMEntity entity, final String... members) {
+        final ActorRef<CandidateRegistryCommand> candidateRegistry = node.getCandidateRegistry();
+        registerCandidates(candidateRegistry, entity, members);
+    }
+
+    protected static void registerCandidates(final ActorRef<CandidateRegistryCommand> candidateRegistry,
+                                             final DOMEntity entity, final String... members) {
+        for (final String member : members) {
+            candidateRegistry.tell(new RegisterCandidate(entity, member));
+        }
+    }
+
+    protected static void unregisterCandidates(final ClusterNode node, final DOMEntity entity,
+                                               final String... members) {
+        final ActorRef<CandidateRegistryCommand> candidateRegistry = node.getCandidateRegistry();
+        for (final String member : members) {
+            candidateRegistry.tell(new UnregisterCandidate(entity, member));
+        }
+    }
+
+    protected static MockEntityOwnershipListener registerListener(final ClusterNode node, final DOMEntity entity) {
+        final ActorRef<TypeListenerRegistryCommand> listenerRegistry = node.getListenerRegistry();
+        final MockEntityOwnershipListener listener = new MockEntityOwnershipListener(node.getRoles().get(0));
+        listenerRegistry.tell(new RegisterListener(entity.getType(), listener));
+
+        return listener;
+    }
+
+    protected static void reachableMember(final ClusterNode node, final String... role) {
+        reachableMember(node.getOwnerSupervisor(), role);
+    }
+
+    protected static void reachableMember(final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+                                          final String... role) {
+        ownerSupervisor.tell(new MemberReachableEvent(
+                new Address(REMOTE_PROTOCOL, "ClusterSystem@127.0.0.1:2550"), Set.of(role)));
+    }
+
+    protected static void unreachableMember(final ClusterNode node, final String... role) {
+        unreachableMember(node.getOwnerSupervisor(), role);
+    }
+
+    protected static void unreachableMember(final ActorRef<OwnerSupervisorCommand> ownerSupervisor,
+                                            final String... role) {
+        ownerSupervisor.tell(new MemberUnreachableEvent(
+                new Address(REMOTE_PROTOCOL, "ClusterSystem@127.0.0.1:2550"), Set.of(role)));
+    }
+
+    protected static void waitUntillOwnerPresent(final ClusterNode clusterNode, final DOMEntity entity) {
+        await().atMost(Duration.ofSeconds(15)).until(() -> {
+            final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem());
+            final CompletionStage<Replicator.GetResponse<LWWRegister<String>>> ask =
+                    AskPattern.ask(distributedData.replicator(),
+                            replyTo -> new Replicator.Get<>(
+                                    new LWWRegisterKey<>(entity.toString()), Replicator.readLocal(), replyTo),
+                            Duration.ofSeconds(5),
+                            clusterNode.getActorSystem().scheduler());
+
+            final Replicator.GetResponse<LWWRegister<String>> response =
+                    ask.toCompletableFuture().get(5, TimeUnit.SECONDS);
+
+            if (response instanceof Replicator.GetSuccess) {
+                final String owner = ((Replicator.GetSuccess<LWWRegister<String>>) response).dataValue().getValue();
+                return !owner.isEmpty();
+            }
+
+            return false;
+        });
+    }
+
+    protected static void waitUntillCandidatePresent(final ClusterNode clusterNode, final DOMEntity entity,
+                                                     final String candidate) {
+        await().atMost(Duration.ofSeconds(15)).until(() -> {
+            final DistributedData distributedData = DistributedData.get(clusterNode.getActorSystem());
+
+            final CompletionStage<Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>>> ask =
+                    AskPattern.ask(distributedData.replicator(),
+                            replyTo -> new Replicator.Get<>(
+                                    CandidateRegistry.KEY, Replicator.readLocal(), replyTo),
+                            Duration.ofSeconds(5),
+                            clusterNode.getActorSystem().scheduler());
+
+            final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response =
+                    ask.toCompletableFuture().get(5, TimeUnit.SECONDS);
+
+            if (response instanceof Replicator.GetSuccess) {
+                final Map<DOMEntity, ORSet<String>> entries =
+                        ((Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response).dataValue().getEntries();
+
+                return entries.get(entity).contains(candidate);
+            }
+            return false;
+        });
+    }
+
+    protected static CompletableFuture<OwnerSupervisorReply> activateDatacenter(final ClusterNode clusterNode) {
+        final CompletionStage<OwnerSupervisorReply> ask =
+                AskPattern.ask(clusterNode.getOwnerSupervisor(),
+                        ActivateDataCenter::new,
+                        Duration.ofSeconds(20),
+                        clusterNode.actorSystem.scheduler());
+        return ask.toCompletableFuture();
+    }
+
+    protected static CompletableFuture<OwnerSupervisorReply> deactivateDatacenter(final ClusterNode clusterNode) {
+        final CompletionStage<OwnerSupervisorReply> ask =
+                AskPattern.ask(clusterNode.getOwnerSupervisor(),
+                        DeactivateDataCenter::new,
+                        Duration.ofSeconds(20),
+                        clusterNode.actorSystem.scheduler());
+        return ask.toCompletableFuture();
+    }
+
+    protected static void verifyListenerState(final MockEntityOwnershipListener listener, final DOMEntity entity,
+                                              final boolean hasOwner, final boolean isOwner, final boolean wasOwner) {
+        await().until(() -> !listener.getChanges().isEmpty());
+
+        await().atMost(Duration.ofSeconds(10)).untilAsserted(() -> {
+            final var changes = listener.getChanges();
+            final var domEntityOwnershipChange = listener.getChanges().get(changes.size() - 1);
+            assertEquals(entity, domEntityOwnershipChange.entity());
+
+            assertEquals(hasOwner, domEntityOwnershipChange.change().hasOwner());
+            assertEquals(isOwner, domEntityOwnershipChange.change().isOwner());
+            assertEquals(wasOwner, domEntityOwnershipChange.change().wasOwner());
+        });
+    }
+
+    protected static void verifyNoNotifications(final MockEntityOwnershipListener listener) {
+        verifyNoNotifications(listener, 2);
+    }
+
+    protected static void verifyNoNotifications(final MockEntityOwnershipListener listener, final long delaySeconds) {
+        await().pollDelay(delaySeconds, TimeUnit.SECONDS).until(() -> listener.getChanges().isEmpty());
+    }
+
+    protected static void verifyNoAdditionalNotifications(
+            final MockEntityOwnershipListener listener, final long delaySeconds) {
+        listener.resetListener();
+        verifyNoNotifications(listener, delaySeconds);
+    }
+
+    protected static final class ClusterNode {
+        private final int port;
+        private final List<String> roles;
+        private final akka.actor.typed.ActorSystem<Void> actorSystem;
+        private final ActorRef<BootstrapCommand> eosBootstrap;
+        private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+        private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+        private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+        private ClusterNode(final int port,
+                            final List<String> roles,
+                            final ActorSystem actorSystem,
+                            final ActorRef<BootstrapCommand> eosBootstrap,
+                            final ActorRef<TypeListenerRegistryCommand> listenerRegistry,
+                            final ActorRef<CandidateRegistryCommand> candidateRegistry,
+                            final ActorRef<OwnerSupervisorCommand> ownerSupervisor) {
+            this.port = port;
+            this.roles = roles;
+            this.actorSystem = Adapter.toTyped(actorSystem);
+            this.eosBootstrap = eosBootstrap;
+            this.listenerRegistry = listenerRegistry;
+            this.candidateRegistry = candidateRegistry;
+            this.ownerSupervisor = ownerSupervisor;
+        }
+
+        public int getPort() {
+            return port;
+        }
+
+        public akka.actor.typed.ActorSystem<Void> getActorSystem() {
+            return actorSystem;
+        }
+
+        public ActorRef<BootstrapCommand> getEosBootstrap() {
+            return eosBootstrap;
+        }
+
+        public ActorRef<TypeListenerRegistryCommand> getListenerRegistry() {
+            return listenerRegistry;
+        }
+
+        public ActorRef<CandidateRegistryCommand> getCandidateRegistry() {
+            return candidateRegistry;
+        }
+
+        public ActorRef<OwnerSupervisorCommand> getOwnerSupervisor() {
+            return ownerSupervisor;
+        }
+
+        public List<String> getRoles() {
+            return roles;
+        }
+    }
+
+    protected static final class MockEntityOwnershipListener implements DOMEntityOwnershipListener {
+        private final List<EntityOwnerChanged> changes = new ArrayList<>();
+        private final String member;
+        private final Logger log;
+
+        public MockEntityOwnershipListener(final String member) {
+            log = LoggerFactory.getLogger("EOS-listener-" + member);
+            this.member = member;
+        }
+
+        @Override
+        public void ownershipChanged(final DOMEntity entity, final EntityOwnershipStateChange change,
+                final boolean inJeopardy) {
+            final var changed = new EntityOwnerChanged(entity, change, inJeopardy);
+            log.info("{} Received ownershipCHanged: {}", member, changed);
+            log.info("{} changes: {}", member, changes.size());
+            changes.add(changed);
+        }
+
+        public List<EntityOwnerChanged> getChanges() {
+            return changes;
+        }
+
+        public void resetListener() {
+            changes.clear();
+        }
+    }
+
+    protected static final class MockNativeEntityOwnershipService extends AkkaEntityOwnershipService {
+        private final ActorSystem classicActorSystem;
+
+        protected MockNativeEntityOwnershipService(final ActorSystem classicActorSystem)
+                throws ExecutionException, InterruptedException {
+            super(classicActorSystem, CODEC_CONTEXT);
+            this.classicActorSystem = classicActorSystem;
+        }
+
+        protected void reachableMember(final String... role) {
+            AbstractNativeEosTest.reachableMember(ownerSupervisor, role);
+        }
+
+        public void unreachableMember(final String... role) {
+            AbstractNativeEosTest.unreachableMember(ownerSupervisor, role);
+        }
+
+        public ActorSystem getActorSystem() {
+            return classicActorSystem;
+        }
+    }
+}
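A minimal sketch of how a test might combine these helpers into a three-node scenario; the ports, roles and expected listener state are illustrative:

    ClusterNode node1 = startup(2550, List.of("member-1"), THREE_NODE_SEED_NODES);
    ClusterNode node2 = startup(2551, List.of("member-2"), THREE_NODE_SEED_NODES);
    ClusterNode node3 = startup(2552, List.of("member-3"), THREE_NODE_SEED_NODES);

    // member-1 becomes owner of ENTITY_1, member-2 only observes it
    registerCandidates(node1, ENTITY_1, "member-1");
    waitUntillOwnerPresent(node1, ENTITY_1);

    MockEntityOwnershipListener listener = registerListener(node2, ENTITY_1);
    verifyListenerState(listener, ENTITY_1, true, false, false);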
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/AkkaEntityOwnershipServiceTest.java
new file mode 100644 (file)
index 0000000..652102f
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import akka.actor.ActorSystem;
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.javadsl.Adapter;
+import akka.actor.typed.javadsl.AskPattern;
+import akka.cluster.ddata.ORMap;
+import akka.cluster.ddata.ORSet;
+import akka.cluster.ddata.typed.javadsl.DistributedData;
+import akka.cluster.ddata.typed.javadsl.Replicator;
+import com.typesafe.config.ConfigFactory;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CompletionStage;
+import java.util.concurrent.ExecutionException;
+import org.awaitility.Durations;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
+import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+
+public class AkkaEntityOwnershipServiceTest extends AbstractNativeEosTest {
+    static final String ENTITY_TYPE = "test";
+    static final String ENTITY_TYPE2 = "test2";
+    static final QName QNAME = QName.create("test", "2015-08-11", "foo");
+
+    private ActorSystem system;
+    private akka.actor.typed.ActorSystem<Void> typedSystem;
+    private AkkaEntityOwnershipService service;
+    private ActorRef<Replicator.Command> replicator;
+
+    @Before
+    public void setUp() throws Exception {
+        system = ActorSystem.create("ClusterSystem", ConfigFactory.load());
+        typedSystem = Adapter.toTyped(system);
+        replicator = DistributedData.get(typedSystem).replicator();
+
+        service = new AkkaEntityOwnershipService(system, CODEC_CONTEXT);
+    }
+
+    @After
+    public void tearDown() throws InterruptedException, ExecutionException {
+        service.close();
+        ActorTestKit.shutdown(Adapter.toTyped(system), Duration.ofSeconds(20));
+    }
+
+    @Test
+    public void testRegisterCandidate() throws Exception {
+        final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
+        final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+        final Registration reg = service.registerCandidate(entity);
+        assertNotNull(reg);
+
+        verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+
+        try {
+            service.registerCandidate(entity);
+            fail("Expected CandidateAlreadyRegisteredException");
+        } catch (final CandidateAlreadyRegisteredException e) {
+            // expected
+            assertEquals("getEntity", entity, e.getEntity());
+        }
+
+        final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE2, entityId);
+        final Registration reg2 = service.registerCandidate(entity2);
+
+        assertNotNull(reg2);
+        verifyEntityCandidateRegistered(ENTITY_TYPE2, entityId, "member-1");
+    }
+
+    @Test
+    public void testUnregisterCandidate() throws Exception {
+        final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
+        final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+        final Registration reg = service.registerCandidate(entity);
+        assertNotNull(reg);
+
+        verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+
+        reg.close();
+        verifyEntityCandidateMissing(ENTITY_TYPE, entityId, "member-1");
+
+        service.registerCandidate(entity);
+        verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+    }
+
+    @Test
+    public void testListenerRegistration() throws Exception {
+
+        final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
+        final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+        final MockEntityOwnershipListener listener = new MockEntityOwnershipListener("member-1");
+
+        final Registration reg = service.registerListener(entity.getType(), listener);
+
+        assertNotNull("EntityOwnershipListenerRegistration null", reg);
+
+        final Registration candidate = service.registerCandidate(entity);
+
+        verifyListenerState(listener, entity, true, true, false);
+        final int changes = listener.getChanges().size();
+
+        reg.close();
+        candidate.close();
+
+        verifyEntityCandidateMissing(ENTITY_TYPE, entityId, "member-1");
+
+        service.registerCandidate(entity);
+        // check listener not called when listener registration closed
+        await().pollDelay(Durations.TWO_SECONDS).until(() -> listener.getChanges().size() == changes);
+    }
+
+    @Test
+    public void testGetOwnershipState() throws Exception {
+        final DOMEntity entity = new DOMEntity(ENTITY_TYPE, "one");
+
+        final Registration registration = service.registerCandidate(entity);
+        verifyGetOwnershipState(service, entity, EntityOwnershipState.IS_OWNER);
+
+        final RunningContext runningContext = service.getRunningContext();
+        registerCandidates(runningContext.getCandidateRegistry(), entity, "member-2");
+
+        final ActorRef<OwnerSupervisorCommand> ownerSupervisor = runningContext.getOwnerSupervisor();
+        reachableMember(ownerSupervisor, "member-2", DEFAULT_DATACENTER);
+        unreachableMember(ownerSupervisor, "member-1", DEFAULT_DATACENTER);
+        verifyGetOwnershipState(service, entity, EntityOwnershipState.OWNED_BY_OTHER);
+
+        final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, "two");
+        final Optional<EntityOwnershipState> state = service.getOwnershipState(entity2);
+        assertFalse(state.isPresent());
+
+        unreachableMember(ownerSupervisor, "member-2", DEFAULT_DATACENTER);
+        verifyGetOwnershipState(service, entity, EntityOwnershipState.NO_OWNER);
+    }
+
+    @Test
+    public void testIsCandidateRegistered() throws Exception {
+        final DOMEntity test = new DOMEntity("test-type", "test");
+
+        assertFalse(service.isCandidateRegistered(test));
+
+        service.registerCandidate(test);
+
+        assertTrue(service.isCandidateRegistered(test));
+    }
+
+    @Test
+    public void testEntityRetrievalWithYiid() throws Exception {
+        final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME),
+                new NodeIdentifier(Topology.QNAME),
+                NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"),
+                new NodeIdentifier(Node.QNAME),
+                NodeIdentifierWithPredicates.of(Node.QNAME, QName.create(Node.QNAME, "node-id"), "test://test-node"));
+
+        final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+        final Registration reg = service.registerCandidate(entity);
+
+        assertNotNull(reg);
+        verifyEntityCandidateRegistered(ENTITY_TYPE, entityId, "member-1");
+
+        var result = service.getEntity(new GetEntityInputBuilder()
+            .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+            .setType(new EntityType(ENTITY_TYPE))
+            .build())
+            .get()
+            .getResult();
+
+        assertEquals("member-1", result.getOwnerNode().getValue());
+        assertEquals("member-1", result.getCandidateNodes().get(0).getValue());
+
+        // we should not be able to retrieve the entity when using string
+        final String entityPathEncoded =
+                "/network-topology:network-topology/topology[topology-id='test']/node[node-id='test://test-node']";
+
+        result = service.getEntity(new GetEntityInputBuilder()
+            .setName(new EntityName(entityPathEncoded))
+            .setType(new EntityType(ENTITY_TYPE))
+            .build())
+            .get()
+            .getResult();
+
+        assertNull(result.getOwnerNode());
+        assertEquals(List.of(), result.getCandidateNodes());
+
+        final var getEntitiesResult = service.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
+        final var entities = getEntitiesResult.nonnullEntities();
+        assertEquals(1, entities.size());
+        assertTrue(entities.get(new EntitiesKey(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+            new EntityType(ENTITY_TYPE))).getCandidateNodes().contains(new NodeName("member-1")));
+        assertEquals("member-1", entities.get(new EntitiesKey(
+                        new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+                        new EntityType(ENTITY_TYPE)))
+                .getOwnerNode().getValue());
+
+        final var getOwnerResult = service.getEntityOwner(new GetEntityOwnerInputBuilder()
+            .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+            .setType(new EntityType(ENTITY_TYPE))
+            .build()).get().getResult();
+
+        assertEquals("member-1", getOwnerResult.getOwnerNode().getValue());
+    }
+
+    private static void verifyGetOwnershipState(final DOMEntityOwnershipService service, final DOMEntity entity,
+                                                final EntityOwnershipState expState) {
+        await().atMost(Duration.ofSeconds(5)).untilAsserted(() -> {
+            assertEquals(Optional.of(expState), service.getOwnershipState(entity));
+        });
+    }
+
+    private void verifyEntityCandidateRegistered(final String entityType,
+                                                 final YangInstanceIdentifier entityId,
+                                                 final String candidateName) {
+        await().atMost(Duration.ofSeconds(5))
+                .untilAsserted(() -> doVerifyEntityCandidateRegistered(entityType, entityId, candidateName));
+    }
+
+    private void doVerifyEntityCandidateRegistered(final String entityType,
+                                                   final YangInstanceIdentifier entityId,
+                                                   final String candidateName)
+            throws ExecutionException, InterruptedException {
+        final Map<DOMEntity, ORSet<String>> entries = getCandidateData();
+        final DOMEntity entity = new DOMEntity(entityType, entityId);
+        assertTrue(entries.containsKey(entity));
+        assertTrue(entries.get(entity).getElements().contains(candidateName));
+    }
+
+    private void verifyEntityCandidateMissing(final String entityType,
+                                              final YangInstanceIdentifier entityId,
+                                              final String candidateName) {
+        await().atMost(Duration.ofSeconds(5))
+                .untilAsserted(() -> doVerifyEntityCandidateMissing(entityType, entityId, candidateName));
+    }
+
+    private void doVerifyEntityCandidateMissing(final String entityType,
+                                                final YangInstanceIdentifier entityId,
+                                                final String candidateName)
+            throws ExecutionException, InterruptedException {
+        final Map<DOMEntity, ORSet<String>> entries = getCandidateData();
+        final DOMEntity entity = new DOMEntity(entityType, entityId);
+        assertTrue(entries.containsKey(entity));
+        assertFalse(entries.get(entity).getElements().contains(candidateName));
+    }
+
+    private Map<DOMEntity, ORSet<String>> getCandidateData() throws ExecutionException, InterruptedException {
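+        // read the candidate registry ORMap from the local replica of Akka Distributed Data;
+        // Replicator.readLocal() avoids a cluster-wide read and is sufficient for these assertions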
+        final CompletionStage<Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>>> ask =
+                AskPattern.ask(replicator, replyTo ->
+                                new Replicator.Get<>(
+                                        CandidateRegistry.KEY,
+                                        Replicator.readLocal(),
+                                        replyTo),
+                        Duration.ofSeconds(5),
+                        typedSystem.scheduler());
+
+        final Replicator.GetResponse<ORMap<DOMEntity, ORSet<String>>> response = ask.toCompletableFuture().get();
+        assertTrue(response instanceof Replicator.GetSuccess);
+
+        final Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>> success =
+                (Replicator.GetSuccess<ORMap<DOMEntity, ORSet<String>>>) response;
+
+        return success.get(CandidateRegistry.KEY).getEntries();
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/DataCentersTest.java
new file mode 100644 (file)
index 0000000..e4927ca
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class DataCentersTest extends AbstractNativeEosTest {
+
+    public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+    public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+    private ClusterNode node1 = null;
+    private ClusterNode node2 = null;
+    private ClusterNode node3 = null;
+    private ClusterNode node4 = null;
+
+    @Before
+    public void setUp() throws Exception {
+        node1 = startupWithDatacenter(2550, Collections.singletonList("member-1"), DATACENTER_SEED_NODES, "dc-primary");
+        node2 = startupWithDatacenter(2551, Collections.singletonList("member-2"), DATACENTER_SEED_NODES, "dc-primary");
+        node3 = startupWithDatacenter(2552, Collections.singletonList("member-3"), DATACENTER_SEED_NODES, "dc-backup");
+        node4 = startupWithDatacenter(2553, Collections.singletonList("member-4"), DATACENTER_SEED_NODES, "dc-backup");
+
+        // need to wait until all nodes are ready
+        final Cluster cluster = Cluster.get(node4.getActorSystem());
+        Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+            final List<Member> members = new ArrayList<>();
+            cluster.state().getMembers().forEach(members::add);
+            if (members.size() != 4) {
+                return false;
+            }
+
+            for (final Member member : members) {
+                if (!member.status().equals(MemberStatus.up())) {
+                    return false;
+                }
+            }
+
+            return true;
+        });
+    }
+
+    @Test
+    public void testDatacenterActivation() throws Exception {
+        registerCandidates(node1, ENTITY_1, "member-1");
+        registerCandidates(node3, ENTITY_1, "member-3");
+
+        activateDatacenter(node1).get();
+
+        waitUntillOwnerPresent(node1, ENTITY_1);
+        final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
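+        // NOTE: the three boolean arguments appear to be (hasOwner, isOwner, wasOwner), inferred from how
+        // verifyListenerState() is used throughout these tests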
+        verifyListenerState(listener1, ENTITY_1, true, true, false);
+
+        final MockEntityOwnershipListener listener2 = registerListener(node3, ENTITY_1);
+        verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+        unregisterCandidates(node1, ENTITY_1, "member-1");
+
+        verifyListenerState(listener1, ENTITY_1, false, false, true);
+        verifyListenerState(listener2, ENTITY_1, false, false, false);
+
+        deactivateDatacenter(node1).get();
+        activateDatacenter(node4).get();
+
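+        // once dc-backup takes over, its supervisor assigns ownership to member-3, the remaining candidate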
+        verifyListenerState(listener1, ENTITY_1, true, false, false);
+        verifyListenerState(listener2, ENTITY_1, true, true, false);
+
+        registerCandidates(node4, ENTITY_1, "member-4");
+        unregisterCandidates(node3, ENTITY_1, "member-3");
+
+        // the state is checked after the notification has been delivered, so the index is current + 1
+        verifyListenerState(listener1, ENTITY_1, true, false, false);
+        verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+        deactivateDatacenter(node3).get();
+        activateDatacenter(node2).get();
+    }
+
+    @Test
+    public void testDataCenterShutdown() throws Exception {
+        registerCandidates(node1, ENTITY_1, "member-1");
+        registerCandidates(node3, ENTITY_1, "member-3");
+        registerCandidates(node4, ENTITY_1, "member-4");
+
+        waitUntillCandidatePresent(node1, ENTITY_1, "member-1");
+        waitUntillCandidatePresent(node1, ENTITY_1, "member-3");
+        waitUntillCandidatePresent(node1, ENTITY_1, "member-4");
+
+        activateDatacenter(node1).get();
+
+        waitUntillOwnerPresent(node4, ENTITY_1);
+        final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+        verifyListenerState(listener1, ENTITY_1, true, true, false);
+
+        final MockEntityOwnershipListener listener2 = registerListener(node3, ENTITY_1);
+        verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+        unregisterCandidates(node1, ENTITY_1, "member-1");
+
+        verifyListenerState(listener1, ENTITY_1, false, false, true);
+        verifyListenerState(listener2, ENTITY_1, false, false, false);
+
+        ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+
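+        // with the primary datacenter down, activating dc-backup should yield a new owner (member-3, as
+        // asserted below)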
+        activateDatacenter(node3).get();
+        verifyListenerState(listener2, ENTITY_1, true, true, false);
+
+        waitUntillOwnerPresent(node3, ENTITY_1);
+        unregisterCandidates(node3, ENTITY_1, "member-3");
+        verifyListenerState(listener2, ENTITY_1, true, false, true);
+    }
+
+    @After
+    public void tearDown() {
+        ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(node4.getActorSystem(), Duration.ofSeconds(20));
+    }
+
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/EntityRpcHandlerTest.java
new file mode 100644 (file)
index 0000000..28991c1
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import akka.actor.ActorSystem;
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.javadsl.Adapter;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.EntityType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntitiesInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.GetEntityOwnerInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.NodeName;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.entity.owners.norev.get.entities.output.EntitiesKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+
+public class EntityRpcHandlerTest extends AbstractNativeEosTest {
+    static final String ENTITY_TYPE = "test";
+
+    private ActorSystem system1;
+    private ActorSystem system2;
+
+    private AkkaEntityOwnershipService service1;
+    private AkkaEntityOwnershipService service2;
+
+    @Before
+    public void setUp() throws Exception {
+        system1 = startupActorSystem(2550, List.of("member-1"), TWO_NODE_SEED_NODES);
+        system2 = startupActorSystem(2551, List.of("member-2"), TWO_NODE_SEED_NODES, "dc-backup");
+
+        service1 = new AkkaEntityOwnershipService(system1, CODEC_CONTEXT);
+        service2 = new AkkaEntityOwnershipService(system2, CODEC_CONTEXT);
+
+        // need to wait until all nodes are ready
+        final var cluster = Cluster.get(Adapter.toTyped(system2));
+        Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+            final List<Member> members = new ArrayList<>();
+            cluster.state().getMembers().forEach(members::add);
+            if (members.size() != 2) {
+                return false;
+            }
+
+            for (final Member member : members) {
+                if (!member.status().equals(MemberStatus.up())) {
+                    return false;
+                }
+            }
+
+            return true;
+        });
+    }
+
+    @After
+    public void tearDown() throws InterruptedException, ExecutionException {
+        service1.close();
+        service2.close();
+        ActorTestKit.shutdown(Adapter.toTyped(system1), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(Adapter.toTyped(system2), Duration.ofSeconds(20));
+    }
+
+    /*
+     * Tests entity RPCs handled both by the owner supervisor (service1) and by an idle supervisor (service2),
+     * which falls back to distributed-data in the inactive datacenter. This covers both available scenarios:
+     * separate datacenters, and the case in which the node hosting the active akka singleton is shut down and
+     * another node takes over.
+     */
+    @Test
+    public void testEntityRetrievalWithUnavailableSupervisor() throws Exception {
+        final YangInstanceIdentifier entityId = YangInstanceIdentifier.of(new NodeIdentifier(NetworkTopology.QNAME),
+                new NodeIdentifier(Topology.QNAME),
+                NodeIdentifierWithPredicates.of(Topology.QNAME, QName.create(Topology.QNAME, "topology-id"), "test"),
+                new NodeIdentifier(Node.QNAME),
+                NodeIdentifierWithPredicates.of(Node.QNAME, QName.create(Node.QNAME, "node-id"), "test://test-node"));
+
+        final DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
+
+        final Registration reg = service1.registerCandidate(entity);
+
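+        // member-1 is the only registered candidate, so it should become the owner; the RPC below should report it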
+        await().untilAsserted(() -> {
+            final var getEntityResult = service1.getEntity(new GetEntityInputBuilder()
+                .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+                .setType(new EntityType(ENTITY_TYPE))
+                .build()).get();
+
+            assertEquals("member-1", getEntityResult.getResult().getOwnerNode().getValue());
+            assertEquals("member-1", getEntityResult.getResult().getCandidateNodes().get(0).getValue());
+        });
+
+        // keep this under the ask timeout to make sure the singleton actor in the inactive datacenter fails
+        // immediately, so that the RPC actor falls back to distributed-data as soon as possible
+        await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
+            final var getEntitiesResult = service2.getEntities(new GetEntitiesInputBuilder().build()).get().getResult();
+            final var entities = getEntitiesResult.nonnullEntities();
+            assertEquals(1, entities.size());
+            assertTrue(entities.get(new EntitiesKey(
+                new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+                new EntityType(ENTITY_TYPE)))
+                .getCandidateNodes().contains(new NodeName("member-1")));
+            assertEquals("member-1", entities.get(new EntitiesKey(
+                new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)),
+                new EntityType(ENTITY_TYPE)))
+                .getOwnerNode().getValue());
+        });
+
+        await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
+            final var getEntityResult = service2.getEntity(new GetEntityInputBuilder()
+                .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+                .setType(new EntityType(ENTITY_TYPE))
+                .build()).get().getResult();
+
+            assertEquals("member-1", getEntityResult.getOwnerNode().getValue());
+            assertEquals("member-1", getEntityResult.getCandidateNodes().get(0).getValue());
+        });
+
+        await().atMost(Duration.ofSeconds(2)).untilAsserted(() -> {
+            final var getOwnerResult = service2.getEntityOwner(new GetEntityOwnerInputBuilder()
+                .setName(new EntityName(CODEC_CONTEXT.fromYangInstanceIdentifier(entityId)))
+                .setType(new EntityType(ENTITY_TYPE))
+                .build()).get().getResult();
+
+            assertEquals("member-1", getOwnerResult.getOwnerNode().getValue());
+        });
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/SingleNodeTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/SingleNodeTest.java
new file mode 100644 (file)
index 0000000..e497178
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import java.time.Duration;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class SingleNodeTest extends AbstractNativeEosTest {
+
+    public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+    public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+    private ClusterNode clusterNode;
+
+    @Before
+    public void setUp() throws Exception {
+        clusterNode = startup(2550, List.of("member-1"));
+    }
+
+    @After
+    public void tearDown() {
+        ActorTestKit.shutdown(clusterNode.getActorSystem(), Duration.ofSeconds(20));
+    }
+
+    @Test
+    public void testNotificationPriorToCandidateRegistration() {
+        final MockEntityOwnershipListener listener = registerListener(clusterNode, ENTITY_1);
+        verifyNoNotifications(listener);
+
+        registerCandidates(clusterNode, ENTITY_1, "member-1");
+        verifyListenerState(listener, ENTITY_1, true, true, false);
+    }
+
+    @Test
+    public void testListenerPriorToAddingCandidates() {
+        final MockEntityOwnershipListener listener = registerListener(clusterNode, ENTITY_1);
+
+        registerCandidates(clusterNode, ENTITY_1, "member-1");
+        waitUntillOwnerPresent(clusterNode, ENTITY_1);
+
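+        // simulate member-2 and member-3 becoming reachable before registering their candidates below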
+        reachableMember(clusterNode, "member-2", DEFAULT_DATACENTER);
+        reachableMember(clusterNode, "member-3", DEFAULT_DATACENTER);
+
+        registerCandidates(clusterNode, ENTITY_1, "member-2", "member-3");
+        verifyListenerState(listener, ENTITY_1, true, true, false);
+        verifyNoAdditionalNotifications(listener, 5);
+
+        unregisterCandidates(clusterNode, ENTITY_1, "member-1");
+        verifyListenerState(listener, ENTITY_1, true, false, true);
+    }
+
+    @Test
+    public void testListenerRegistrationAfterCandidates() {
+        registerCandidates(clusterNode, ENTITY_1, "member-1", "member-2", "member-3");
+        waitUntillOwnerPresent(clusterNode, ENTITY_1);
+
+        reachableMember(clusterNode, "member-2", DEFAULT_DATACENTER);
+        reachableMember(clusterNode, "member-3", DEFAULT_DATACENTER);
+
+        final MockEntityOwnershipListener listener = registerListener(clusterNode, ENTITY_1);
+        verifyListenerState(listener, ENTITY_1, true, true, false);
+        verifyNoAdditionalNotifications(listener, 5);
+
+        unregisterCandidates(clusterNode, ENTITY_1, "member-1", "member-2");
+        verifyListenerState(listener, ENTITY_1, true, false, true);
+    }
+
+    @Test
+    public void testMultipleEntities() {
+        registerCandidates(clusterNode, ENTITY_1, "member-1", "member-2", "member-3");
+        waitUntillOwnerPresent(clusterNode, ENTITY_1);
+
+        reachableMember(clusterNode, "member-2", DEFAULT_DATACENTER);
+        reachableMember(clusterNode, "member-3", DEFAULT_DATACENTER);
+
+        final MockEntityOwnershipListener listener1 = registerListener(clusterNode, ENTITY_1);
+        final MockEntityOwnershipListener listener2 = registerListener(clusterNode, ENTITY_2);
+
+        verifyListenerState(listener1, ENTITY_1, true, true, false);
+        verifyNoNotifications(listener2);
+
+        unregisterCandidates(clusterNode, ENTITY_1, "member-1");
+        verifyListenerState(listener1, ENTITY_1, true, false, true);
+        verifyNoNotifications(listener2);
+
+        registerCandidates(clusterNode, ENTITY_2, "member-2");
+        verifyListenerState(listener1, ENTITY_1, true, false, true);
+        verifyListenerState(listener2, ENTITY_2, true, false, false);
+
+        unregisterCandidates(clusterNode, ENTITY_2, "member-2");
+
+        verifyListenerState(listener1, ENTITY_1, true, false, true);
+        verifyListenerState(listener2, ENTITY_2, false, false, false);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeBaseTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeBaseTest.java
new file mode 100644 (file)
index 0000000..352a842
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import com.google.common.collect.ImmutableList;
+import java.time.Duration;
+import java.util.List;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ThreeNodeBaseTest extends AbstractNativeEosTest {
+    public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+    public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+    private ClusterNode node1;
+    private ClusterNode node2;
+    private ClusterNode node3;
+
+    @Before
+    public void setUp() throws Exception {
+        node1 = startupRemote(2550, List.of("member-1"));
+        node2 = startupRemote(2551, List.of("member-2"));
+        node3 = startupRemote(2552, List.of("member-3"));
+
+        // need to wait until all nodes are ready
+        final Cluster cluster = Cluster.get(node3.getActorSystem());
+        // need a longer timeout with classic remoting; artery.tcp does not need to wait as long for init
+        Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+            final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+            if (members.size() != 3) {
+                return false;
+            }
+
+            for (final Member member : members) {
+                if (!member.status().equals(MemberStatus.up())) {
+                    return false;
+                }
+            }
+
+            return true;
+        });
+    }
+
+    @After
+    public void tearDown() {
+        // same issue with classic remoting as in setup
+        ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+    }
+
+    @Test
+    public void testInitialNotificationsWithoutOwner() throws Exception {
+        final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+        verifyNoNotifications(listener1);
+
+        final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1);
+        verifyNoNotifications(listener2);
+
+        final MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1);
+        verifyNoNotifications(listener3);
+    }
+
+    @Test
+    public void testInitialNotificationsWithOwner() {
+        registerCandidates(node1, ENTITY_1, "member-1");
+        // register the other candidates only after the first one is seen everywhere, otherwise the outcome
+        // could vary with timing
+        waitUntillOwnerPresent(node3, ENTITY_1);
+
+        registerCandidates(node2, ENTITY_1, "member-2");
+        registerCandidates(node3, ENTITY_1, "member-3");
+
+        final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+        verifyListenerState(listener1, ENTITY_1, true, true, false);
+
+        final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1);
+        verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+        final MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1);
+        verifyListenerState(listener3, ENTITY_1, true, false, false);
+    }
+
+    @Test
+    public void testMultipleEntities() {
+        registerCandidates(node1, ENTITY_1, "member-1");
+        registerCandidates(node2, ENTITY_1, "member-2");
+        registerCandidates(node3, ENTITY_1, "member-3");
+
+        waitUntillOwnerPresent(node3, ENTITY_1);
+
+        registerCandidates(node2, ENTITY_2, "member-2");
+        waitUntillOwnerPresent(node2, ENTITY_2);
+        registerCandidates(node1, ENTITY_2, "member-1");
+
+        final MockEntityOwnershipListener firstEntityListener1 = registerListener(node1, ENTITY_1);
+        final MockEntityOwnershipListener firstEntityListener2 = registerListener(node2, ENTITY_1);
+        final MockEntityOwnershipListener firstEntityListener3 = registerListener(node3, ENTITY_1);
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+        final MockEntityOwnershipListener secondEntityListener1 = registerListener(node1, ENTITY_2);
+        final MockEntityOwnershipListener secondEntityListener2 = registerListener(node2, ENTITY_2);
+        final MockEntityOwnershipListener secondEntityListener3 = registerListener(node3, ENTITY_2);
+
+        verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+        verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+        verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
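+        // as ENTITY_1 candidates unregister one by one, ownership should cascade to the next remaining
+        // candidate and finally clear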
+        unregisterCandidates(node1, ENTITY_1, "member-1");
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, false, true);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+        unregisterCandidates(node2, ENTITY_1, "member-2");
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, false, true);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, true, false);
+
+        unregisterCandidates(node3, ENTITY_1, "member-3");
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, false, false, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, false, false, false);
+        verifyListenerState(firstEntityListener3, ENTITY_1, false, false, true);
+
+        // check that ownership of the second entity has not moved
+        verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+        verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+        verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+        registerCandidates(node1, ENTITY_1, "member-1");
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/ThreeNodeReachabilityTest.java
new file mode 100644 (file)
index 0000000..7699799
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka;
+
+import static org.awaitility.Awaitility.await;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import com.google.common.collect.ImmutableList;
+import java.time.Duration;
+import java.util.List;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class ThreeNodeReachabilityTest extends AbstractNativeEosTest {
+    public static final DOMEntity ENTITY_1 = new DOMEntity("test-type", "entity-1");
+    public static final DOMEntity ENTITY_2 = new DOMEntity("test-type-2", "entity-2");
+
+    private ClusterNode node1 = null;
+    private ClusterNode node2 = null;
+    private ClusterNode node3 = null;
+
+    @Before
+    public void setUp() throws Exception {
+        node1 = startupRemote(2550, List.of("member-1"), TWO_NODE_SEED_NODES);
+        node2 = startupRemote(2551, List.of("member-2"), TWO_NODE_SEED_NODES);
+
+        // need to wait until all nodes are ready
+        final Cluster cluster = Cluster.get(node2.getActorSystem());
+        await().atMost(Duration.ofSeconds(20)).until(() -> {
+            final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+            if (members.size() != 2) {
+                return false;
+            }
+
+            for (final Member member : members) {
+                if (!member.status().equals(MemberStatus.up())) {
+                    return false;
+                }
+            }
+
+            return true;
+        });
+    }
+
+    @After
+    public void tearDown() {
+        ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(node2.getActorSystem(), Duration.ofSeconds(20));
+
+        if (node3 != null) {
+            ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+        }
+    }
+
+    @Test
+    public void testNodeLateStart() throws Exception {
+        registerCandidates(node1, ENTITY_1, "member-1");
+        registerCandidates(node2, ENTITY_1, "member-2");
+
+        registerCandidates(node2, ENTITY_2, "member-2");
+        waitUntillOwnerPresent(node2, ENTITY_2);
+        registerCandidates(node1, ENTITY_2, "member-1");
+
+        final MockEntityOwnershipListener firstEntityListener1 = registerListener(node1, ENTITY_1);
+        final MockEntityOwnershipListener firstEntityListener2 = registerListener(node2, ENTITY_1);
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+
+        final MockEntityOwnershipListener secondEntityListener1 = registerListener(node1, ENTITY_2);
+        final MockEntityOwnershipListener secondEntityListener2 = registerListener(node2, ENTITY_2);
+
+        verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+        verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+
+        unregisterCandidates(node1, ENTITY_1, "member-1");
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, false, true);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+
+        unregisterCandidates(node2, ENTITY_1, "member-2");
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, false, false, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, false, false, true);
+
+        startNode3();
+
+        final MockEntityOwnershipListener firstEntityListener3 = registerListener(node3, ENTITY_1);
+        verifyListenerState(firstEntityListener3, ENTITY_1, false, false, false);
+
+        final MockEntityOwnershipListener secondEntityListener3 = registerListener(node3, ENTITY_2);
+        verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+        registerCandidates(node3, ENTITY_1, "member-3");
+        waitUntillOwnerPresent(node3, ENTITY_1);
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, false, false);
+
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, true, false);
+    }
+
+    @Test
+    public void testReachabilityChangesDuringRuntime() throws Exception {
+        startNode3();
+
+        registerCandidates(node2, ENTITY_1, "member-2");
+        // we want the cluster singleton on node1 but the entity owner on node2
+        waitUntillOwnerPresent(node2, ENTITY_1);
+
+        registerCandidates(node1, ENTITY_1, "member-1");
+        registerCandidates(node3, ENTITY_1, "member-3");
+
+        registerCandidates(node2, ENTITY_2, "member-2");
+        waitUntillOwnerPresent(node2, ENTITY_2);
+        registerCandidates(node1, ENTITY_2, "member-1");
+
+        final MockEntityOwnershipListener firstEntityListener1 = registerListener(node1, ENTITY_1);
+        final MockEntityOwnershipListener firstEntityListener2 = registerListener(node2, ENTITY_1);
+        final MockEntityOwnershipListener firstEntityListener3 = registerListener(node3, ENTITY_1);
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+        final MockEntityOwnershipListener secondEntityListener1 = registerListener(node1, ENTITY_2);
+        final MockEntityOwnershipListener secondEntityListener2 = registerListener(node2, ENTITY_2);
+        final MockEntityOwnershipListener secondEntityListener3 = registerListener(node3, ENTITY_2);
+
+        verifyListenerState(secondEntityListener1, ENTITY_2, true, false, false);
+        verifyListenerState(secondEntityListener2, ENTITY_2, true, true, false);
+        verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
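+        // marking the current owner (member-2) unreachable should hand ownership of both entities to member-1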
+        unreachableMember(node1, "member-2", DEFAULT_DATACENTER);
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, false, true);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+        verifyListenerState(secondEntityListener1, ENTITY_2, true, true, false);
+        verifyListenerState(secondEntityListener2, ENTITY_2, true, false, true);
+        verifyListenerState(secondEntityListener3, ENTITY_2, true, false, false);
+
+        unreachableMember(node1, "member-3", DEFAULT_DATACENTER);
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, false, true);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+
+        unregisterCandidates(node1, ENTITY_1, "member-1", DEFAULT_DATACENTER);
+        unregisterCandidates(node1, ENTITY_2, "member-1", DEFAULT_DATACENTER);
+
+        verifyListenerState(firstEntityListener1, ENTITY_1, false, false, true);
+        verifyListenerState(firstEntityListener2, ENTITY_1, false, false, false);
+        verifyListenerState(firstEntityListener3, ENTITY_1, false, false, false);
+
+        verifyListenerState(secondEntityListener1, ENTITY_2, false, false, true);
+        verifyListenerState(secondEntityListener2, ENTITY_2, false, false, false);
+        verifyListenerState(secondEntityListener3, ENTITY_2, false, false, false);
+
+        reachableMember(node1, "member-2", DEFAULT_DATACENTER);
+        verifyListenerState(firstEntityListener1, ENTITY_1, true, false, false);
+        verifyListenerState(firstEntityListener2, ENTITY_1, true, true, false);
+        verifyListenerState(firstEntityListener3, ENTITY_1, true, false, false);
+    }
+
+    @Test
+    public void testSingletonMoving() throws Exception {
+        final MockEntityOwnershipListener listener1 = registerListener(node2, ENTITY_1);
+        final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_2);
+        verifyNoNotifications(listener1);
+        verifyNoNotifications(listener2);
+
+        registerCandidates(node1, ENTITY_1, "member-1");
+        registerCandidates(node2, ENTITY_1, "member-2");
+
+        registerCandidates(node2, ENTITY_2, "member-2");
+        waitUntillOwnerPresent(node2, ENTITY_2);
+        registerCandidates(node1, ENTITY_2, "member-1");
+        // we end up with member-1 (node1) owning ENTITY_1 and member-2 (node2) owning ENTITY_2
+        verifyListenerState(listener1, ENTITY_1, true, false, false);
+        verifyListenerState(listener2, ENTITY_2, true, true, false);
+
+        ActorTestKit.shutdown(node1.getActorSystem(), Duration.ofSeconds(20));
+
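+        // with node1 gone, member-2 should take over ownership of ENTITY_1 as well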
+        verifyListenerState(listener1, ENTITY_1, true, true, false);
+        verifyListenerState(listener2, ENTITY_2, true, true, false);
+
+        startNode3(2);
+
+        final MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_2);
+        verifyListenerState(listener3, ENTITY_2, true, false, false);
+
+        node1 = startupRemote(2550, List.of("member-1"));
+
+        final Cluster cluster = Cluster.get(node2.getActorSystem());
+        await().atMost(Duration.ofSeconds(20)).until(() -> {
+            final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+            if (members.size() != 3) {
+                return false;
+            }
+
+            for (final Member member : members) {
+                if (!member.status().equals(MemberStatus.up())) {
+                    return false;
+                }
+            }
+
+            return true;
+        });
+
+        final MockEntityOwnershipListener node1Listener = registerListener(node1, ENTITY_1);
+        verifyListenerState(node1Listener, ENTITY_1, true, false, false);
+    }
+
+    @Test
+    public void testOwnerNotReassignedWhenOnlyCandidate() throws Exception {
+        startNode3();
+        final MockEntityOwnershipListener listener1 = registerListener(node1, ENTITY_1);
+        final MockEntityOwnershipListener listener2 = registerListener(node2, ENTITY_1);
+        verifyNoNotifications(listener1);
+        verifyNoNotifications(listener2);
+
+        registerCandidates(node3, ENTITY_1, "member-3");
+        waitUntillOwnerPresent(node1, ENTITY_1);
+
+        MockEntityOwnershipListener listener3 = registerListener(node3, ENTITY_1);
+        verifyListenerState(listener1, ENTITY_1, true, false, false);
+        verifyListenerState(listener3, ENTITY_1, true, true, false);
+
+        ActorTestKit.shutdown(node3.getActorSystem(), Duration.ofSeconds(20));
+
+        verifyListenerState(listener1, ENTITY_1, true, false, false);
+        verifyListenerState(listener2, ENTITY_1, true, false, false);
+
+        startNode3();
+        verifyListenerState(listener1, ENTITY_1, false, false, false);
+
+        listener3 = registerListener(node3, ENTITY_1);
+        verifyListenerState(listener3, ENTITY_1, false, false, false);
+
+        registerCandidates(node1, ENTITY_1, "member-1");
+
+        verifyListenerState(listener1, ENTITY_1, true, true, false);
+        verifyListenerState(listener3, ENTITY_1, true, false, false);
+    }
+
+    private void startNode3() throws Exception {
+        startNode3(3);
+    }
+
+    private void startNode3(final int membersPresent) throws Exception {
+        node3 = startupRemote(2552, List.of("member-3"), THREE_NODE_SEED_NODES);
+
+        // need to wait until all nodes are ready
+        final Cluster cluster = Cluster.get(node2.getActorSystem());
+        await().atMost(Duration.ofSeconds(30)).until(() -> {
+            final List<Member> members = ImmutableList.copyOf(cluster.state().getMembers());
+            if (members.size() != membersPresent) {
+                return false;
+            }
+
+            for (final Member member : members) {
+                if (!member.status().equals(MemberStatus.up())) {
+                    return false;
+                }
+            }
+
+            return true;
+        });
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisorTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/owner/supervisor/OwnerSupervisorTest.java
new file mode 100644 (file)
index 0000000..4bf68a7
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.owner.supervisor;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.ActorRef;
+import akka.actor.typed.Behavior;
+import akka.actor.typed.javadsl.AbstractBehavior;
+import akka.actor.typed.javadsl.ActorContext;
+import akka.actor.typed.javadsl.Behaviors;
+import akka.actor.typed.javadsl.Receive;
+import akka.cluster.typed.Cluster;
+import akka.cluster.typed.ClusterSingleton;
+import akka.cluster.typed.SingletonActor;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import org.junit.Test;
+import org.opendaylight.controller.eos.akka.AbstractNativeEosTest;
+import org.opendaylight.controller.eos.akka.bootstrap.command.BootstrapCommand;
+import org.opendaylight.controller.eos.akka.bootstrap.command.GetRunningContext;
+import org.opendaylight.controller.eos.akka.bootstrap.command.RunningContext;
+import org.opendaylight.controller.eos.akka.owner.checker.OwnerStateChecker;
+import org.opendaylight.controller.eos.akka.owner.checker.command.StateCheckerCommand;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.InitialCandidateSync;
+import org.opendaylight.controller.eos.akka.owner.supervisor.command.OwnerSupervisorCommand;
+import org.opendaylight.controller.eos.akka.registry.candidate.CandidateRegistry;
+import org.opendaylight.controller.eos.akka.registry.candidate.command.CandidateRegistryCommand;
+import org.opendaylight.controller.eos.akka.registry.listener.type.EntityTypeListenerRegistry;
+import org.opendaylight.controller.eos.akka.registry.listener.type.command.TypeListenerRegistryCommand;
+import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
+
+public class OwnerSupervisorTest extends AbstractNativeEosTest {
+
+    @Test
+    public void testCandidatePickingWhenUnreachableCandidates() throws Exception {
+        final ClusterNode node = startup(2550, Collections.singletonList("member-1"));
+        try {
+            reachableMember(node, "member-2", DEFAULT_DATACENTER);
+            reachableMember(node, "member-3", DEFAULT_DATACENTER);
+            registerCandidates(node, ENTITY_1, "member-1", "member-2", "member-3");
+
+            final MockEntityOwnershipListener listener = registerListener(node, ENTITY_1);
+            verifyListenerState(listener, ENTITY_1, true, true, false);
+
+            unreachableMember(node, "member-1", DEFAULT_DATACENTER);
+            verifyListenerState(listener, ENTITY_1, true, false, true);
+
+            unreachableMember(node, "member-2", DEFAULT_DATACENTER);
+            verifyListenerState(listener, ENTITY_1, true, false, false);
+
+            unreachableMember(node, "member-3", DEFAULT_DATACENTER);
+            verifyListenerState(listener, ENTITY_1, false, false, false);
+
+            reachableMember(node, "member-2", DEFAULT_DATACENTER);
+            verifyListenerState(listener, ENTITY_1, true, false, false);
+
+            // no notification here as member-2 is already the owner
+            reachableMember(node, "member-1", DEFAULT_DATACENTER);
+
+            unreachableMember(node, "member-2", DEFAULT_DATACENTER);
+            verifyListenerState(listener, ENTITY_1, true, true, false);
+        } finally {
+            ActorTestKit.shutdown(node.getActorSystem());
+        }
+    }
+
+    @Test
+    public void testSupervisorInitWithMissingOwners() throws Exception {
+        final Map<DOMEntity, Set<String>> candidates = new HashMap<>();
+        candidates.put(ENTITY_1, Set.of("member-1"));
+        candidates.put(ENTITY_2, Set.of("member-2"));
+
+        final ClusterNode node = startup(2550, Collections.singletonList("member-1"), Collections.emptyList(),
+                () -> mockedBootstrap(candidates, new HashMap<>()));
+
+        try {
+            waitUntillOwnerPresent(node, ENTITY_1);
+
+            // also do a proper registration so the listeners from the type listener actor are spawned
+            registerCandidates(node, ENTITY_1, "member-1");
+            registerCandidates(node, ENTITY_2, "member-2");
+
+            final MockEntityOwnershipListener listener1 = registerListener(node, ENTITY_1);
+            final MockEntityOwnershipListener listener2 = registerListener(node, ENTITY_2);
+
+            // the first entity should have a correctly assigned owner as its candidate is reachable
+            verifyListenerState(listener1, ENTITY_1, true, true, false);
+            // this one could not be assigned during init as member-2 is not reachable yet
+            verifyListenerState(listener2, ENTITY_2, false, false, false);
+
+            reachableMember(node, "member-2", DEFAULT_DATACENTER);
+            verifyListenerState(listener2, ENTITY_2, true, false, false);
+        } finally {
+            ActorTestKit.shutdown(node.getActorSystem());
+        }
+    }
+
+    private static Behavior<BootstrapCommand> mockedBootstrap(final Map<DOMEntity, Set<String>> currentCandidates,
+                                                              final Map<DOMEntity, String> currentOwners) {
+        return Behaviors.setup(context -> MockBootstrap.create(currentCandidates, currentOwners));
+    }
+
+    /**
+     * Initial behavior that skips initial sync and instead initializes OwnerSupervisor with provided values.
+     */
+    private static final class MockSyncer extends AbstractBehavior<OwnerSupervisorCommand> {
+
+        private final Map<DOMEntity, Set<String>> currentCandidates;
+        private final Map<DOMEntity, String> currentOwners;
+
+        private MockSyncer(final ActorContext<OwnerSupervisorCommand> context,
+                           final Map<DOMEntity, Set<String>> currentCandidates,
+                           final Map<DOMEntity, String> currentOwners) {
+            super(context);
+            this.currentCandidates = currentCandidates;
+            this.currentOwners = currentOwners;
+
+            context.getSelf().tell(new InitialCandidateSync(null));
+        }
+
+        public static Behavior<OwnerSupervisorCommand> create(final Map<DOMEntity, Set<String>> currentCandidates,
+                                                              final Map<DOMEntity, String> currentOwners) {
+            return Behaviors.setup(ctx -> new MockSyncer(ctx, currentCandidates, currentOwners));
+        }
+
+        @Override
+        public Receive<OwnerSupervisorCommand> createReceive() {
+            return newReceiveBuilder()
+                    .onMessage(InitialCandidateSync.class, this::switchToSupervisor)
+                    .build();
+        }
+
+        private Behavior<OwnerSupervisorCommand> switchToSupervisor(final InitialCandidateSync message) {
+            return OwnerSupervisor.create(currentCandidates, currentOwners, CODEC_CONTEXT.getInstanceIdentifierCodec());
+        }
+    }
+
+    /**
+     * Bootstrap with OwnerSyncer replaced with the testing syncer behavior.
+     */
+    private static final class MockBootstrap extends AbstractBehavior<BootstrapCommand> {
+
+        private final ActorRef<TypeListenerRegistryCommand> listenerRegistry;
+        private final ActorRef<CandidateRegistryCommand> candidateRegistry;
+        private final ActorRef<StateCheckerCommand> ownerStateChecker;
+        private final ActorRef<OwnerSupervisorCommand> ownerSupervisor;
+
+        private MockBootstrap(final ActorContext<BootstrapCommand> context,
+                              final Map<DOMEntity, Set<String>> currentCandidates,
+                              final Map<DOMEntity, String> currentOwners) {
+            super(context);
+
+            final Cluster cluster = Cluster.get(context.getSystem());
+            final String role = cluster.selfMember().getRoles().iterator().next();
+
+            listenerRegistry = context.spawn(EntityTypeListenerRegistry.create(role), "ListenerRegistry");
+            candidateRegistry = context.spawn(CandidateRegistry.create(), "CandidateRegistry");
+
+            final ClusterSingleton clusterSingleton = ClusterSingleton.get(context.getSystem());
+            // start the initial sync behavior that switches to the regular one after syncing
+            ownerSupervisor = clusterSingleton.init(SingletonActor.of(
+                    MockSyncer.create(currentCandidates, currentOwners), "OwnerSupervisor"));
+
+            ownerStateChecker = context.spawn(OwnerStateChecker.create(role, ownerSupervisor, null),
+                    "OwnerStateChecker");
+        }
+
+        public static Behavior<BootstrapCommand> create(final Map<DOMEntity, Set<String>> currentCandidates,
+                                                        final Map<DOMEntity, String> currentOwners) {
+            return Behaviors.setup(ctx -> new MockBootstrap(ctx, currentCandidates, currentOwners));
+        }
+
+        @Override
+        public Receive<BootstrapCommand> createReceive() {
+            return newReceiveBuilder()
+                    .onMessage(GetRunningContext.class, this::onGetRunningContext)
+                    .build();
+        }
+
+        private Behavior<BootstrapCommand> onGetRunningContext(final GetRunningContext request) {
+            request.getReplyTo().tell(
+                    new RunningContext(listenerRegistry, candidateRegistry, ownerStateChecker, ownerSupervisor));
+            return this;
+        }
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java b/opendaylight/md-sal/eos-dom-akka/src/test/java/org/opendaylight/controller/eos/akka/service/ClusterSingletonIntegrationTest.java
new file mode 100644 (file)
index 0000000..f544ed1
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.eos.akka.service;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import akka.actor.testkit.typed.javadsl.ActorTestKit;
+import akka.actor.typed.javadsl.Adapter;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.typed.Cluster;
+import akka.testkit.javadsl.TestKit;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.awaitility.Awaitility;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.eos.akka.AbstractNativeEosTest;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.impl.EOSClusterSingletonServiceProvider;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ClusterSingletonIntegrationTest extends AbstractNativeEosTest {
+
+    private static final Logger LOG = LoggerFactory.getLogger(ClusterSingletonIntegrationTest.class);
+
+    private AbstractNativeEosTest.MockNativeEntityOwnershipService node1;
+    private MockNativeEntityOwnershipService node2;
+    private MockNativeEntityOwnershipService node3;
+
+    private EOSClusterSingletonServiceProvider singletonNode1;
+    private EOSClusterSingletonServiceProvider singletonNode2;
+    private EOSClusterSingletonServiceProvider singletonNode3;
+
+    @Before
+    public void setUp() throws Exception {
+        node1 = startupNativeService(2550, List.of("member-1"), THREE_NODE_SEED_NODES);
+        node2 = startupNativeService(2551, List.of("member-2"), THREE_NODE_SEED_NODES);
+        node3 = startupNativeService(2552, List.of("member-3"), THREE_NODE_SEED_NODES);
+
+        singletonNode1 = new EOSClusterSingletonServiceProvider(node1);
+        singletonNode2 = new EOSClusterSingletonServiceProvider(node2);
+        singletonNode3 = new EOSClusterSingletonServiceProvider(node3);
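+        // each singleton provider is backed by that node's native entity ownership service, which decides
+        // where the singleton instance runs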
+
+        waitUntillNodeReady(node3);
+    }
+
+    @After
+    public void tearDown() {
+        ActorTestKit.shutdown(Adapter.toTyped(node1.getActorSystem()), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(Adapter.toTyped(node2.getActorSystem()), Duration.ofSeconds(20));
+        ActorTestKit.shutdown(Adapter.toTyped(node3.getActorSystem()), Duration.ofSeconds(20));
+    }
+
+    @Test
+    public void testSingletonOwnershipNotDropped() {
+        final MockClusterSingletonService service = new MockClusterSingletonService("member-1", "service-1");
+        singletonNode1.registerClusterSingletonService(service);
+
+        verifyServiceActive(service);
+
+        final MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1");
+        singletonNode2.registerClusterSingletonService(service2);
+
+        verifyServiceInactive(service2, 2);
+    }
+
+    @Test
+    public void testSingletonOwnershipHandoff() {
+        final MockClusterSingletonService service = new MockClusterSingletonService("member-1", "service-1");
+        final Registration registration = singletonNode1.registerClusterSingletonService(service);
+
+        verifyServiceActive(service);
+
+        final MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1");
+        singletonNode2.registerClusterSingletonService(service2);
+
+        verifyServiceInactive(service2, 2);
+
+        registration.close();
+        verifyServiceInactive(service);
+        verifyServiceActive(service2);
+    }
+
+    @Test
+    public void testSingletonOwnershipHandoffOnNodeShutdown() throws Exception {
+        MockClusterSingletonService service2 = new MockClusterSingletonService("member-2", "service-1");
+        Registration registration2 = singletonNode2.registerClusterSingletonService(service2);
+
+        verifyServiceActive(service2);
+
+        final MockClusterSingletonService service3 = new MockClusterSingletonService("member-3", "service-1");
+        final Registration registration3 = singletonNode3.registerClusterSingletonService(service3);
+
+        verifyServiceInactive(service3, 2);
+
+        LOG.debug("Shutting down node2");
+        TestKit.shutdownActorSystem(node2.getActorSystem());
+        verifyServiceActive(service3);
+
+        node2 = startupNativeService(2551, List.of("member-1"), THREE_NODE_SEED_NODES);
+        singletonNode2 = new EOSClusterSingletonServiceProvider(node2);
+
+        waitUntillNodeReady(node2);
+        service2 = new MockClusterSingletonService("member-2", "service-1");
+        singletonNode2.registerClusterSingletonService(service2);
+
+        verifyServiceActive(service3);
+        verifyServiceInactive(service2, 5);
+    }
+
+    private static void waitUntillNodeReady(final MockNativeEntityOwnershipService node) {
+        // need to wait until all nodes are ready
+        final Cluster cluster = Cluster.get(Adapter.toTyped(node.getActorSystem()));
+        Awaitility.await().atMost(Duration.ofSeconds(20)).until(() -> {
+            final List<Member> members = new ArrayList<>();
+            cluster.state().getMembers().forEach(members::add);
+            if (members.size() != 3) {
+                return false;
+            }
+
+            for (final Member member : members) {
+                if (!member.status().equals(MemberStatus.up())) {
+                    return false;
+                }
+            }
+
+            return true;
+        });
+    }
+
+    private static void verifyServiceActive(final MockClusterSingletonService service) {
+        await().untilAsserted(() -> assertTrue(service.isActivated()));
+    }
+
+    private static void verifyServiceActive(final MockClusterSingletonService service, final long delay) {
+        await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertTrue(service.isActivated()));
+    }
+
+    private static void verifyServiceInactive(final MockClusterSingletonService service) {
+        await().untilAsserted(() -> assertFalse(service.isActivated()));
+    }
+
+    private static void verifyServiceInactive(final MockClusterSingletonService service, final long delay) {
+        await().pollDelay(delay, TimeUnit.SECONDS).untilAsserted(() -> assertFalse(service.isActivated()));
+    }
+
+    private static class MockClusterSingletonService implements ClusterSingletonService {
+
+        private final String member;
+        private final ServiceGroupIdentifier identifier;
+        private boolean activated = false;
+
+        MockClusterSingletonService(final String member, final String identifier) {
+            this.member = member;
+            this.identifier = new ServiceGroupIdentifier(identifier);
+        }
+
+        @Override
+        public void instantiateServiceInstance() {
+            LOG.debug("{} : Activating service: {}", member, identifier);
+            activated = true;
+        }
+
+        @Override
+        public ListenableFuture<? extends Object> closeServiceInstance() {
+            LOG.debug("{} : Closing service: {}", member, identifier);
+            activated = false;
+            return Futures.immediateFuture(null);
+        }
+
+        @Override
+        public ServiceGroupIdentifier getIdentifier() {
+            return identifier;
+        }
+
+        public boolean isActivated() {
+            return activated;
+        }
+    }
+}
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf b/opendaylight/md-sal/eos-dom-akka/src/test/resources/application.conf
new file mode 100644 (file)
index 0000000..08c2a36
--- /dev/null
@@ -0,0 +1,41 @@
+akka {
+  loglevel = debug
+  actor {
+    warn-about-java-serializer-usage = off
+    allow-java-serialization = on
+    provider = cluster
+  }
+
+  remote {
+    artery {
+      enabled = on
+      canonical.hostname = "127.0.0.1"
+      canonical.port = 2550
+    }
+  }
+  cluster {
+    seed-nodes = [
+      "akka://ClusterSystem@127.0.0.1:2550"]
+    roles = [
+      "member-1"
+    ]
+    downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+    distributed-data {
+        # How often the Replicator should send out gossip information.
+        # This value controls how quickly Entity Ownership Service data is replicated
+        # across cluster nodes.
+        gossip-interval = 100 ms
+
+        # How often the subscribers will be notified of changes, if any.
+        # This value controls how quickly Entity Ownership Service decisions are
+        # propagated within a node.
+        notify-subscribers-interval = 20 ms
+    }
+    split-brain-resolver {
+      active-strategy = keep-majority
+      stable-after = 7s
+    }
+  }
+}
+
diff --git a/opendaylight/md-sal/eos-dom-akka/src/test/resources/simplelogger.properties b/opendaylight/md-sal/eos-dom-akka/src/test/resources/simplelogger.properties
new file mode 100644 (file)
index 0000000..5afb5e4
--- /dev/null
@@ -0,0 +1,8 @@
+org.slf4j.simpleLogger.defaultLogLevel=info
+org.slf4j.simpleLogger.showDateTime=true
+org.slf4j.simpleLogger.dateTimeFormat=hh:mm:ss,S a
+org.slf4j.simpleLogger.logFile=System.out
+org.slf4j.simpleLogger.showShortLogName=true
+org.slf4j.simpleLogger.levelInBrackets=true
+org.slf4j.simpleLogger.log.org.opendaylight.controller.eos.akka=debug
+org.slf4j.simpleLogger.log.org.opendaylight.mdsal.singleton=debug
\ No newline at end of file
index ce51fc95cbcf5b01c71d3b699a19e28b7ea9ae14..77e751dd0e4bb94a38d39022d3d840cde52c3d80 100644 (file)
@@ -8,24 +8,31 @@ terms of the Eclipse Public License v1.0 which accompanies this distribution,
 and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
+  <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>bundle-parent</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
-  <modelVersion>4.0.0</modelVersion>
+
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>mdsal-it-base</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencyManagement>
     <dependencies>
       <dependency>
-        <groupId>org.opendaylight.controller</groupId>
+        <groupId>org.opendaylight.mdsal</groupId>
         <artifactId>mdsal-artifacts</artifactId>
+        <version>13.0.1</version>
+        <type>pom</type>
+        <scope>import</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>controller-artifacts</artifactId>
         <version>${project.version}</version>
         <type>pom</type>
         <scope>import</scope>
@@ -40,8 +47,8 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       <type>zip</type>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-api</artifactId>
     </dependency>
         <!-- Dependencies for pax exam karaf container -->
     <dependency>
@@ -76,20 +83,28 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
         <artifactId>pax-url-aether</artifactId>
         <scope>compile</scope>
     </dependency>
+
+    <!-- Referenced by pax-exam-features, needs to be pulled into local repository -->
     <dependency>
-        <groupId>javax.inject</groupId>
-        <artifactId>javax.inject</artifactId>
-        <scope>compile</scope>
+        <groupId>org.ops4j.base</groupId>
+        <artifactId>ops4j-base-monitors</artifactId>
+        <version>1.5.1</version>
     </dependency>
+    <dependency>
+        <groupId>org.apache.geronimo.specs</groupId>
+        <artifactId>geronimo-atinject_1.0_spec</artifactId>
+        <!-- FIXME: remove this override once pax-exam is aligned with pax-web -->
+        <version>1.0</version>
+    </dependency>
+
     <dependency>
         <groupId>org.apache.karaf.features</groupId>
         <artifactId>org.apache.karaf.features.core</artifactId>
-        <version>${karaf.version}</version>
         <scope>compile</scope>
     </dependency>
     <dependency>
         <groupId>org.osgi</groupId>
-        <artifactId>org.osgi.core</artifactId>
+        <artifactId>org.osgi.framework</artifactId>
         <scope>compile</scope>
     </dependency>
     <dependency>
index 6d31034d8faaf097ac0f3c95f0171133123d1c90..e937f99d2cbf87c2524d2762f081efd598951d7e 100644 (file)
@@ -24,7 +24,6 @@ import org.ops4j.pax.exam.Configuration;
 import org.ops4j.pax.exam.MavenUtils;
 import org.ops4j.pax.exam.Option;
 import org.ops4j.pax.exam.OptionUtils;
-import org.ops4j.pax.exam.karaf.container.internal.JavaVersionUtil;
 import org.ops4j.pax.exam.karaf.options.KarafDistributionOption;
 import org.ops4j.pax.exam.karaf.options.LogLevelOption.LogLevel;
 import org.ops4j.pax.exam.options.MavenUrlReference;
@@ -38,7 +37,6 @@ public abstract class AbstractMdsalTestBase {
 
     private static final Logger LOG = LoggerFactory.getLogger(AbstractMdsalTestBase.class);
     private static final String MAVEN_REPO_LOCAL = "maven.repo.local";
-    private static final String ORG_OPS4J_PAX_URL_MVN_LOCAL_REPOSITORY = "org.ops4j.pax.url.mvn.localRepository";
     private static final String ETC_ORG_OPS4J_PAX_URL_MVN_CFG = "etc/org.ops4j.pax.url.mvn.cfg";
     private static final String ETC_ORG_OPS4J_PAX_LOGGING_CFG = "etc/org.ops4j.pax.logging.cfg";
 
@@ -91,7 +89,7 @@ public abstract class AbstractMdsalTestBase {
      * @return An array of additional config options
      */
     protected Option[] getAdditionalOptions() {
-        return null;
+        return new Option[0];
     }
 
     /**
@@ -117,8 +115,8 @@ public abstract class AbstractMdsalTestBase {
     protected Option mvnLocalRepoOption() {
         String mvnRepoLocal = System.getProperty(MAVEN_REPO_LOCAL, "");
         LOG.info("mvnLocalRepo \"{}\"", mvnRepoLocal);
-        return editConfigurationFilePut(ETC_ORG_OPS4J_PAX_URL_MVN_CFG, ORG_OPS4J_PAX_URL_MVN_LOCAL_REPOSITORY,
-                mvnRepoLocal);
+        return editConfigurationFilePut(ETC_ORG_OPS4J_PAX_URL_MVN_CFG,
+            "org.ops4j.pax.url.mvn.localRepository", mvnRepoLocal);
     }
 
     @Configuration
@@ -130,41 +128,43 @@ public abstract class AbstractMdsalTestBase {
                         .unpackDirectory(new File(PAX_EXAM_UNPACK_DIRECTORY)).useDeployFolder(false),
                 when(Boolean.getBoolean(KEEP_UNPACK_DIRECTORY_PROP)).useOptions(keepRuntimeFolder()),
                 features(getFeatureRepo(), getFeatureName()),
-                //mavenBundle("org.apache.aries.quiesce", "org.apache.aries.quiesce.api", "1.0.0"), getLoggingOption(),
                 mvnLocalRepoOption(),
+
+                // Make sure karaf's default repository is consulted before anything else
+                editConfigurationFilePut(ETC_ORG_OPS4J_PAX_URL_MVN_CFG, "org.ops4j.pax.url.mvn.defaultRepositories",
+                        "file:${karaf.home}/${karaf.default.repository}@id=system.repository"),
+
                 configureConsole().ignoreLocalConsole().ignoreRemoteShell(),
                 editConfigurationFilePut(ETC_ORG_OPS4J_PAX_LOGGING_CFG, "log4j2.rootLogger.level", "INFO") };
 
-        if (JavaVersionUtil.getMajorVersion() >= 9) {
-            final String karafVersion = MavenUtils.getArtifactVersion("org.apache.karaf.features",
+        final String karafVersion = MavenUtils.getArtifactVersion("org.apache.karaf.features",
                 "org.apache.karaf.features.core");
-            options = OptionUtils.combine(options, new VMOption[] {
-                    new VMOption("--add-reads=java.xml=java.logging"),
-                    new VMOption("--add-exports=java.base/org.apache.karaf.specs.locator=java.xml,ALL-UNNAMED"),
-                    new VMOption("--patch-module"),
-                    new VMOption("java.base=lib/endorsed/org.apache.karaf.specs.locator-" + karafVersion + ".jar"),
-                    new VMOption("--patch-module"),
-                    new VMOption("java.xml=lib/endorsed/org.apache.karaf.specs.java.xml-" + karafVersion + ".jar"),
-                    new VMOption("--add-opens"),
-                    new VMOption("java.base/java.security=ALL-UNNAMED"),
-                    new VMOption("--add-opens"),
-                    new VMOption("java.base/java.net=ALL-UNNAMED"),
-                    new VMOption("--add-opens"),
-                    new VMOption("java.base/java.lang=ALL-UNNAMED"),
-                    new VMOption("--add-opens"),
-                    new VMOption("java.base/java.util=ALL-UNNAMED"),
-                    new VMOption("--add-opens"),
-                    new VMOption("java.naming/javax.naming.spi=ALL-UNNAMED"),
-                    new VMOption("--add-opens"),
-                    new VMOption("java.rmi/sun.rmi.transport.tcp=ALL-UNNAMED"),
-                    new VMOption("--add-exports=java.base/sun.net.www.protocol.http=ALL-UNNAMED"),
-                    new VMOption("--add-exports=java.base/sun.net.www.protocol.https=ALL-UNNAMED"),
-                    new VMOption("--add-exports=java.base/sun.net.www.protocol.jar=ALL-UNNAMED"),
-                    new VMOption("--add-exports=jdk.naming.rmi/com.sun.jndi.url.rmi=ALL-UNNAMED"),
-                    new VMOption("-classpath"),
-                    new VMOption("lib/jdk9plus/*" + File.pathSeparator + "lib/boot/*")
-            });
-        }
+        options = OptionUtils.combine(options, new VMOption[] {
+            new VMOption("--add-reads=java.xml=java.logging"),
+            new VMOption("--add-exports=java.base/org.apache.karaf.specs.locator=java.xml,ALL-UNNAMED"),
+            new VMOption("--patch-module"),
+            new VMOption("java.base=lib/endorsed/org.apache.karaf.specs.locator-" + karafVersion + ".jar"),
+            new VMOption("--patch-module"),
+            new VMOption("java.xml=lib/endorsed/org.apache.karaf.specs.java.xml-" + karafVersion + ".jar"),
+            new VMOption("--add-opens"),
+            new VMOption("java.base/java.security=ALL-UNNAMED"),
+            new VMOption("--add-opens"),
+            new VMOption("java.base/java.net=ALL-UNNAMED"),
+            new VMOption("--add-opens"),
+            new VMOption("java.base/java.lang=ALL-UNNAMED"),
+            new VMOption("--add-opens"),
+            new VMOption("java.base/java.util=ALL-UNNAMED"),
+            new VMOption("--add-opens"),
+            new VMOption("java.naming/javax.naming.spi=ALL-UNNAMED"),
+            new VMOption("--add-opens"),
+            new VMOption("java.rmi/sun.rmi.transport.tcp=ALL-UNNAMED"),
+            new VMOption("--add-exports=java.base/sun.net.www.protocol.http=ALL-UNNAMED"),
+            new VMOption("--add-exports=java.base/sun.net.www.protocol.https=ALL-UNNAMED"),
+            new VMOption("--add-exports=java.base/sun.net.www.protocol.jar=ALL-UNNAMED"),
+            new VMOption("--add-exports=jdk.naming.rmi/com.sun.jndi.url.rmi=ALL-UNNAMED"),
+            new VMOption("-classpath"),
+            new VMOption("lib/jdk9plus/*" + File.pathSeparator + "lib/boot/*")
+        });
 
         return OptionUtils.combine(options, getAdditionalOptions());
     }
index 12f99b10934c357a4d14db9b4b81020f4832f221..3ea3d8e4a026aab70959b55271bedc4a618f6961 100644 (file)
@@ -13,29 +13,31 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>bundle-parent</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>mdsal-it-parent</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <properties>
-    <mdsal.version>1.10.0-SNAPSHOT</mdsal.version>
     <karaf.distro.groupId>org.opendaylight.odlparent</karaf.distro.groupId>
     <karaf.distro.artifactId>opendaylight-karaf-empty</karaf.distro.artifactId>
     <karaf.distro.type>zip</karaf.distro.type>
     <karaf.keep.unpack>false</karaf.keep.unpack>
+
+    <!-- FIXME: Remove this -->
+    <odlparent.modernizer.enforce>false</odlparent.modernizer.enforce>
   </properties>
 
   <dependencyManagement>
     <dependencies>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>${mdsal.version}</version>
+        <artifactId>controller-artifacts</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
@@ -46,7 +48,6 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>mdsal-it-base</artifactId>
-      <version>${mdsal.version}</version>
     </dependency>
 
     <!-- Dependencies for pax exam karaf container -->
@@ -83,11 +84,6 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
         <groupId>org.ops4j.pax.url</groupId>
         <artifactId>pax-url-aether</artifactId>
     </dependency>
-    <dependency>
-        <groupId>javax.inject</groupId>
-        <artifactId>javax.inject</artifactId>
-        <version>1</version>
-    </dependency>
     <dependency>
         <groupId>org.apache.karaf.features</groupId>
         <artifactId>org.apache.karaf.features.core</artifactId>
@@ -95,25 +91,28 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
     </dependency>
     <dependency>
         <groupId>org.osgi</groupId>
-        <artifactId>org.osgi.core</artifactId>
-    </dependency>
-    <dependency>
-        <groupId>junit</groupId>
-        <artifactId>junit</artifactId>
+        <artifactId>org.osgi.framework</artifactId>
     </dependency>
 
-    <!-- Testing Dependencies -->
+    <!--
+        Unfortunately, the default mockito-inline does not work in OSGi.
+        See https://github.com/mockito/mockito/issues/2203#issuecomment-926372053
+      -->
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
+      <version>4.11.0</version>
       <scope>test</scope>
     </dependency>
   </dependencies>
+
   <build>
     <plugins>
       <plugin>
           <artifactId>maven-surefire-plugin</artifactId>
           <configuration>
+              <!-- Overridden to fix corruption where the process would hang after tests -->
+              <forkNode implementation="org.apache.maven.plugin.surefire.extensions.SurefireForkNodeFactory"/>
               <systemPropertyVariables>
                 <!-- CONTROLLER-1799: Use the same repository for Pax Exam as is used for Maven -->
                 <org.ops4j.pax.url.mvn.localRepository>${settings.localRepository}</org.ops4j.pax.url.mvn.localRepository>
@@ -177,20 +176,20 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
         <artifactId>maven-dependency-plugin</artifactId>
         <executions>
           <execution>
-           <id>unpack-karaf-resources</id>
-           <goals>
-            <goal>unpack-dependencies</goal>
-           </goals>
-           <phase>process-test-resources</phase>
-           <configuration>
-            <outputDirectory>${project.build.directory}/test-classes</outputDirectory>
-            <groupId>org.opendaylight.controller</groupId>
-            <includeArtifactIds>mockito-core,objenesis,mdsal-it-base</includeArtifactIds>
-            <excludes>META-INF\/**</excludes>
-            <ignorePermissions>false</ignorePermissions>
-           </configuration>
+            <id>unpack-karaf-resources</id>
+            <goals>
+              <goal>unpack-dependencies</goal>
+            </goals>
+            <phase>process-test-resources</phase>
+            <configuration>
+              <outputDirectory>${project.build.directory}/test-classes</outputDirectory>
+              <groupId>org.opendaylight.controller</groupId>
+              <includeArtifactIds>mockito-core,byte-buddy,objenesis,mdsal-it-base</includeArtifactIds>
+              <excludes>META-INF\/**</excludes>
+              <ignorePermissions>false</ignorePermissions>
+            </configuration>
           </execution>
-         </executions>
+        </executions>
       </plugin>
     </plugins>
   </build>
diff --git a/opendaylight/md-sal/mdsal-trace/api/pom.xml b/opendaylight/md-sal/mdsal-trace/api/pom.xml
deleted file mode 100644 (file)
index cf9493c..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright © 2016 Red Hat and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>mdsal-trace-api</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-api</artifactId>
-    </dependency>
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>attach-artifacts</id>
-            <goals>
-              <goal>attach-artifact</goal>
-            </goals>
-            <phase>package</phase>
-            <configuration>
-              <artifacts>
-                <artifact>
-                  <file>${project.build.directory}/classes/initial/mdsaltrace_config.xml</file>
-                  <type>xml</type>
-                  <classifier>config</classifier>
-                </artifact>
-              </artifacts>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/api/src/main/java/org/opendaylight/controller/md/sal/trace/api/TracingDOMDataBroker.java b/opendaylight/md-sal/mdsal-trace/api/src/main/java/org/opendaylight/controller/md/sal/trace/api/TracingDOMDataBroker.java
deleted file mode 100644 (file)
index 88e2261..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.api;
-
-import java.io.PrintStream;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-
-/**
- * Interface so that the tracing broker service can be more explicitly imported.
- */
-public interface TracingDOMDataBroker extends DOMDataBroker {
-
-    /**
-     * Prints a human-readable "report" of all opened but not closed transactions,
-     * including transactions chains and transactions opened by them, onto the printStream.
-     * @param minOpenTransactions minimum open number of transactions (leaks with fewer are not printed)
-     * @return true if there were any open transactions, false if none
-     */
-    boolean printOpenTransactions(PrintStream printStream, int minOpenTransactions);
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/api/src/main/resources/initial/mdsaltrace_config.xml b/opendaylight/md-sal/mdsal-trace/api/src/main/resources/initial/mdsaltrace_config.xml
deleted file mode 100644 (file)
index 27c8bdd..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-<config xmlns="urn:opendaylight:params:xml:ns:yang:mdsaltrace">
-    <!--  Both registration-watches as well as write-watches will
-          log EVERYTHING by default, if we do not constrain any paths;
-          therefore we set a fake one to get nothing out-of-the-box;
-          please remove this first fake one when you configure this
-          to watch what you are really interested in instead: -->
-    <registration-watches>/this/will/never/exist</registration-watches>
-    <!-- <registration-watches>/neutron-router-dpns/router-dpn-list</registration-watches> -->
-    <!-- <registration-watches>/tunnels_state/state-tunnel-list</registration-watches> -->
-
-    <write-watches>/this/will/never/exist</write-watches>
-    <!-- <write-watches> /NetworkTopology/Topology</write-watches> -->
-
-    <!-- Enable or disable transaction context debug. This will preserve the call site trace for
-         transactions, so that the original caller of un-close'd() transaction can be identified.
-         NB: This is a different property from the equally named one in etc/org.opendaylight.controller.cluster.datastore.cfg;
-         that one does something somewhat similar, but serves to include the stack trace on failed transaction submit,
-         whereas this one is specific to odl-mdsal-trace's trace:transaction leak troubleshooting command.
-         [This documentation has been copy/pasted from mdsaltrace.yang, and should be kept in line.] -->
-    <transaction-debug-context-enabled>true</transaction-debug-context-enabled>
-</config>
diff --git a/opendaylight/md-sal/mdsal-trace/api/src/main/yang/mdsaltrace.yang b/opendaylight/md-sal/mdsal-trace/api/src/main/yang/mdsaltrace.yang
deleted file mode 100644 (file)
index 60e0d91..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-module mdsaltrace {
-    yang-version 1;
-    namespace "urn:opendaylight:params:xml:ns:yang:mdsaltrace";
-    prefix "mdsaltrace";
-
-    organization
-        "Red Hat, Inc.";
-
-    description
-        "Copyright (c) 2016 Red Hat, Inc. and others.  All rights reserved.
-
-        This program and the accompanying materials are made available under the
-        terms of the Eclipse Public License v1.0 which accompanies this distribution,
-        and is available at http://www.eclipse.org/legal/epl-v10.html";
-
-    revision "2016-09-08" {
-        description "Initial revision of mdsaltrace model";
-    }
-
-    container config {
-        leaf-list registration-watches {
-            type string;
-        }
-        leaf-list write-watches {
-            type string;
-        }
-        leaf transaction-debug-context-enabled {
-            default false;
-            type boolean;
-            description "Enable or disable transaction context debug. This will preserve the call site trace for
-                         transactions, so that the original caller of un-close'd() transaction can be identified.
-                         NB: This is a different property from the equally named one in etc/org.opendaylight.controller.cluster.datastore.cfg;
-                         that one does something somewhat similar, but serves to include the stack trace on failed transaction submit,
-                         whereas this one is specific to odl-mdsal-trace's trace:transaction leak troubleshooting command.";
-            // This ^^^ description is also copy/pasted in mdsaltrace_config.xml, and should be kept in line.]
-        }
-    }
-}
diff --git a/opendaylight/md-sal/mdsal-trace/binding-impl/pom.xml b/opendaylight/md-sal/mdsal-trace/binding-impl/pom.xml
deleted file mode 100644 (file)
index 6915bed..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
-Copyright © 2016 Red Hat and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>mdsal-trace-binding-impl</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>mdsal-trace-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>mdsal-trace-dom-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-broker-impl</artifactId>
-    </dependency>
-
-    <!-- Testing Dependencies -->
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-        <plugin>
-            <groupId>org.apache.felix</groupId>
-            <artifactId>maven-bundle-plugin</artifactId>
-            <extensions>true</extensions>
-            <configuration>
-                <instructions>
-                    <Import-Package>
-                      org.opendaylight.controller.md.sal.trace.api,
-                      org.opendaylight.controller.md.sal.dom.api,
-                      org.opendaylight.controller.md.sal.binding.api,
-                      org.opendaylight.controller.md.sal.binding.spi,
-                      *
-                    </Import-Package>
-                </instructions>
-            </configuration>
-        </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/binding-impl/src/main/java/org/opendaylight/controller/md/sal/trace/binding/impl/TracingBindingBrokerWiring.java b/opendaylight/md-sal/mdsal-trace/binding-impl/src/main/java/org/opendaylight/controller/md/sal/trace/binding/impl/TracingBindingBrokerWiring.java
deleted file mode 100644 (file)
index 0876ae0..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2018 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.binding.impl;
-
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.spi.AdapterFactory;
-import org.opendaylight.controller.md.sal.trace.api.TracingDOMDataBroker;
-
-/**
- * Programmatic "wiring" for dependency injection.
- *
- * <p>See org.opendaylight.controller.md.sal.binding.impl.BindingBrokerWiring.
- *
- * @author Michael Vorburger.ch
- */
-public class TracingBindingBrokerWiring {
-
-    private final DataBroker dataBroker;
-    private final DataBroker pingPongDataBroker;
-
-    public TracingBindingBrokerWiring(TracingDOMDataBroker tracingDOMDataBroker,
-            TracingDOMDataBroker tracingPingPongDOMDataBroker, AdapterFactory adapterFactory) {
-
-        dataBroker = adapterFactory.createDataBroker(tracingDOMDataBroker);
-        pingPongDataBroker = adapterFactory.createDataBroker(tracingPingPongDOMDataBroker);
-    }
-
-    public DataBroker getTracingDataBroker() {
-        return dataBroker;
-    }
-
-    public DataBroker getTracingPingPongDataBroker() {
-        return pingPongDataBroker;
-    }
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/binding-impl/src/main/resources/OSGI-INF/blueprint/impl-blueprint.xml b/opendaylight/md-sal/mdsal-trace/binding-impl/src/main/resources/OSGI-INF/blueprint/impl-blueprint.xml
deleted file mode 100644 (file)
index 30b8a99..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
-Copyright © 2016 Red Hat and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-  xmlns:ext="http://aries.apache.org/blueprint/xmlns/blueprint-ext/v1.0.0"
-  xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-  odl:use-default-for-reference-types="true">
-
-  <reference id="adapterFactory" interface="org.opendaylight.controller.md.sal.binding.spi.AdapterFactory"/>
-
-  <reference id="tracingDefaultDOMBroker"
-      interface="org.opendaylight.controller.md.sal.trace.api.TracingDOMDataBroker"
-      odl:type="default" />
-
-  <reference id="tracingPingPongDOMDataBroker"
-      interface="org.opendaylight.controller.md.sal.trace.api.TracingDOMDataBroker"
-      odl:type="pingpong"/>
-
-  <bean id="wiring" class="org.opendaylight.controller.md.sal.trace.binding.impl.TracingBindingBrokerWiring">
-    <argument ref="tracingDefaultDOMBroker"/>
-    <argument ref="tracingPingPongDOMDataBroker"/>
-    <argument ref="adapterFactory"/>
-  </bean>
-
-  <bean id="tracingBindingDataBroker" factory-ref="wiring" factory-method="getTracingDataBroker" />
-  <service id="tracingBindingDataBrokerSvc" ref="tracingBindingDataBroker"
-           interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"
-           odl:type="default" ranking="10"/>
-
-  <bean id="bindingTracingPingPongDataBroker" factory-ref="wiring" factory-method="getTracingPingPongDataBroker" />
-  <service ref="bindingTracingPingPongDataBroker"
-           interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"
-           odl:type="pingpong" ranking="10"/>
-</blueprint>
diff --git a/opendaylight/md-sal/mdsal-trace/cli/pom.xml b/opendaylight/md-sal/mdsal-trace/cli/pom.xml
deleted file mode 100644 (file)
index 0337c6d..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright © 2017 Red Hat and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>mdsal-trace-cli</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>mdsal-trace-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.karaf.shell</groupId>
-      <artifactId>org.apache.karaf.shell.core</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>*</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.felix</groupId>
-        <artifactId>maven-bundle-plugin</artifactId>
-        <extensions>true</extensions>
-        <configuration>
-          <instructions>
-            <Karaf-Commands>*</Karaf-Commands>
-          </instructions>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/cli/src/main/java/org/opendaylight/controller/md/sal/trace/cli/PrintOpenTransactionsCommand.java b/opendaylight/md-sal/mdsal-trace/cli/src/main/java/org/opendaylight/controller/md/sal/trace/cli/PrintOpenTransactionsCommand.java
deleted file mode 100644 (file)
index 3ebd506..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.cli;
-
-import java.util.List;
-import org.apache.karaf.shell.api.action.Action;
-import org.apache.karaf.shell.api.action.Argument;
-import org.apache.karaf.shell.api.action.Command;
-import org.apache.karaf.shell.api.action.lifecycle.Reference;
-import org.apache.karaf.shell.api.action.lifecycle.Service;
-import org.opendaylight.controller.md.sal.trace.api.TracingDOMDataBroker;
-
-/**
- * Karaf CLI command to dump all open transactions.
- *
- * @author Michael Vorburger.ch
- */
-@Service
-@Command(scope = "trace", name = "transactions",
-    description = "Show all (still) open transactions; including stack trace of creator, "
-    + "if transaction-debug-context-enabled is true in mdsaltrace_config.xml")
-public class PrintOpenTransactionsCommand implements Action {
-
-    @Argument(index = 0, name = "minOpenTransactions", required = false, multiValued = false,
-            description = "Minimum open number of transactions (leaks with fewer are suppressed)")
-    Integer minOpenTransactions = 1;
-
-    @Reference
-    private List<TracingDOMDataBroker> tracingDOMDataBrokers;
-
-    // NB: Do NOT have a non-default constructor for injection of @Reference
-    // Karaf needs a default constructor to create the command - and it works as is.
-
-    @Override
-    @SuppressWarnings("checkstyle:RegexpSingleLineJava")
-    public Object execute() {
-        boolean hasFound = false;
-        for (TracingDOMDataBroker tracingDOMDataBroker : tracingDOMDataBrokers) {
-            hasFound |= tracingDOMDataBroker.printOpenTransactions(System.out, minOpenTransactions);
-        }
-        if (hasFound) {
-            System.out.println(
-                    "Actually did find real leaks with more than " + minOpenTransactions + " open transactions");
-        } else {
-            System.out.println(
-                    "Did not find any real leaks with more than " + minOpenTransactions + " open transactions");
-        }
-        return hasFound;
-    }
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/deploy-site.xml b/opendaylight/md-sal/mdsal-trace/deploy-site.xml
deleted file mode 100644 (file)
index 6a72564..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=2 tabstop=2: -->
-<!--
-    Copyright (c) 2016 Red Hat and others.  All rights reserved.
-
-    This program and the accompanying materials are made available under the
-    terms of the Eclipse Public License v1.0 which accompanies this distribution,
-    and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <groupId>org.opendaylight.mdsal</groupId>
-  <artifactId>deploy-site</artifactId>
-  <version>0.1.0-SNAPSHOT</version>
-  <packaging>pom</packaging>
-
-  <properties>
-    <stream>latest</stream>
-    <nexus.site.url>dav:https://nexus.opendaylight.org/content/sites/site/${project.groupId}/${stream}/</nexus.site.url>
-  </properties>
-
-  <build>
-    <extensions>
-      <extension>
-        <groupId>org.apache.maven.wagon</groupId>
-         <artifactId>wagon-webdav-jackrabbit</artifactId>
-         <version>2.9</version>
-      </extension>
-    </extensions>
-
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-site-plugin</artifactId>
-        <version>3.4</version>
-        <configuration>
-          <inputDirectory>${project.build.directory}/staged-site</inputDirectory>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <distributionManagement>
-    <site>
-      <id>opendaylight-site</id>
-      <url>${nexus.site.url}</url>
-    </site>
-  </distributionManagement>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/pom.xml b/opendaylight/md-sal/mdsal-trace/dom-impl/pom.xml
deleted file mode 100644 (file)
index 85127f1..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
-Copyright © 2016 Red Hat and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>mdsal-trace-dom-impl</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>mdsal-trace-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>sal-binding-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>sal-core-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>sal-broker-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-codec</artifactId>
-    </dependency>
-
-    <!-- Testing Dependencies -->
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>sal-binding-broker-impl</artifactId>
-      <version>${project.version}</version>
-      <classifier>tests</classifier>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>${project.groupId}</groupId>
-      <artifactId>sal-binding-broker-impl</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.truth</groupId>
-      <artifactId>truth</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-    <build>
-    <plugins>
-        <plugin>
-            <groupId>org.apache.felix</groupId>
-            <artifactId>maven-bundle-plugin</artifactId>
-            <extensions>true</extensions>
-            <configuration>
-                <instructions>
-                    <Import-Package>
-                      org.opendaylight.controller.md.sal.dom.broker.impl,
-                      *
-                    </Import-Package>
-                    <Export-Package>org.opendaylight.controller.md.sal.trace.dom.impl</Export-Package>
-                </instructions>
-            </configuration>
-        </plugin>
-    </plugins>
-    </build>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/AbstractCloseTracked.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/AbstractCloseTracked.java
deleted file mode 100644 (file)
index fa27d98..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.closetracker.impl;
-
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Convenience abstract base class for {@link CloseTracked} implementors.
- *
- * @author Michael Vorburger.ch
- */
-public abstract class AbstractCloseTracked<T extends AbstractCloseTracked<T>> implements CloseTracked<T> {
-
-    private final CloseTrackedTrait<T> closeTracker;
-
-    protected AbstractCloseTracked(CloseTrackedRegistry<T> transactionChainRegistry) {
-        this.closeTracker = new CloseTrackedTrait<>(transactionChainRegistry, this);
-    }
-
-    protected void removeFromTrackedRegistry() {
-        closeTracker.removeFromTrackedRegistry();
-    }
-
-    @Override
-    public @Nullable StackTraceElement[] getAllocationContextStackTrace() {
-        return closeTracker.getAllocationContextStackTrace();
-    }
-
-    @Override
-    public final CloseTracked<T> getRealCloseTracked() {
-        return this;
-    }
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTracked.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTracked.java
deleted file mode 100644 (file)
index af77e18..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.closetracker.impl;
-
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Object which can track where something has been created, and if it has been correctly "closed".
- *
- * <p>Includes preserving the context of the call stack which created an object, and the instant it was created.
- *
- * @author Michael Vorburger.ch
- */
-public interface CloseTracked<T extends CloseTracked<T>> {
-
-    /**
-     * This returns the allocation context as {@link StackTraceElement}s. NB that
-     * this is a relatively <b>EXPENSIVE</b> operation! You should only ever call
-     * this when you really need to, e.g. when you actually produce output for
-     * humans, but not too early.
-     */
-    // TODO When we're on Java 9, then instead return a StackWalker.StackFrame[] here?
-    @Nullable StackTraceElement[] getAllocationContextStackTrace();
-
-    CloseTracked<T> getRealCloseTracked();
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedRegistry.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedRegistry.java
deleted file mode 100644 (file)
index 7f99824..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.closetracker.impl;
-
-import static java.util.Arrays.asList;
-import static java.util.Collections.emptyList;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentSkipListSet;
-import javax.annotation.concurrent.ThreadSafe;
-
-/**
- * Registry of {@link CloseTracked} instances.
- *
- * @author Michael Vorburger.ch
- */
-@ThreadSafe
-public class CloseTrackedRegistry<T extends CloseTracked<T>> {
-
-    private final Object anchor;
-    private final String createDescription;
-
-    private final Set<CloseTracked<T>> tracked =
-        new ConcurrentSkipListSet<>(Comparator.comparingInt(System::identityHashCode));
-
-    private final boolean isDebugContextEnabled;
-
-    /**
-     * Constructor.
-     *
-     * @param anchor
-     *            object where this registry is stored in, used for human output in
-     *            logging and other output
-     * @param createDescription
-     *            description of creator of instances of this registry, typically
-     *            e.g. name of method in the anchor class
-     * @param isDebugContextEnabled
-     *            whether or not the call stack should be preserved; this is (of
-     *            course) an expensive operation, and should only be used during
-     *            troubleshooting
-     */
-    public CloseTrackedRegistry(Object anchor, String createDescription, boolean isDebugContextEnabled) {
-        this.anchor = anchor;
-        this.createDescription = createDescription;
-        this.isDebugContextEnabled = isDebugContextEnabled;
-    }
-
-    public boolean isDebugContextEnabled() {
-        return isDebugContextEnabled;
-    }
-
-    public Object getAnchor() {
-        return anchor;
-    }
-
-    public String getCreateDescription() {
-        return createDescription;
-    }
-
-    // package protected, not public; only CloseTrackedTrait invokes this
-    void add(CloseTracked<T> closeTracked) {
-        tracked.add(closeTracked);
-    }
-
-    // package protected, not public; only CloseTrackedTrait invokes this
-    void remove(CloseTracked<T> closeTracked) {
-        tracked.remove(closeTracked);
-    }
-
-    /**
-     * Creates and returns a "report" of (currently) tracked but not (yet) closed
-     * instances.
-     *
-     * @return Set of CloseTrackedRegistryReportEntry, of which each the stack trace
-     *         element identifies a unique allocation context (or an empty List if
-     *         debugContextEnabled is false), and value is the number of open
-     *         instances created at that place in the code.
-     */
-    // For some reason, FB sees 'map' as useless but it clearly isn't.
-    @SuppressFBWarnings("UC_USELESS_OBJECT")
-    public Set<CloseTrackedRegistryReportEntry<T>> getAllUnique() {
-        Map<List<StackTraceElement>, Long> map = new HashMap<>();
-        Set<CloseTracked<T>> copyOfTracked = new HashSet<>(tracked);
-        for (CloseTracked<T> closeTracked : copyOfTracked) {
-            final StackTraceElement[] stackTraceArray = closeTracked.getAllocationContextStackTrace();
-            List<StackTraceElement> stackTraceElements =
-                    stackTraceArray != null ? Arrays.asList(stackTraceArray) : Collections.emptyList();
-            map.merge(stackTraceElements, 1L, (oldValue, value) -> oldValue + 1);
-        }
-
-        Set<CloseTrackedRegistryReportEntry<T>> report = new HashSet<>();
-        map.forEach((stackTraceElements, number) -> copyOfTracked.stream().filter(closeTracked -> {
-            StackTraceElement[] closeTrackedStackTraceArray = closeTracked.getAllocationContextStackTrace();
-            List<StackTraceElement> closeTrackedStackTraceElements =
-                closeTrackedStackTraceArray != null ? asList(closeTrackedStackTraceArray) : emptyList();
-            return closeTrackedStackTraceElements.equals(stackTraceElements);
-        }).findAny().ifPresent(exampleCloseTracked -> report.add(
-            new CloseTrackedRegistryReportEntry<>(exampleCloseTracked, number, stackTraceElements))));
-        return report;
-    }
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedRegistryReportEntry.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedRegistryReportEntry.java
deleted file mode 100644 (file)
index 2dd161a..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.closetracker.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import java.util.List;
-
-/**
- * Element of a "report" created by a {@link CloseTrackedRegistry}.
- *
- * @author Michael Vorburger.ch
- */
-public class CloseTrackedRegistryReportEntry<T extends CloseTracked<T>> {
-
-    private final CloseTracked<T> exampleCloseTracked;
-    private final long numberAddedNotRemoved;
-    private final List<StackTraceElement> stackTraceElements;
-
-    public CloseTrackedRegistryReportEntry(CloseTracked<T> exampleCloseTracked, long numberAddedNotRemoved,
-            List<StackTraceElement> stackTraceElements) {
-        this.exampleCloseTracked = requireNonNull(exampleCloseTracked, "closeTracked");
-        this.numberAddedNotRemoved = requireNonNull(numberAddedNotRemoved, "numberAddedNotRemoved");
-        this.stackTraceElements = requireNonNull(stackTraceElements, "stackTraceElements");
-    }
-
-    public long getNumberAddedNotRemoved() {
-        return numberAddedNotRemoved;
-    }
-
-    public CloseTracked<T> getExampleCloseTracked() {
-        return exampleCloseTracked;
-    }
-
-    public List<StackTraceElement> getStackTraceElements() {
-        return stackTraceElements;
-    }
-
-    @Override
-    public String toString() {
-        return "CloseTrackedRegistryReportEntry [numberAddedNotRemoved=" + numberAddedNotRemoved + ", closeTracked="
-                + exampleCloseTracked + ", stackTraceElements.size=" + stackTraceElements.size() + "]";
-    }
-
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedTrait.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/CloseTrackedTrait.java
deleted file mode 100644 (file)
index f96daaa..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.closetracker.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Implementation of {@link CloseTracked} which can be used as a field in
- * another class which implements {@link CloseTracked} and delegates its methods
- * to this.
- *
- * <p>This is useful if that class already has another parent class.
- * If it does not, then it's typically more convenient to just extend AbstractCloseTracked.
- *
- * @author Michael Vorburger.ch
- */
-public class CloseTrackedTrait<T extends CloseTracked<T>> implements CloseTracked<T> {
-
-    // NB: It's important that we keep a Throwable here, and not directly the StackTraceElement[] !
-    // This is because creating a new Throwable() is a lot less expensive in terms of runtime overhead
-    // than actually calling its getStackTrace(), which we can delay until we really need to.
-    // see also e.g. https://stackoverflow.com/a/26122232/421602
-    private final @Nullable Throwable allocationContext;
-    private final CloseTrackedRegistry<T> closeTrackedRegistry;
-    private final CloseTracked<T> realCloseTracked;
-
-    @SuppressFBWarnings(value = "NP_STORE_INTO_NONNULL_FIELD", justification = "SpotBugs and JDT annotations")
-    public CloseTrackedTrait(CloseTrackedRegistry<T> transactionChainRegistry, CloseTracked<T> realCloseTracked) {
-        if (transactionChainRegistry.isDebugContextEnabled()) {
-            // NB: We're NOT doing the (expensive) getStackTrace() here just yet (only below)
-            // TODO When we're on Java 9, then instead use the new java.lang.StackWalker API..
-            this.allocationContext = new Throwable();
-        } else {
-            this.allocationContext = null;
-        }
-        this.realCloseTracked = requireNonNull(realCloseTracked, "realCloseTracked");
-        this.closeTrackedRegistry = requireNonNull(transactionChainRegistry, "transactionChainRegistry");
-        this.closeTrackedRegistry.add(this);
-    }
-
-    @Override
-    @SuppressFBWarnings("PZLA_PREFER_ZERO_LENGTH_ARRAYS")
-    public StackTraceElement[] getAllocationContextStackTrace() {
-        return allocationContext != null ? allocationContext.getStackTrace() : null;
-    }
-
-    public void removeFromTrackedRegistry() {
-        closeTrackedRegistry.remove(this);
-    }
-
-    @Override
-    public CloseTracked<T> getRealCloseTracked() {
-        return realCloseTracked;
-    }
-
-}
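
For orientation on what is being removed (not part of the change itself): a minimal sketch of how CloseTrackedTrait was meant to be embedded by a class that already has another superclass and therefore cannot extend AbstractCloseTracked. The class name MyResource and the registry wiring are hypothetical; only APIs visible in the removed sources are used, mirroring what TracingWriteTransaction and TracingReadWriteTransaction below actually do.

    import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTracked;
    import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistry;
    import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedTrait;

    // Hypothetical user of the removed API; assume it already extends some other base
    // class, which is why it embeds CloseTrackedTrait instead of AbstractCloseTracked.
    public class MyResource implements CloseTracked<MyResource>, AutoCloseable {

        private final CloseTrackedTrait<MyResource> closeTracker;

        public MyResource(CloseTrackedRegistry<MyResource> registry) {
            // Registers this instance; an allocation stack trace is captured lazily,
            // and only when the registry has debug context enabled.
            closeTracker = new CloseTrackedTrait<>(registry, this);
        }

        @Override
        public StackTraceElement[] getAllocationContextStackTrace() {
            return closeTracker.getAllocationContextStackTrace();
        }

        @Override
        public CloseTracked<MyResource> getRealCloseTracked() {
            return this;
        }

        @Override
        public void close() {
            // De-registers, so this instance no longer appears in leak reports.
            closeTracker.removeFromTrackedRegistry();
        }
    }
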
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/package-info.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/closetracker/impl/package-info.java
deleted file mode 100644 (file)
index 60d9674..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-/**
- * Utilities to track (non) "closing" of objects.
- */
-// This generic infra may be moved somewhere else, later
-@org.eclipse.jdt.annotation.NonNullByDefault
-package org.opendaylight.controller.md.sal.trace.closetracker.impl;
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/AbstractTracingWriteTransaction.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/AbstractTracingWriteTransaction.java
deleted file mode 100644 (file)
index a8f21f9..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.dom.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-abstract class AbstractTracingWriteTransaction implements DOMDataWriteTransaction {
-
-    private final DOMDataWriteTransaction delegate;
-    private final TracingBroker tracingBroker;
-    private final List<String> logs = new ArrayList<>();
-
-    AbstractTracingWriteTransaction(DOMDataWriteTransaction delegate, TracingBroker tracingBroker) {
-        this.delegate = requireNonNull(delegate);
-        this.tracingBroker = requireNonNull(tracingBroker);
-        recordOp(null, null, "instantiate", null);
-    }
-
-    private void recordOp(LogicalDatastoreType store, YangInstanceIdentifier yiid, String method,
-            NormalizedNode<?, ?> node) {
-        if (yiid != null && !tracingBroker.isWriteWatched(yiid, store)) {
-            return;
-        }
-
-        final Object value = node != null ? node.getValue() : null;
-
-        if (value != null && value instanceof ImmutableSet && ((Set<?>)value).isEmpty()) {
-            if (TracingBroker.LOG.isDebugEnabled()) {
-                TracingBroker.LOG.debug("Empty data set write to {}", tracingBroker.toPathString(yiid));
-            }
-        } else {
-            StringBuilder sb = new StringBuilder();
-            sb.append("Method \"").append(method).append('"');
-            if (store != null) {
-                sb.append(" to ").append(store);
-            }
-            if (yiid != null) {
-                sb.append(" at ").append(tracingBroker.toPathString(yiid));
-            }
-            sb.append('.');
-            if (yiid != null) {
-                // If we don’t have an id, we don’t expect data either
-                sb.append(" Data: ");
-                if (node != null) {
-                    sb.append(node.getValue());
-                } else {
-                    sb.append("null");
-                }
-            }
-            sb.append(" Stack:").append(tracingBroker.getStackSummary());
-            synchronized (this) {
-                logs.add(sb.toString());
-            }
-        }
-    }
-
-    private synchronized void logOps() {
-        synchronized (this) {
-            if (TracingBroker.LOG.isWarnEnabled()) {
-                TracingBroker.LOG.warn("Transaction {} contains the following operations:", getIdentifier());
-                logs.forEach(TracingBroker.LOG::warn);
-            }
-            logs.clear();
-        }
-    }
-
-    @Override
-    public void put(LogicalDatastoreType store, YangInstanceIdentifier yiid, NormalizedNode<?, ?> node) {
-        recordOp(store, yiid, "put", node);
-        delegate.put(store, yiid, node);
-    }
-
-    @Override
-    public void merge(LogicalDatastoreType store, YangInstanceIdentifier yiid, NormalizedNode<?, ?> node) {
-        recordOp(store, yiid, "merge", node);
-        delegate.merge(store, yiid, node);
-    }
-
-    @Override
-    public boolean cancel() {
-        synchronized (this) {
-            logs.clear();
-        }
-        return delegate.cancel();
-    }
-
-    @Override
-    public void delete(LogicalDatastoreType store, YangInstanceIdentifier yiid) {
-        recordOp(store, yiid, "delete", null);
-        delegate.delete(store, yiid);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        recordOp(null, null, "commit", null);
-        logOps();
-        return delegate.commit();
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    // https://jira.opendaylight.org/browse/CONTROLLER-1792
-
-    @Override
-    public final boolean equals(Object object) {
-        return object == this || delegate.equals(object);
-    }
-
-    @Override
-    public final int hashCode() {
-        return delegate.hashCode();
-    }
-
-    @Override
-    public final String toString() {
-        return getClass().getName() + "; delegate=" + delegate;
-    }
-}
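
For context, a minimal sketch of what the removed wrapper did at runtime: writes on watched paths are recorded together with a stack summary, and the accumulated entries are logged at WARN when the transaction commits. It assumes an already-wired TracingBroker named tracingBroker plus placeholder path and node values; all three are stand-ins, not part of the original sources.

    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
    import org.opendaylight.controller.md.sal.trace.dom.impl.TracingBroker;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    // Fragment of a hypothetical caller; "tracingBroker", "path" and "node" are placeholders.
    static void tracedWrite(TracingBroker tracingBroker, YangInstanceIdentifier path, NormalizedNode<?, ?> node) {
        DOMDataWriteTransaction tx = tracingBroker.newWriteOnlyTransaction();
        // Recorded via recordOp("put", ...) when "path" matches a write watch
        // (or when no write watches are configured at all).
        tx.put(LogicalDatastoreType.OPERATIONAL, path, node);
        // recordOp("commit", ...), then logOps(): every entry recorded for this transaction is logged at WARN.
        tx.commit();
    }
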
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingBroker.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingBroker.java
deleted file mode 100644 (file)
index 8291a72..0000000
+++ /dev/null
@@ -1,441 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.dom.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.PrintStream;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.md.sal.trace.api.TracingDOMDataBroker;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTracked;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistry;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistryReportEntry;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908.Config;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@SuppressWarnings("checkstyle:JavadocStyle")
-//...because otherwise it whines about the elements in the @code block even though it's completely valid Javadoc
-
-/**
- * TracingBroker logs "write" operations and listener registrations to the md-sal. It logs the instance identifier path,
- * the objects themselves, as well as the stack trace of the call invoking the registration or write operation.
- * It works by operating as a "bump on the stack" between the application and actual DataBroker, intercepting write
- * and registration calls and writing to the log.
- *
- * <p>In addition, it (optionally) can also keep track of the stack trace of all new transaction allocations
- * (including TransactionChains, and transactions created in turn from them), in order to detect and report leaks
- * from transactions which were not closed.
- *
- * <h1>Wiring:</h1>
- * TracingBroker is designed to be easy to use. In fact, for bundles using Blueprint to inject their DataBroker,
- * TracingBroker can be used without modifying your code at all, in two simple steps:
- * <ol>
- * <li>
- * Simply add the dependency "features-mdsal-trace" to
- * your Karaf pom:
- * <pre>
- * {@code
- *  <dependency>
- *    <groupId>org.opendaylight.controller</groupId>
- *    <artifactId>features-mdsal-trace</artifactId>
- *    <version>1.7.0-SNAPSHOT</version>
- *    <classifier>features</classifier>
- *    <type>xml</type>
- *    <scope>runtime</scope>
- *  </dependency>
- * }
- * </pre>
- * </li>
- * <li>
- * Then just "feature:install odl-mdsal-trace" before you install your "real" feature(s) and you're done.
- * Beware that with Karaf 4 due to <a href="https://bugs.opendaylight.org/show_bug.cgi?id=9068">Bug 9068</a>
- * you'll probably have to use feature:install's --no-auto-refresh flag when installing your "real" feature.
- * </li>
- * </ol>
- * This works because the mdsaltrace-impl bundle registers its service implementing DOMDataBroker with a higher
- * rank than sal-binding-broker. As such, any OSGi service lookup for DataBroker will receive the TracingBroker.
- * <p> </p>
- * <h1>Avoiding log bloat:</h1>
- * TracingBroker can be configured to only print registrations or write ops pertaining to certain subtrees of the
- * md-sal. This can be done in the code via the methods of this class or via a config file. TracingBroker uses a more
- * convenient but non-standard representation of the instance identifiers: each instance identifier segment's
- * class.getSimpleName() is used, separated by '/'.
- * <p> </p>
- * <h1>Known issues</h1>
- * <ul>
- *     <li>
- *        Filtering by paths. For some registrations the codec that converts back from the DOM to binding paths is
- *        broken. As such, an approximated path is used in the output. For now it is recommended not to use
- *        watchRegistrations and allow all registrations to be logged.
- *     </li>
- * </ul>
- *
- */
-public class TracingBroker implements TracingDOMDataBroker {
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
-    static final Logger LOG = LoggerFactory.getLogger(TracingBroker.class);
-
-    private static final int STACK_TRACE_FIRST_RELEVANT_FRAME = 2;
-
-    private final String type; // "default" VS "pingpong"
-    private final BindingNormalizedNodeSerializer codec;
-    private final DOMDataBroker delegate;
-    private final List<Watch> registrationWatches = new ArrayList<>();
-    private final List<Watch> writeWatches = new ArrayList<>();
-
-    private final boolean isDebugging;
-    private final CloseTrackedRegistry<TracingTransactionChain> transactionChainsRegistry;
-    private final CloseTrackedRegistry<TracingReadOnlyTransaction> readOnlyTransactionsRegistry;
-    private final CloseTrackedRegistry<TracingWriteTransaction> writeTransactionsRegistry;
-    private final CloseTrackedRegistry<TracingReadWriteTransaction> readWriteTransactionsRegistry;
-
-    private class Watch {
-        final String iidString;
-        final LogicalDatastoreType store;
-
-        Watch(String iidString, LogicalDatastoreType storeOrNull) {
-            this.store = storeOrNull;
-            this.iidString = iidString;
-        }
-
-        private String toIidCompString(YangInstanceIdentifier iid) {
-            StringBuilder builder = new StringBuilder();
-            toPathString(iid, builder);
-            builder.append('/');
-            return builder.toString();
-        }
-
-        private boolean isParent(String parent, String child) {
-            int parentOffset = 0;
-            if (parent.length() > 0 && parent.charAt(0) == '<') {
-                parentOffset = parent.indexOf('>') + 1;
-            }
-
-            int childOffset = 0;
-            if (child.length() > 0 && child.charAt(0) == '<') {
-                childOffset = child.indexOf('>') + 1;
-            }
-
-            return child.startsWith(parent.substring(parentOffset), childOffset);
-        }
-
-        @SuppressWarnings({ "checkstyle:hiddenField", "hiding" })
-        public boolean subtreesOverlap(YangInstanceIdentifier iid, LogicalDatastoreType store) {
-            if (this.store != null && !this.store.equals(store)) {
-                return false;
-            }
-
-            String otherIidString = toIidCompString(iid);
-            return isParent(iidString, otherIidString) || isParent(otherIidString, iidString);
-        }
-
-        @SuppressWarnings({ "checkstyle:hiddenField", "hiding" })
-        public boolean eventIsOfInterest(YangInstanceIdentifier iid, LogicalDatastoreType store) {
-            if (this.store != null && !this.store.equals(store)) {
-                return false;
-            }
-
-            return isParent(iidString, toPathString(iid));
-        }
-    }
-
-    public TracingBroker(String type, DOMDataBroker delegate, Config config, BindingNormalizedNodeSerializer codec) {
-        this.type = requireNonNull(type, "type");
-        this.delegate = requireNonNull(delegate, "delegate");
-        this.codec = requireNonNull(codec, "codec");
-        configure(config);
-
-        if (config.isTransactionDebugContextEnabled() != null) {
-            this.isDebugging = config.isTransactionDebugContextEnabled();
-        } else {
-            this.isDebugging = false;
-        }
-        final String db = "DataBroker";
-        this.transactionChainsRegistry     = new CloseTrackedRegistry<>(db, "createTransactionChain()", isDebugging);
-        this.readOnlyTransactionsRegistry  = new CloseTrackedRegistry<>(db, "newReadOnlyTransaction()", isDebugging);
-        this.writeTransactionsRegistry     = new CloseTrackedRegistry<>(db, "newWriteOnlyTransaction()", isDebugging);
-        this.readWriteTransactionsRegistry = new CloseTrackedRegistry<>(db, "newReadWriteTransaction()", isDebugging);
-    }
-
-    private void configure(Config config) {
-        registrationWatches.clear();
-        List<String> paths = config.getRegistrationWatches();
-        if (paths != null) {
-            for (String path : paths) {
-                watchRegistrations(path, null);
-            }
-        }
-
-        writeWatches.clear();
-        paths = config.getWriteWatches();
-        if (paths != null) {
-            for (String path : paths) {
-                watchWrites(path, null);
-            }
-        }
-    }
-
-    /**
-     * Log registrations to this subtree of the md-sal.
-     * @param iidString the iid path of the root of the subtree
-     * @param store the LogicalDatastoreType to watch, or null to watch both
-     */
-    public void watchRegistrations(String iidString, LogicalDatastoreType store) {
-        LOG.info("Watching registrations to {} in {}", iidString, store);
-        registrationWatches.add(new Watch(iidString, store));
-    }
-
-    /**
-     * Log writes to this subtree of the md-sal.
-     * @param iidString the iid path of the root of the subtree
-     * @param store the LogicalDatastoreType to watch, or null to watch both
-     */
-    public void watchWrites(String iidString, LogicalDatastoreType store) {
-        LOG.info("Watching writes to {} in {}", iidString, store);
-        Watch watch = new Watch(iidString, store);
-        writeWatches.add(watch);
-    }
-
-    private boolean isRegistrationWatched(YangInstanceIdentifier iid, LogicalDatastoreType store) {
-        if (registrationWatches.isEmpty()) {
-            return true;
-        }
-
-        for (Watch regInterest : registrationWatches) {
-            if (regInterest.subtreesOverlap(iid, store)) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    boolean isWriteWatched(YangInstanceIdentifier iid, LogicalDatastoreType store) {
-        if (writeWatches.isEmpty()) {
-            return true;
-        }
-
-        for (Watch watch : writeWatches) {
-            if (watch.eventIsOfInterest(iid, store)) {
-                return true;
-            }
-        }
-
-        return false;
-    }
-
-    static void toPathString(InstanceIdentifier<? extends DataObject> iid, StringBuilder builder) {
-        for (InstanceIdentifier.PathArgument pathArg : iid.getPathArguments()) {
-            builder.append('/').append(pathArg.getType().getSimpleName());
-        }
-    }
-
-    String toPathString(YangInstanceIdentifier  yiid) {
-        StringBuilder sb = new StringBuilder();
-        toPathString(yiid, sb);
-        return sb.toString();
-    }
-
-
-    private void toPathString(YangInstanceIdentifier yiid, StringBuilder sb) {
-        InstanceIdentifier<?> iid = codec.fromYangInstanceIdentifier(yiid);
-        if (null == iid) {
-            reconstructIidPathString(yiid, sb);
-        } else {
-            toPathString(iid, sb);
-        }
-    }
-
-    private void reconstructIidPathString(YangInstanceIdentifier yiid, StringBuilder sb) {
-        sb.append("<RECONSTRUCTED FROM: \"").append(yiid.toString()).append("\">");
-        for (YangInstanceIdentifier.PathArgument pathArg : yiid.getPathArguments()) {
-            if (pathArg instanceof YangInstanceIdentifier.AugmentationIdentifier) {
-                sb.append('/').append("AUGMENTATION");
-                continue;
-            }
-            sb.append('/').append(pathArg.getNodeType().getLocalName());
-        }
-    }
-
-    String getStackSummary() {
-        StackTraceElement[] stack = Thread.currentThread().getStackTrace();
-
-        StringBuilder sb = new StringBuilder();
-        for (int i = STACK_TRACE_FIRST_RELEVANT_FRAME; i < stack.length; i++) {
-            StackTraceElement frame = stack[i];
-            sb.append("\n\t(TracingBroker)\t").append(frame.getClassName()).append('.').append(frame.getMethodName());
-        }
-
-        return sb.toString();
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        return new TracingReadWriteTransaction(delegate.newReadWriteTransaction(), this, readWriteTransactionsRegistry);
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        return new TracingWriteTransaction(delegate.newWriteOnlyTransaction(), this, writeTransactionsRegistry);
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(TransactionChainListener transactionChainListener) {
-        return new TracingTransactionChain(
-                delegate.createTransactionChain(transactionChainListener), this, transactionChainsRegistry);
-    }
-
-    @Override
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        return new TracingReadOnlyTransaction(delegate.newReadOnlyTransaction(), readOnlyTransactionsRegistry);
-    }
-
-    @Override
-    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
-        Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> res = delegate.getSupportedExtensions();
-        DOMDataTreeChangeService treeChangeSvc = (DOMDataTreeChangeService) res.get(DOMDataTreeChangeService.class);
-        if (treeChangeSvc == null) {
-            return res;
-        }
-
-        res = new HashMap<>(res);
-
-        res.put(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
-            @Override
-            public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(
-                    DOMDataTreeIdentifier domDataTreeIdentifier, L listener) {
-                if (isRegistrationWatched(domDataTreeIdentifier.getRootIdentifier(),
-                        domDataTreeIdentifier.getDatastoreType())) {
-                    LOG.warn("{} registration (registerDataTreeChangeListener) for {} from {}.",
-                            listener instanceof ClusteredDOMDataTreeChangeListener ? "Clustered" : "Non-clustered",
-                            toPathString(domDataTreeIdentifier.getRootIdentifier()), getStackSummary());
-                }
-                return treeChangeSvc.registerDataTreeChangeListener(domDataTreeIdentifier, listener);
-            }
-        });
-
-        return res;
-    }
-
-    @Override
-    public boolean printOpenTransactions(PrintStream ps, int minOpenTXs) {
-        if (transactionChainsRegistry.getAllUnique().isEmpty()
-            && readOnlyTransactionsRegistry.getAllUnique().isEmpty()
-            && writeTransactionsRegistry.getAllUnique().isEmpty()
-            && readWriteTransactionsRegistry.getAllUnique().isEmpty()) {
-
-            ps.println(type + ": No open transactions, great!");
-            return false;
-        }
-
-        ps.println(type + ": " + getClass().getSimpleName()
-                 + " found some not yet (or never..) closed transaction[chain]s!");
-        ps.println("[NB: If no stack traces are shown below, then "
-                 + "enable transaction-debug-context-enabled in mdsaltrace_config.xml]");
-        ps.println();
-        // Flag to track whether we actually found any leaks with at least minOpenTXs open instances
-        boolean hasFound = print(readOnlyTransactionsRegistry, ps, "  ", minOpenTXs);
-        hasFound |= print(writeTransactionsRegistry, ps, "  ", minOpenTXs);
-        hasFound |= print(readWriteTransactionsRegistry, ps, "  ", minOpenTXs);
-
-        // Now print details for each non-closed TransactionChain
-        // incl. in turn each ones own read/Write[Only]TransactionsRegistry
-        Set<CloseTrackedRegistryReportEntry<TracingTransactionChain>>
-            entries = transactionChainsRegistry.getAllUnique();
-        if (!entries.isEmpty()) {
-            ps.println("  " + transactionChainsRegistry.getAnchor() + " : "
-                    + transactionChainsRegistry.getCreateDescription());
-        }
-        for (CloseTrackedRegistryReportEntry<TracingTransactionChain> entry : entries) {
-            ps.println("    " + entry.getNumberAddedNotRemoved() + "x TransactionChains opened but not closed here:");
-            printStackTraceElements(ps, "      ", entry.getStackTraceElements());
-            @SuppressWarnings("resource")
-            TracingTransactionChain txChain = (TracingTransactionChain) entry
-                .getExampleCloseTracked().getRealCloseTracked();
-            hasFound |= print(txChain.getReadOnlyTransactionsRegistry(), ps, "        ", minOpenTXs);
-            hasFound |= print(txChain.getWriteTransactionsRegistry(), ps, "        ", minOpenTXs);
-            hasFound |= print(txChain.getReadWriteTransactionsRegistry(), ps, "        ", minOpenTXs);
-        }
-        ps.println();
-
-        return hasFound;
-    }
-
-    private <T extends CloseTracked<T>> boolean print(
-            CloseTrackedRegistry<T> registry, PrintStream ps, String indent, int minOpenTransactions) {
-        Set<CloseTrackedRegistryReportEntry<T>> unsorted = registry.getAllUnique();
-        if (unsorted.size() < minOpenTransactions) {
-            return false;
-        }
-
-        List<CloseTrackedRegistryReportEntry<T>> entries = new ArrayList<>(unsorted);
-        entries.sort((o1, o2) -> Long.compare(o2.getNumberAddedNotRemoved(), o1.getNumberAddedNotRemoved()));
-
-        if (!entries.isEmpty()) {
-            ps.println(indent + registry.getAnchor() + " : " + registry.getCreateDescription());
-        }
-        entries.forEach(entry -> {
-            ps.println(indent + "  " + entry.getNumberAddedNotRemoved()
-                + "x transactions opened here, which are not closed:");
-            printStackTraceElements(ps, indent + "    ", entry.getStackTraceElements());
-        });
-        if (!entries.isEmpty()) {
-            ps.println();
-        }
-        return true;
-    }
-
-    private void printStackTraceElements(PrintStream ps, String indent, List<StackTraceElement> stackTraceElements) {
-        boolean ellipsis = false;
-        for (final StackTraceElement stackTraceElement : stackTraceElements) {
-            if (isStackTraceElementInteresting(stackTraceElement)) {
-                ps.println(indent + stackTraceElement);
-                ellipsis = false;
-            } else if (!ellipsis) {
-                ps.println(indent + "(...)");
-                ellipsis = true;
-            }
-        }
-    }
-
-    private boolean isStackTraceElementInteresting(StackTraceElement element) {
-        final String className = element.getClassName();
-        return !className.startsWith(getClass().getPackage().getName())
-            && !className.startsWith(CloseTracked.class.getPackage().getName())
-            && !className.startsWith("Proxy")
-            && !className.startsWith("akka")
-            && !className.startsWith("scala")
-            && !className.startsWith("sun.reflect")
-            && !className.startsWith("java.lang.reflect")
-            && !className.startsWith("org.apache.aries.blueprint")
-            && !className.startsWith("org.osgi.util.tracker");
-    }
-}
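
A rough usage sketch of the removed class, modeled on the TracingBrokerTest further down in this change: Mockito mocks stand in for the DOMDataBroker and codec that Blueprint normally injects, the watch paths are hypothetical, and printOpenTransactions produces the leak report described in the Javadoc above. This is illustrative only, under those assumptions.

    import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
    import static org.mockito.Mockito.mock;

    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
    import org.opendaylight.controller.md.sal.trace.dom.impl.TracingBroker;
    import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908.Config;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908.ConfigBuilder;

    public class TracingBrokerSketch {
        public static void main(String[] args) {
            // Stand-ins for the real wiring; Blueprint normally provides both.
            DOMDataBroker realBroker = mock(DOMDataBroker.class, RETURNS_DEEP_STUBS);
            BindingNormalizedNodeSerializer codec = mock(BindingNormalizedNodeSerializer.class);
            Config config = new ConfigBuilder().setTransactionDebugContextEnabled(true).build();

            TracingBroker tracingBroker = new TracingBroker("default", realBroker, config, codec);

            // Watch paths use each segment's class.getSimpleName() joined by '/';
            // "/Nodes/Node" is a hypothetical example. A null store watches both datastores.
            tracingBroker.watchWrites("/Nodes/Node", LogicalDatastoreType.CONFIGURATION);
            tracingBroker.watchRegistrations("/Nodes", null);

            // Leak report: prints allocation sites of transactions opened but never closed,
            // once at least minOpenTXs (here 1) are outstanding.
            boolean leaksFound = tracingBroker.printOpenTransactions(System.out, 1);
            System.out.println("leaks found: " + leaksFound);
        }
    }
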
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingReadOnlyTransaction.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingReadOnlyTransaction.java
deleted file mode 100644 (file)
index 56e8d90..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.dom.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.AbstractCloseTracked;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistry;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-class TracingReadOnlyTransaction
-        extends AbstractCloseTracked<TracingReadOnlyTransaction>
-        implements DOMDataReadOnlyTransaction {
-
-    private final DOMDataReadOnlyTransaction delegate;
-
-    TracingReadOnlyTransaction(DOMDataReadOnlyTransaction delegate,
-            CloseTrackedRegistry<TracingReadOnlyTransaction> readOnlyTransactionsRegistry) {
-        super(readOnlyTransactionsRegistry);
-        this.delegate = delegate;
-    }
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(LogicalDatastoreType store,
-            YangInstanceIdentifier path) {
-        return delegate.read(store, path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(LogicalDatastoreType store, YangInstanceIdentifier path) {
-        return delegate.exists(store, path);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-        super.removeFromTrackedRegistry();
-    }
-
-
-    // https://jira.opendaylight.org/browse/CONTROLLER-1792
-
-    @Override
-    public final boolean equals(Object object) {
-        return object == this || delegate.equals(object);
-    }
-
-    @Override
-    public final int hashCode() {
-        return delegate.hashCode();
-    }
-
-    @Override
-    public final String toString() {
-        return getClass().getName() + "; delegate=" + delegate;
-    }
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingReadWriteTransaction.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingReadWriteTransaction.java
deleted file mode 100644 (file)
index 82c1a3b..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.dom.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Objects;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTracked;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistry;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedTrait;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-class TracingReadWriteTransaction
-    extends AbstractTracingWriteTransaction
-        implements DOMDataReadWriteTransaction, CloseTracked<TracingReadWriteTransaction> {
-
-    private final CloseTrackedTrait<TracingReadWriteTransaction> closeTracker;
-    private final DOMDataReadWriteTransaction delegate;
-
-    TracingReadWriteTransaction(DOMDataReadWriteTransaction delegate, TracingBroker tracingBroker,
-            CloseTrackedRegistry<TracingReadWriteTransaction> readWriteTransactionsRegistry) {
-        super(delegate, tracingBroker);
-        this.closeTracker = new CloseTrackedTrait<>(readWriteTransactionsRegistry, this);
-        this.delegate = Objects.requireNonNull(delegate);
-    }
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(
-                                                            LogicalDatastoreType store, YangInstanceIdentifier yiid) {
-        return delegate.read(store, yiid);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(LogicalDatastoreType store, YangInstanceIdentifier yiid) {
-        return delegate.exists(store, yiid);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        closeTracker.removeFromTrackedRegistry();
-        return super.commit();
-    }
-
-    @Override
-    public boolean cancel() {
-        closeTracker.removeFromTrackedRegistry();
-        return super.cancel();
-    }
-
-    @Override
-    public StackTraceElement[] getAllocationContextStackTrace() {
-        return closeTracker.getAllocationContextStackTrace();
-    }
-
-    @Override
-    public CloseTracked<TracingReadWriteTransaction> getRealCloseTracked() {
-        return this;
-    }
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingTransactionChain.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingTransactionChain.java
deleted file mode 100644 (file)
index 2e5070f..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.dom.impl;
-
-import java.util.Objects;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.AbstractCloseTracked;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistry;
-
-class TracingTransactionChain extends AbstractCloseTracked<TracingTransactionChain> implements DOMTransactionChain {
-
-    private final DOMTransactionChain delegate;
-    private final TracingBroker tracingBroker;
-    private final CloseTrackedRegistry<TracingReadOnlyTransaction> readOnlyTransactionsRegistry;
-    private final CloseTrackedRegistry<TracingWriteTransaction> writeTransactionsRegistry;
-    private final CloseTrackedRegistry<TracingReadWriteTransaction> readWriteTransactionsRegistry;
-
-    TracingTransactionChain(DOMTransactionChain delegate, TracingBroker tracingBroker,
-            CloseTrackedRegistry<TracingTransactionChain> transactionChainsRegistry) {
-        super(transactionChainsRegistry);
-        this.delegate = Objects.requireNonNull(delegate);
-        this.tracingBroker = Objects.requireNonNull(tracingBroker);
-
-        final boolean isDebug = transactionChainsRegistry.isDebugContextEnabled();
-        String anchor = "TransactionChain@" + Integer.toHexString(hashCode());
-        this.readOnlyTransactionsRegistry  = new CloseTrackedRegistry<>(anchor, "newReadOnlyTransaction()", isDebug);
-        this.writeTransactionsRegistry     = new CloseTrackedRegistry<>(anchor, "newWriteOnlyTransaction()", isDebug);
-        this.readWriteTransactionsRegistry = new CloseTrackedRegistry<>(anchor, "newReadWriteTransaction()", isDebug);
-    }
-
-    @Override
-    @SuppressWarnings("resource")
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        final DOMDataReadOnlyTransaction tx = delegate.newReadOnlyTransaction();
-        return new TracingReadOnlyTransaction(tx, readOnlyTransactionsRegistry);
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        return new TracingReadWriteTransaction(delegate.newReadWriteTransaction(), tracingBroker,
-                readWriteTransactionsRegistry);
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        final DOMDataWriteTransaction tx = delegate.newWriteOnlyTransaction();
-        return new TracingWriteTransaction(tx, tracingBroker, writeTransactionsRegistry);
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-        super.removeFromTrackedRegistry();
-    }
-
-    public CloseTrackedRegistry<TracingReadOnlyTransaction> getReadOnlyTransactionsRegistry() {
-        return readOnlyTransactionsRegistry;
-    }
-
-    public CloseTrackedRegistry<TracingReadWriteTransaction> getReadWriteTransactionsRegistry() {
-        return readWriteTransactionsRegistry;
-    }
-
-    public CloseTrackedRegistry<TracingWriteTransaction> getWriteTransactionsRegistry() {
-        return writeTransactionsRegistry;
-    }
-
-
-    // https://jira.opendaylight.org/browse/CONTROLLER-1792
-
-    @Override
-    public final boolean equals(Object object) {
-        return object == this || delegate.equals(object);
-    }
-
-    @Override
-    public final int hashCode() {
-        return delegate.hashCode();
-    }
-
-    @Override
-    public final String toString() {
-        return getClass().getName() + "; delegate=" + delegate;
-    }
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingWriteTransaction.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/java/org/opendaylight/controller/md/sal/trace/dom/impl/TracingWriteTransaction.java
deleted file mode 100644 (file)
index 5fe313f..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.dom.impl;
-
-import com.google.common.util.concurrent.FluentFuture;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTracked;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistry;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedTrait;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-
-class TracingWriteTransaction extends AbstractTracingWriteTransaction
-        implements CloseTracked<TracingWriteTransaction> {
-
-    private final CloseTrackedTrait<TracingWriteTransaction> closeTracker;
-
-    TracingWriteTransaction(DOMDataWriteTransaction delegate, TracingBroker tracingBroker,
-            CloseTrackedRegistry<TracingWriteTransaction> writeTransactionsRegistry) {
-        super(delegate, tracingBroker);
-        this.closeTracker = new CloseTrackedTrait<>(writeTransactionsRegistry, this);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        closeTracker.removeFromTrackedRegistry();
-        return super.commit();
-    }
-
-    @Override
-    public boolean cancel() {
-        closeTracker.removeFromTrackedRegistry();
-        return super.cancel();
-    }
-
-    @Override
-    public StackTraceElement[] getAllocationContextStackTrace() {
-        return closeTracker.getAllocationContextStackTrace();
-    }
-
-    @Override
-    public CloseTracked<TracingWriteTransaction> getRealCloseTracked() {
-        return this;
-    }
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/resources/OSGI-INF/blueprint/impl-blueprint.xml b/opendaylight/md-sal/mdsal-trace/dom-impl/src/main/resources/OSGI-INF/blueprint/impl-blueprint.xml
deleted file mode 100644 (file)
index 95f4e1f..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
-Copyright © 2016 Red Hat and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-  xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-  odl:use-default-for-reference-types="true">
-
-  <odl:clustered-app-config id="mdsalConfig"
-                            binding-class="org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908.Config">
-  </odl:clustered-app-config>
-
-  <reference id="codec"
-        interface="org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer"
-        odl:type="default" />
-
-  <!-- Tracing Binding DataBroker -->
-
-  <reference id="realDefaultDOMBroker"
-      interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"
-      odl:type="default" />
-
-  <bean id="tracingDefaultDOMBroker" class="org.opendaylight.controller.md.sal.trace.dom.impl.TracingBroker">
-      <argument value="default" />
-      <argument ref="realDefaultDOMBroker" />
-      <argument ref="mdsalConfig" />
-      <argument ref="codec" />
-  </bean>
-
-  <service id="tracingDefaultDOMBrokerSvc" ref="tracingDefaultDOMBroker" ranking="10" odl:type="default">
-    <interfaces>
-      <value>org.opendaylight.controller.md.sal.dom.api.DOMDataBroker</value>
-      <value>org.opendaylight.controller.md.sal.trace.api.TracingDOMDataBroker</value>
-    </interfaces>
-  </service>
-
-  <!-- Tracing Binding PingPong DataBroker -->
-
-  <reference id="realPingPongDOMDataBroker"
-      interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"
-      odl:type="pingpong"/>
-
-  <bean id="tracingPingPongDOMBroker" class="org.opendaylight.controller.md.sal.trace.dom.impl.TracingBroker">
-      <argument value="pingpong" />
-      <argument ref="realPingPongDOMDataBroker" />
-      <argument ref="mdsalConfig" />
-      <argument ref="codec" />
-  </bean>
-
-  <service id="tracingPingPongDOMBrokerSvc" ref="tracingPingPongDOMBroker" ranking="10" odl:type="pingpong">
-    <interfaces>
-      <value>org.opendaylight.controller.md.sal.dom.api.DOMDataBroker</value>
-      <value>org.opendaylight.controller.md.sal.trace.api.TracingDOMDataBroker</value>
-    </interfaces>
-  </service>
-</blueprint>
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/test/java/org/opendaylight/controller/md/sal/trace/tests/CloseTrackedRegistryTest.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/test/java/org/opendaylight/controller/md/sal/trace/tests/CloseTrackedRegistryTest.java
deleted file mode 100644 (file)
index c91080c..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.tests;
-
-import static com.google.common.truth.Truth.assertThat;
-import static org.junit.Assert.fail;
-
-import java.util.Set;
-import java.util.function.Predicate;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.AbstractCloseTracked;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistry;
-import org.opendaylight.controller.md.sal.trace.closetracker.impl.CloseTrackedRegistryReportEntry;
-
-public class CloseTrackedRegistryTest {
-
-    private static class SomethingClosable extends AbstractCloseTracked<SomethingClosable> implements AutoCloseable {
-        SomethingClosable(CloseTrackedRegistry<SomethingClosable> transactionChainRegistry) {
-            super(transactionChainRegistry);
-        }
-
-        @Override
-        public void close() {
-            removeFromTrackedRegistry();
-        }
-    }
-
-    @Test
-    public void testDuplicateAllocationContexts() {
-        final CloseTrackedRegistry<SomethingClosable> registry =
-                new CloseTrackedRegistry<>(this, "testDuplicateAllocationContexts", true);
-
-        for (int i = 0; i < 100; i++) {
-            SomethingClosable isClosedManyTimes = new SomethingClosable(registry);
-            isClosedManyTimes.close();
-            someOtherMethodWhichDoesNotClose(registry);
-        }
-        @SuppressWarnings({ "resource", "unused" })
-        SomethingClosable forgotToCloseOnce = new SomethingClosable(registry);
-
-        Set<CloseTrackedRegistryReportEntry<SomethingClosable>> uniqueNonClosed = registry.getAllUnique();
-        assertThat(uniqueNonClosed).hasSize(2);
-        assertThatIterableContains(uniqueNonClosed, entry ->
-            entry.getNumberAddedNotRemoved() == 100 || entry.getNumberAddedNotRemoved() == 1);
-        uniqueNonClosed.forEach(entry -> {
-            if (entry.getNumberAddedNotRemoved() == 100) {
-                assertThatIterableContains(entry.getStackTraceElements(),
-                    element -> element.getMethodName().equals("someOtherMethodWhichDoesNotClose"));
-            } else if (entry.getNumberAddedNotRemoved() == 1) {
-                assertThatIterableContains(entry.getStackTraceElements(),
-                    element -> element.getMethodName().equals("testDuplicateAllocationContexts"));
-            } else {
-                fail("Unexpected number of added, not removed: " + entry.getNumberAddedNotRemoved());
-            }
-        });
-    }
-
-    // Something like this really should be in Google Truth...
-    private <T> void assertThatIterableContains(Iterable<T> iterable, Predicate<T> predicate) {
-        for (T element : iterable) {
-            if (predicate.test(element)) {
-                return;
-            }
-        }
-        fail("Iterable did not contain any element matching predicate");
-    }
-
-    @SuppressWarnings({ "resource", "unused" })
-    private void someOtherMethodWhichDoesNotClose(CloseTrackedRegistry<SomethingClosable> registry) {
-        new SomethingClosable(registry);
-    }
-
-    @Test
-    @SuppressWarnings({ "unused", "resource" })
-    public void testDebugContextDisabled() {
-        final CloseTrackedRegistry<SomethingClosable> debugContextDisabledRegistry =
-                new CloseTrackedRegistry<>(this, "testDebugContextDisabled", false);
-
-        SomethingClosable forgotToCloseOnce = new SomethingClosable(debugContextDisabledRegistry);
-
-        Set<CloseTrackedRegistryReportEntry<SomethingClosable>>
-            closeRegistryReport = debugContextDisabledRegistry.getAllUnique();
-        assertThat(closeRegistryReport).hasSize(1);
-
-        CloseTrackedRegistryReportEntry<SomethingClosable>
-            closeRegistryReportEntry1 = closeRegistryReport.iterator().next();
-        assertThat(closeRegistryReportEntry1.getNumberAddedNotRemoved()).isEqualTo(1);
-        assertThat(closeRegistryReportEntry1.getStackTraceElements()).isEmpty();
-    }
-}
diff --git a/opendaylight/md-sal/mdsal-trace/dom-impl/src/test/java/org/opendaylight/controller/md/sal/trace/tests/TracingBrokerTest.java b/opendaylight/md-sal/mdsal-trace/dom-impl/src/test/java/org/opendaylight/controller/md/sal/trace/tests/TracingBrokerTest.java
deleted file mode 100644 (file)
index 6e06279..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.trace.tests;
-
-import static com.google.common.truth.Truth.assertThat;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Collections.singletonList;
-import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
-import static org.mockito.Mockito.mock;
-
-import java.io.ByteArrayOutputStream;
-import java.io.PrintStream;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.test.DataBrokerTestModule;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.md.sal.trace.dom.impl.TracingBroker;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908.Config;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.mdsaltrace.rev160908.ConfigBuilder;
-
-/**
- * Test of {@link TracingBroker}.
- *
- * @author Michael Vorburger.ch
- */
-public class TracingBrokerTest {
-
-    @Test
-    public void testEnd2End() {
-        DataBrokerTestModule wiring = new DataBrokerTestModule(true);
-        wiring.getDataBroker(); // required so DataBrokerTestModule creates the DOMDataBroker
-        DOMDataBroker realDomBroker = wiring.getDOMDataBroker();
-        TracingBroker tracingDomBroker = new TracingBroker("Test", realDomBroker, new ConfigBuilder()
-                // CONTROLLER-1877: configure it like the default/initial mdsaltrace_config.xml in mdsal-trace-api
-                .setTransactionDebugContextEnabled(true)
-                .setWriteWatches(singletonList("/this/will/never/exist"))
-                .setRegistrationWatches(singletonList("/this/will/never/exist"))
-                .build(),
-                wiring.getBindingToNormalizedNodeCodec());
-        tracingDomBroker.newWriteOnlyTransaction().cancel();
-    }
-
-    @Test
-    @SuppressWarnings({ "resource", "unused" }) // Finding resource leaks is the point of this test
-    public void testPrintOpenTransactions() {
-        DOMDataBroker domDataBroker = mock(DOMDataBroker.class, RETURNS_DEEP_STUBS);
-        Config config = new ConfigBuilder().setTransactionDebugContextEnabled(true).build();
-        BindingNormalizedNodeSerializer codec = mock(BindingNormalizedNodeSerializer.class);
-        TracingBroker tracingBroker = new TracingBroker("mock", domDataBroker, config, codec);
-
-        for (int i = 0; i < 3; i++) {
-            DOMDataReadWriteTransaction tx = tracingBroker.newReadWriteTransaction();
-        }
-        DOMDataReadWriteTransaction anotherTx = tracingBroker.newReadWriteTransaction();
-
-        DOMTransactionChain txChain = tracingBroker.createTransactionChain(null);
-        DOMDataReadWriteTransaction txFromChain = txChain.newReadWriteTransaction();
-
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        PrintStream ps = new PrintStream(baos);
-        boolean printReturnValue = tracingBroker.printOpenTransactions(ps, 1);
-        String output = new String(baos.toByteArray(), UTF_8);
-
-        assertThat(printReturnValue).isTrue();
-        // Assert expectations about stack trace
-        assertThat(output).contains("testPrintOpenTransactions(TracingBrokerTest.java");
-        assertThat(output).doesNotContain(TracingBroker.class.getName());
-
-        String previousLine = "";
-        for (String line : output.split("\n")) {
-            if (line.contains("(...")) {
-                assertThat(previousLine.contains("(...)")).isFalse();
-            }
-            previousLine = line;
-        }
-
-        // assert that the sorting works - the x3 is shown before the x1
-        assertThat(output).contains("  DataBroker : newReadWriteTransaction()\n    3x");
-
-        // We don't do any verify/times on the mocks,
-        // because the main point of the test is just to verify that
-        // printOpenTransactions runs through without any exceptions
-        // (e.g. it used to have a ClassCastException).
-    }
-
-}
diff --git a/opendaylight/md-sal/mdsal-trace/features/features-mdsal-trace/pom.xml b/opendaylight/md-sal/mdsal-trace/features/features-mdsal-trace/pom.xml
deleted file mode 100644 (file)
index 652f7a0..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>feature-repo-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>features-mdsal-trace</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <name>OpenDaylight :: TracingBroker</name>
-    <packaging>feature</packaging>
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-mdsal-trace</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/features/odl-mdsal-trace/pom.xml b/opendaylight/md-sal/mdsal-trace/features/odl-mdsal-trace/pom.xml
deleted file mode 100644 (file)
index a71bac9..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>single-feature-parent</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>odl-mdsal-trace</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <name>OpenDaylight :: TracingBroker</name>
-    <packaging>feature</packaging>
-
-    <dependencyManagement>
-        <dependencies>
-            <dependency>
-                <groupId>org.opendaylight.controller</groupId>
-                <artifactId>mdsal-artifacts</artifactId>
-                <version>${project.version}</version>
-                <type>pom</type>
-                <scope>import</scope>
-            </dependency>
-        </dependencies>
-
-    </dependencyManagement>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-mdsal-broker</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>odl-mdsal-broker-local</artifactId>
-            <version>${project.version}</version>
-            <type>xml</type>
-            <classifier>features</classifier>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>mdsal-trace-api</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>mdsal-trace-dom-impl</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>mdsal-trace-binding-impl</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>mdsal-trace-cli</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-    </dependencies>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/features/odl-mdsal-trace/src/main/feature/feature.xml b/opendaylight/md-sal/mdsal-trace/features/odl-mdsal-trace/src/main/feature/feature.xml
deleted file mode 100644 (file)
index 8e6070b..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<features name="odl-mdsal-trace-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0">
-    <feature name="odl-mdsal-trace" version="${project.version}">
-        <configfile finalname="etc/opendaylight/datastore/initial/config/mdsaltrace_config.xml">mvn:org.opendaylight.controller/mdsal-trace-api/${project.version}/xml/config</configfile>
-    </feature>
-</features>
diff --git a/opendaylight/md-sal/mdsal-trace/features/pom.xml b/opendaylight/md-sal/mdsal-trace/features/pom.xml
deleted file mode 100644 (file)
index 06d6524..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright © 2016 Red Hat and others. All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>mdsal-trace-feature-aggregator</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>features-mdsal-trace</module>
-    <module>odl-mdsal-trace</module>
-  </modules>
-</project>
diff --git a/opendaylight/md-sal/mdsal-trace/pom.xml b/opendaylight/md-sal/mdsal-trace/pom.xml
deleted file mode 100644 (file)
index dc94c25..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright © 2016 Red Hat and others. All rights reserved.
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>mdsal-trace-aggregator</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <name>mdsaltrace</name>
-  <packaging>pom</packaging>
-
-  <modules>
-    <module>api</module>
-    <module>dom-impl</module>
-    <module>binding-impl</module>
-    <module>cli</module>
-    <module>features</module>
-  </modules>
-
-  <!-- DO NOT install or deploy the repo root pom as it's only needed to initiate a build -->
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-deploy-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-install-plugin</artifactId>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <profiles>
-    <profile>
-      <!--
-          This profile is to ensure we only build javadocs reports
-          when we plan to deploy Maven site for our project.
-      -->
-      <id>maven-site</id>
-      <activation>
-        <file>
-          <exists>${user.dir}/deploy-site.xml</exists>
-        </file>
-      </activation>
-
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-javadoc-plugin</artifactId>
-            <inherited>false</inherited>
-            <executions>
-              <execution>
-                <id>aggregate</id>
-                <goals>
-                  <goal>aggregate</goal>
-                </goals>
-                <phase>package</phase>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-
-</project>
diff --git a/opendaylight/md-sal/messagebus-api/pom.xml b/opendaylight/md-sal/messagebus-api/pom.xml
deleted file mode 100644 (file)
index 5cfe535..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
-        <relativePath>../parent</relativePath>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>messagebus-api</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>bundle</packaging>
-    <name>${project.artifactId}</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.controller.model</groupId>
-            <artifactId>model-inventory</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>yang-ext</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>ietf-topology</artifactId>
-        </dependency>
-    </dependencies>
-
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.felix</groupId>
-                <artifactId>maven-bundle-plugin</artifactId>
-                <extensions>true</extensions>
-                <configuration>
-                    <instructions>
-                        <Export-Package>org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.*</Export-Package>
-                    </instructions>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-
-</project>
diff --git a/opendaylight/md-sal/messagebus-api/src/main/yang/event-aggregator.yang b/opendaylight/md-sal/messagebus-api/src/main/yang/event-aggregator.yang
deleted file mode 100644 (file)
index ad7b573..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-module event-aggregator {
-    // FIXME: this module needs to be split up to concepts and API
-    //        as the concepts are shared with the other model in this
-    //        package.
-    yang-version 1;
-    namespace "urn:cisco:params:xml:ns:yang:messagebus:eventaggregator";
-    prefix "eventaggregator";
-
-    organization "Cisco Systems, Inc.";
-    contact "Robert Gallas";
-
-    description
-        "Module implementing message but RPC.
-
-        Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
-
-        This program and the accompanying materials are made available
-        under the terms of the Eclipse Public License v1.0 which
-        accompanies this distribution, and is available at
-        http://www.eclipse.org/legal/epl-v10.html";
-
-    revision "2014-12-02" {
-        description "Initial revision";
-    }
-
-    typedef pattern {
-        type string {
-            length 1..max;
-        }
-
-        // FIXME: make this a regular expression
-        description "A match pattern. Specifically this is a wildcard pattern.";
-    }
-
-    typedef notification-pattern {
-        type pattern;
-        description
-            "Pattern for matching candidate notification types. This pattern is to be
-            applied against the concatenation of the namespace of the module which
-            defines that particular notification, followed by a single colon, and
-            then followed by the notification identifier, as supplied in the argument to
-            the notification statement.";
-    }
-
-    typedef topic-id {
-        type string {
-            length 1..max;
-        }
-        description
-            "A topic identifier. It uniquely defines a topic as seen by the the user
-            of this model's RPCs";
-    }
-
-    // FIXME: we would really like to share instances here, but that requires some sort
-    //        of sane reference counting. The reason for sharing is the data path part
-    //        of notification delivery -- multiple creators of topics can still share
-    //        a single data path.
-    rpc create-topic {
-        description
-            "Create a new topic. A topic is an aggregation of several notification
-            types from a set of nodes. Each successful invocation results in a unique
-            topic being created. The caller is responsible for removing the topic
-            once it is no longer needed.";
-
-        input {
-            leaf notification-pattern {
-                type notification-pattern;
-                mandatory true;
-                description
-                    "Pattern matching notification which should be forwarded into this
-                    topic.";
-            }
-
-            leaf node-id-pattern {
-                type pattern;
-                mandatory true;
-                description
-                    "Pattern for matching candidate event source nodes when looking
-                    for contributors to the topic. The pattern will be applied against
-                    /network-topology/topology/node/node-id";
-            }
-        }
-
-        output {
-            leaf topic-id {
-                type topic-id;
-                mandatory true;
-            }
-        }
-    }
-
-    rpc destroy-topic {
-        description
-            "Destroy a topic. No further messages will be delivered to it.";
-
-        input {
-            leaf topic-id {
-                type topic-id;
-                mandatory true;
-            }
-        }
-    }
-
-    notification topic-notification {
-        description
-            "Notification of an event occuring on a particular node. This notification
-            acts as an encapsulation for the event being delivered.";
-
-        leaf topic-id {
-            type topic-id;
-            mandatory true;
-            description
-                "Topic to which this event is being delivered.";
-        }
-
-        leaf node-id {
-            // FIXME: should be topology node ID
-            type string;
-            mandatory true;
-            description
-                "Node ID of the node which generated the event.";
-        }
-
-        anyxml payload {
-            mandatory true;
-            description
-                "Encapsulated notification. The format is the XML representation of
-                a notification according to RFC6020 section 7.14.2.";
-        }
-    }
-}
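
For reference, the create-topic RPC above is consumed through the yangtools-generated EventAggregatorService binding, which EventSourceTopology (later in this patch) implements. A minimal sketch, assuming the generated CreateTopicInputBuilder and its setters follow the leaf names above, and that an EventAggregatorService instance has already been obtained -- illustrative only, not part of this change:

    // Illustrative sketch, not part of this change. Imports come from the same
    // rev141202 generated packages used by EventSourceTopology.java below.
    static TopicId createExampleTopic(final EventAggregatorService aggregator) throws Exception {
        final CreateTopicInput input = new CreateTopicInputBuilder()
                .setNotificationPattern(new NotificationPattern("*:node-updated")) // wildcard over notification types
                .setNodeIdPattern(new Pattern("openflow:*"))                        // wildcard over topology node ids
                .build();
        final RpcResult<CreateTopicOutput> result = aggregator.createTopic(input).get();
        return result.getResult().getTopicId();
    }
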
diff --git a/opendaylight/md-sal/messagebus-api/src/main/yang/event-source.yang b/opendaylight/md-sal/messagebus-api/src/main/yang/event-source.yang
deleted file mode 100644 (file)
index c90b266..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-module event-source {
-    yang-version 1;
-    namespace "urn:cisco:params:xml:ns:yang:messagebus:eventsource";
-    prefix "eventsource";
-
-    import event-aggregator { prefix aggr; }
-    import network-topology { prefix nt; revision-date "2013-10-21"; }
-    import opendaylight-inventory {prefix inv; revision-date "2013-08-19"; }
-    import yang-ext {prefix ext; revision-date "2013-07-09"; }
-
-    organization "Cisco Systems, Inc.";
-    contact "Robert Gallas";
-
-    description
-        "Base model for a topology where individual nodes can produce events.
-
-        Module implementing the event source topology and encapsulated notifications.
-
-        Copyright (c)2014 Cisco Systems, Inc. All rights reserved.
-
-        This program and the accompanying materials are made available
-        under the terms of the Eclipse Public License v1.0 which
-        accompanies this distribution, and is available at
-        http://www.eclipse.org/legal/epl-v10.html";
-
-    revision "2014-12-02" {
-        description "first revision
-            + add rpc dis-join-topic
-            + add notification event-source-status-notification";
-    }
-
-    // FIXME: expand this
-    typedef join-topic-status {
-        type enumeration {
-            enum up;
-            enum down;
-        }
-        description "Object status";
-    }
-
-    // FIXME: migrate to topology
-    typedef node-ref {
-        type leafref {
-            path "/inv:nodes/inv:node/inv:id";
-        }
-    }
-
-    typedef event-source-status {
-        type enumeration {
-            enum active;
-            enum inactive;
-            enum deactive;
-        }
-        description "Status of event source
-                       - active: event source is publishing notifications,
-                       - inactive: event source has stopped publishing notifications temporarily,
-                       - deactive: event source has stopped publishing notifications permanently";
-    }
-
-    grouping topology-event-source-type {
-        container topology-event-source {
-            presence "indicates an event source-aware topology";
-        }
-    }
-
-    rpc join-topic {
-        input {
-            leaf node {
-               ext:context-reference "inv:node-context";
-               type "instance-identifier";
-            }
-            leaf topic-id {
-                type aggr:topic-id;
-                description "in current implementation notification-pattern is defined by topic-id.
-                             By persisting topic definition we could omit notification-pattern";
-            }
-            leaf notification-pattern {
-                type aggr:notification-pattern;
-            }
-        }
-
-        output {
-            leaf status {
-                type join-topic-status;
-            }
-        }
-    }
-
-    rpc dis-join-topic {
-        input {
-            leaf node {
-               ext:context-reference "inv:node-context";
-               type "instance-identifier";
-            }
-            leaf topic-id {
-                type aggr:topic-id;
-                mandatory true;
-                description "identifier of topic to be disjoin";
-            }
-        }
-
-    }
-
-    notification event-source-status-notification {
-
-        description
-            "Notification of change event source status.";
-
-        leaf status {
-            type event-source-status;
-            mandatory true;
-            description "Current status of event source.";
-        }
-
-    }
-
-    augment "/nt:network-topology/nt:topology/nt:topology-types" {
-        uses topology-event-source-type;
-    }
-
-    augment "/nt:network-topology/nt:topology/nt:node" {
-        when "../../nt:topology-types/topology-event-source";
-        leaf event-source-node {
-            type node-ref;
-        }
-    }
-}
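
For reference, joining an event source node to a topic mirrors what EventSourceTopic.getJoinTopicInputArgument() does in the implementation later in this patch. A minimal sketch against the generated EventSourceService binding, assuming the node instance identifier and topic already exist -- illustrative only, not part of this change:

    // Illustrative sketch, not part of this change. Uses the generated
    // JoinTopicInputBuilder and JoinTopicStatus from the rev141202 bindings.
    static boolean joinTopic(final EventSourceService sourceService, final InstanceIdentifier<?> nodePath,
            final TopicId topicId, final NotificationPattern pattern) throws Exception {
        final JoinTopicInput input = new JoinTopicInputBuilder()
                .setNode(new NodeRef(nodePath).getValue())
                .setTopicId(topicId)
                .setNotificationPattern(pattern)
                .build();
        final RpcResult<JoinTopicOutput> result = sourceService.joinTopic(input).get();
        return result.isSuccessful() && result.getResult().getStatus() == JoinTopicStatus.Up;
    }
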
diff --git a/opendaylight/md-sal/messagebus-impl/pom.xml b/opendaylight/md-sal/messagebus-impl/pom.xml
deleted file mode 100644 (file)
index 02380f4..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
-        <relativePath>../parent</relativePath>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <version>1.10.0-SNAPSHOT</version>
-    <artifactId>messagebus-impl</artifactId>
-    <name>${project.artifactId}</name>
-    <packaging>bundle</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>mdsal-binding-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-util</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-spi</artifactId>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceRegistrationImpl.java b/opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceRegistrationImpl.java
deleted file mode 100644 (file)
index dac4f8f..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.messagebus.spi.EventSource;
-import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-
-class EventSourceRegistrationImpl<T extends EventSource> extends AbstractObjectRegistration<T>
-        implements EventSourceRegistration<T> {
-
-    private final EventSourceTopology eventSourceTopology;
-
-    /**
-     * Constructor.
-     *
-     * @param instance the EventSource instance that has been registered by
-     *     {@link EventSourceRegistryImpl#registerEventSource(Node, EventSource)}
-     */
-    EventSourceRegistrationImpl(T instance, EventSourceTopology eventSourceTopology) {
-        super(instance);
-        this.eventSourceTopology = Preconditions.checkNotNull(eventSourceTopology);
-    }
-
-    @Override
-    protected void removeRegistration() {
-        this.eventSourceTopology.unRegister(getInstance());
-    }
-}
diff --git a/opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopic.java b/opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopic.java
deleted file mode 100644 (file)
index 6c9e17b..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collection;
-import java.util.List;
-import java.util.Optional;
-import java.util.UUID;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.ExecutionException;
-import java.util.regex.Pattern;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.api.DataTreeModification;
-import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.DisJoinTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.DisJoinTopicInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.DisJoinTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class EventSourceTopic implements DataTreeChangeListener<Node>, AutoCloseable {
-    private static final Logger LOG = LoggerFactory.getLogger(EventSourceTopic.class);
-    private final NotificationPattern notificationPattern;
-    private final EventSourceService sourceService;
-    private final Pattern nodeIdPattern;
-    private final TopicId topicId;
-    private ListenerRegistration<?> listenerRegistration;
-    private final CopyOnWriteArraySet<InstanceIdentifier<?>> joinedEventSources = new CopyOnWriteArraySet<>();
-
-    public static EventSourceTopic create(final NotificationPattern notificationPattern,
-            final String nodeIdRegexPattern, final EventSourceTopology eventSourceTopology) {
-        final EventSourceTopic est = new EventSourceTopic(notificationPattern, nodeIdRegexPattern,
-                eventSourceTopology.getEventSourceService());
-        est.registerListener(eventSourceTopology);
-        est.notifyExistingNodes(eventSourceTopology);
-        return est;
-    }
-
-    private EventSourceTopic(final NotificationPattern notificationPattern, final String nodeIdRegexPattern,
-            final EventSourceService sourceService) {
-        this.notificationPattern = requireNonNull(notificationPattern);
-        this.sourceService = requireNonNull(sourceService);
-        this.nodeIdPattern = Pattern.compile(nodeIdRegexPattern);
-        this.topicId = new TopicId(getUUIDIdent());
-        this.listenerRegistration = null;
-        LOG.info("EventSourceTopic created - topicId {}", topicId.getValue());
-    }
-
-    public TopicId getTopicId() {
-        return topicId;
-    }
-
-    @Override
-    public void onDataTreeChanged(final Collection<DataTreeModification<Node>> changes) {
-        for (DataTreeModification<Node> change: changes) {
-            final DataObjectModification<Node> rootNode = change.getRootNode();
-            switch (rootNode.getModificationType()) {
-                case WRITE:
-                case SUBTREE_MODIFIED:
-                    final Node node = rootNode.getDataAfter();
-                    if (getNodeIdRegexPattern().matcher(node.getNodeId().getValue()).matches()) {
-                        notifyNode(change.getRootPath().getRootIdentifier());
-                    }
-                    break;
-                default:
-                    break;
-            }
-        }
-    }
-
-    public void notifyNode(final InstanceIdentifier<?> nodeId) {
-        LOG.debug("Notify node: {}", nodeId);
-        try {
-            final RpcResult<JoinTopicOutput> rpcResultJoinTopic =
-                    sourceService.joinTopic(getJoinTopicInputArgument(nodeId)).get();
-            if (!rpcResultJoinTopic.isSuccessful()) {
-                for (final RpcError err : rpcResultJoinTopic.getErrors()) {
-                    LOG.error("Can not join topic: [{}] on node: [{}]. Error: {}", getTopicId().getValue(),
-                            nodeId.toString(), err.toString());
-                }
-            } else {
-                joinedEventSources.add(nodeId);
-            }
-        } catch (InterruptedException | ExecutionException e) {
-            LOG.error("Could not invoke join topic for node {}", nodeId);
-        }
-    }
-
-    private void notifyExistingNodes(final EventSourceTopology eventSourceTopology) {
-        LOG.debug("Notify existing nodes");
-        final Pattern nodeRegex = this.nodeIdPattern;
-
-        final ReadTransaction tx = eventSourceTopology.getDataBroker().newReadOnlyTransaction();
-        final ListenableFuture<Optional<Topology>> future =
-                tx.read(LogicalDatastoreType.OPERATIONAL, EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH);
-
-        Futures.addCallback(future, new FutureCallback<Optional<Topology>>() {
-            @Override
-            public void onSuccess(final Optional<Topology> data) {
-                if (data.isPresent()) {
-                    final List<Node> nodes = data.get().getNode();
-                    if (nodes != null) {
-                        for (final Node node : nodes) {
-                            if (nodeRegex.matcher(node.getNodeId().getValue()).matches()) {
-                                notifyNode(EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class,
-                                    node.key()));
-                            }
-                        }
-                    }
-                }
-                tx.close();
-            }
-
-            @Override
-            public void onFailure(final Throwable ex) {
-                LOG.error("Can not notify existing nodes", ex);
-                tx.close();
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    private JoinTopicInput getJoinTopicInputArgument(final InstanceIdentifier<?> path) {
-        final NodeRef nodeRef = new NodeRef(path);
-        final JoinTopicInput jti =
-                new JoinTopicInputBuilder()
-                        .setNode(nodeRef.getValue())
-                        .setTopicId(topicId)
-                        .setNotificationPattern(notificationPattern)
-                        .build();
-        return jti;
-    }
-
-    public Pattern getNodeIdRegexPattern() {
-        return nodeIdPattern;
-    }
-
-    private DisJoinTopicInput getDisJoinTopicInputArgument(final InstanceIdentifier<?> eventSourceNodeId) {
-        final NodeRef nodeRef = new NodeRef(eventSourceNodeId);
-        final DisJoinTopicInput dji = new DisJoinTopicInputBuilder()
-                .setNode(nodeRef.getValue())
-                .setTopicId(topicId)
-                .build();
-        return dji;
-    }
-
-    private void registerListener(final EventSourceTopology eventSourceTopology) {
-        this.listenerRegistration = eventSourceTopology.getDataBroker().registerDataTreeChangeListener(
-            DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL,
-                EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class)), this);
-    }
-
-    @Override
-    public void close() {
-        if (this.listenerRegistration != null) {
-            this.listenerRegistration.close();
-        }
-        for (final InstanceIdentifier<?> eventSourceNodeId : joinedEventSources) {
-            try {
-                final RpcResult<DisJoinTopicOutput> result = sourceService
-                        .disJoinTopic(getDisJoinTopicInputArgument(eventSourceNodeId)).get();
-                if (!result.isSuccessful()) {
-                    for (final RpcError err : result.getErrors()) {
-                        LOG.error("Can not destroy topic: [{}] on node: [{}]. Error: {}", getTopicId().getValue(),
-                                eventSourceNodeId, err.toString());
-                    }
-                }
-            } catch (InterruptedException | ExecutionException ex) {
-                LOG.error("Can not close event source topic / destroy topic {} on node {}.", this.topicId.getValue(),
-                        eventSourceNodeId, ex);
-            }
-        }
-        joinedEventSources.clear();
-    }
-
-    private static String getUUIDIdent() {
-        final UUID uuid = UUID.randomUUID();
-        return uuid.toString();
-    }
-}
diff --git a/opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopology.java b/opendaylight/md-sal/messagebus-impl/src/main/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopology.java
deleted file mode 100644 (file)
index 7f53a53..0000000
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.opendaylight.controller.messagebus.app.util.Util;
-import org.opendaylight.controller.messagebus.spi.EventSource;
-import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
-import org.opendaylight.controller.messagebus.spi.EventSourceRegistry;
-import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicOutput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.Node1;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.Node1Builder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.TopologyTypes1;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.TopologyTypes1Builder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.topology.event.source.type.TopologyEventSource;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.topology.event.source.type.TopologyEventSourceBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.TopologyTypes;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class EventSourceTopology implements EventAggregatorService, EventSourceRegistry {
-    private static final Logger LOG = LoggerFactory.getLogger(EventSourceTopology.class);
-
-    private static final String TOPOLOGY_ID = "EVENT-SOURCE-TOPOLOGY" ;
-    private static final TopologyKey EVENT_SOURCE_TOPOLOGY_KEY = new TopologyKey(new TopologyId(TOPOLOGY_ID));
-    private static final LogicalDatastoreType OPERATIONAL = LogicalDatastoreType.OPERATIONAL;
-
-    static final InstanceIdentifier<Topology> EVENT_SOURCE_TOPOLOGY_PATH =
-            InstanceIdentifier.create(NetworkTopology.class).child(Topology.class, EVENT_SOURCE_TOPOLOGY_KEY);
-
-    private static final InstanceIdentifier<TopologyTypes1> TOPOLOGY_TYPE_PATH = EVENT_SOURCE_TOPOLOGY_PATH
-            .child(TopologyTypes.class).augmentation(TopologyTypes1.class);
-
-    private final Map<TopicId, EventSourceTopic> eventSourceTopicMap = new ConcurrentHashMap<>();
-    private final Map<NodeKey, Registration> routedRpcRegistrations = new ConcurrentHashMap<>();
-
-    private final DataBroker dataBroker;
-    private final ObjectRegistration<EventSourceTopology> aggregatorRpcReg;
-    private final EventSourceService eventSourceService;
-    private final RpcProviderService rpcRegistry;
-
-    public EventSourceTopology(final DataBroker dataBroker, final RpcProviderService providerService,
-            RpcConsumerRegistry rpcService) {
-
-        this.dataBroker = dataBroker;
-        this.rpcRegistry = providerService;
-        aggregatorRpcReg = providerService.registerRpcImplementation(EventAggregatorService.class, this);
-        eventSourceService = rpcService.getRpcService(EventSourceService.class);
-
-        final TopologyEventSource topologySource = new TopologyEventSourceBuilder().build();
-        final TopologyTypes1 topologyTypeAugment =
-                new TopologyTypes1Builder().setTopologyEventSource(topologySource).build();
-        putData(OPERATIONAL, TOPOLOGY_TYPE_PATH, topologyTypeAugment);
-        LOG.info("EventSourceRegistry has been initialized");
-    }
-
-    private <T extends DataObject> void putData(final LogicalDatastoreType store,
-                                                 final InstanceIdentifier<T> path,
-                                                 final T data) {
-
-        final WriteTransaction tx = getDataBroker().newWriteOnlyTransaction();
-        tx.mergeParentStructurePut(store, path, data);
-        tx.commit().addCallback(new FutureCallback<CommitInfo>() {
-            @Override
-            public void onSuccess(final CommitInfo result) {
-                LOG.trace("Data has put into datastore {} {}", store, path);
-            }
-
-            @Override
-            public void onFailure(final Throwable ex) {
-                LOG.error("Can not put data into datastore [store: {}] [path: {}]", store, path, ex);
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    private <T extends DataObject> void deleteData(final LogicalDatastoreType store,
-            final InstanceIdentifier<T> path) {
-        final WriteTransaction tx = getDataBroker().newWriteOnlyTransaction();
-        tx.delete(store, path);
-        tx.commit().addCallback(new FutureCallback<CommitInfo>() {
-            @Override
-            public void onSuccess(final CommitInfo result) {
-                LOG.trace("Data has deleted from datastore {} {}", store, path);
-            }
-
-            @Override
-            public void onFailure(final Throwable ex) {
-                LOG.error("Can not delete data from datastore [store: {}] [path: {}]", store, path, ex);
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    private void insert(final KeyedInstanceIdentifier<Node, NodeKey> sourcePath) {
-        final NodeKey nodeKey = sourcePath.getKey();
-        final InstanceIdentifier<Node1> augmentPath = sourcePath.augmentation(Node1.class);
-        final Node1 nodeAugment = new Node1Builder().setEventSourceNode(
-                new NodeId(nodeKey.getNodeId().getValue())).build();
-        putData(OPERATIONAL, augmentPath, nodeAugment);
-    }
-
-    private void remove(final KeyedInstanceIdentifier<Node, NodeKey> sourcePath) {
-        final InstanceIdentifier<Node1> augmentPath = sourcePath.augmentation(Node1.class);
-        deleteData(OPERATIONAL, augmentPath);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<CreateTopicOutput>> createTopic(final CreateTopicInput input) {
-        LOG.debug("Received Topic creation request: NotificationPattern -> {}, NodeIdPattern -> {}",
-                input.getNotificationPattern(),
-                input.getNodeIdPattern());
-
-        final NotificationPattern notificationPattern = new NotificationPattern(input.getNotificationPattern());
-        // FIXME: do not use Util.wildcardToRegex - NodeIdPattern should be regex
-        final String nodeIdPattern = input.getNodeIdPattern().getValue();
-        final EventSourceTopic eventSourceTopic = EventSourceTopic.create(notificationPattern, nodeIdPattern, this);
-
-        eventSourceTopicMap.put(eventSourceTopic.getTopicId(), eventSourceTopic);
-
-        final CreateTopicOutput cto = new CreateTopicOutputBuilder()
-                .setTopicId(eventSourceTopic.getTopicId())
-                .build();
-
-        LOG.info("Topic has been created: NotificationPattern -> {}, NodeIdPattern -> {}",
-                input.getNotificationPattern(),
-                input.getNodeIdPattern());
-
-        return Util.resultRpcSuccessFor(cto);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<DestroyTopicOutput>> destroyTopic(final DestroyTopicInput input) {
-        final EventSourceTopic topicToDestroy = eventSourceTopicMap.remove(input.getTopicId());
-        if (topicToDestroy != null) {
-            topicToDestroy.close();
-        }
-        return Util.resultRpcSuccessFor(new DestroyTopicOutputBuilder().build());
-    }
-
-    @Override
-    public void close() {
-        aggregatorRpcReg.close();
-        eventSourceTopicMap.values().forEach(EventSourceTopic::close);
-    }
-
-    public void register(final EventSource eventSource) {
-        final NodeKey nodeKey = eventSource.getSourceNodeKey();
-        final KeyedInstanceIdentifier<Node, NodeKey> sourcePath = EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, nodeKey);
-        final Registration reg = rpcRegistry.registerRpcImplementation(EventSourceService.class, eventSource,
-            Collections.singleton(sourcePath));
-        routedRpcRegistrations.put(nodeKey, reg);
-        insert(sourcePath);
-    }
-
-    public void unRegister(final EventSource eventSource) {
-        final NodeKey nodeKey = eventSource.getSourceNodeKey();
-        final KeyedInstanceIdentifier<Node, NodeKey> sourcePath = EVENT_SOURCE_TOPOLOGY_PATH.child(Node.class, nodeKey);
-        final Registration removeRegistration = routedRpcRegistrations.remove(nodeKey);
-        if (removeRegistration != null) {
-            removeRegistration.close();
-            remove(sourcePath);
-        }
-    }
-
-    @Override
-    public <T extends EventSource> EventSourceRegistration<T> registerEventSource(final T eventSource) {
-        final EventSourceRegistrationImpl<T> esr = new EventSourceRegistrationImpl<>(eventSource, this);
-        register(eventSource);
-        return esr;
-    }
-
-    DataBroker getDataBroker() {
-        return dataBroker;
-    }
-
-    EventSourceService getEventSourceService() {
-        return eventSourceService;
-    }
-
-    @VisibleForTesting
-    Map<NodeKey, Registration> getRoutedRpcRegistrations() {
-        return routedRpcRegistrations;
-    }
-
-    @VisibleForTesting
-    Map<TopicId, EventSourceTopic> getEventSourceTopicMap() {
-        return eventSourceTopicMap;
-    }
-}
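
For reference, the registry side of this class is consumed through the messagebus SPI: registerEventSource() wraps the supplied EventSource in an EventSourceRegistrationImpl, publishes its routed EventSourceService RPCs and writes the node's event-source-node augmentation into the EVENT-SOURCE-TOPOLOGY. A minimal lifecycle sketch, where `registry` and `myEventSource` are hypothetical instances of the SPI types removed in this patch -- illustrative only, not part of this change:

    // Illustrative sketch, not part of this change.
    final EventSourceRegistration<EventSource> registration = registry.registerEventSource(myEventSource);
    // the source node's routed RPCs are now registered and the node appears
    // under the EVENT-SOURCE-TOPOLOGY, so it can be joined to topics
    registration.close(); // removeRegistration() -> unRegister(): routed RPCs and the node entry are removed
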
diff --git a/opendaylight/md-sal/messagebus-impl/src/main/resources/OSGI-INF/blueprint/messagebus.xml b/opendaylight/md-sal/messagebus-impl/src/main/resources/OSGI-INF/blueprint/messagebus.xml
deleted file mode 100644 (file)
index 24a2013..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Copyright (c) 2017 Inocybe Technologies Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           odl:use-default-for-reference-types="true">
-
-  <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker"/>
-  <reference id="consumerRegistry" interface="org.opendaylight.mdsal.binding.api.RpcConsumerRegistry"/>
-  <reference id="providerRegistry" interface="org.opendaylight.mdsal.binding.api.RpcProviderService"/>
-
-  <bean id="eventSourceTopology" class="org.opendaylight.controller.messagebus.app.impl.EventSourceTopology"
-          destroy-method="close">
-    <argument ref="dataBroker"/>
-    <argument ref="providerRegistry"/>
-    <argument ref="consumerRegistry"/>
-  </bean>
-
-  <service ref="eventSourceTopology"
-          interface="org.opendaylight.controller.messagebus.spi.EventSourceRegistry"/>
-</blueprint>
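
For reference, the blueprint above is equivalent to constructing and publishing the bean by hand, much as the unit tests below do. A minimal sketch, assuming dataBroker, rpcProviderService and rpcConsumerRegistry have already been obtained from the OSGi service registry -- illustrative only, not part of this change:

    // Illustrative sketch, not part of this change.
    final EventSourceTopology topology =
            new EventSourceTopology(dataBroker, rpcProviderService, rpcConsumerRegistry);
    // publish `topology` as org.opendaylight.controller.messagebus.spi.EventSourceRegistry,
    // then on shutdown mirror destroy-method="close":
    topology.close();
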
diff --git a/opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceRegistrationImplTest.java b/opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceRegistrationImplTest.java
deleted file mode 100644 (file)
index 838d174..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.controller.messagebus.spi.EventSource;
-
-public class EventSourceRegistrationImplTest {
-
-    EventSourceRegistrationImplLocal eventSourceRegistrationImplLocal;
-    EventSourceTopology eventSourceTopologyMock;
-
-    @BeforeClass
-    public static void initTestClass() {
-    }
-
-    @Before
-    public void setUp() {
-        EventSource eventSourceMock = mock(EventSource.class);
-        eventSourceTopologyMock = mock(EventSourceTopology.class);
-        eventSourceRegistrationImplLocal = new EventSourceRegistrationImplLocal(eventSourceMock,
-                eventSourceTopologyMock);
-    }
-
-    @Test
-    public void removeRegistrationTest() {
-        eventSourceRegistrationImplLocal.removeRegistration();
-        verify(eventSourceTopologyMock, times(1)).unRegister(any(EventSource.class));
-    }
-
-
-    private class EventSourceRegistrationImplLocal extends EventSourceRegistrationImpl<EventSource> {
-        EventSourceRegistrationImplLocal(EventSource instance, EventSourceTopology eventSourceTopology) {
-            super(instance, eventSourceTopology);
-        }
-    }
-
-}
diff --git a/opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopicTest.java b/opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopicTest.java
deleted file mode 100644 (file)
index d785287..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collections;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.api.DataTreeModification;
-import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.JoinTopicStatus;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-
-public class EventSourceTopicTest {
-
-    EventSourceTopic eventSourceTopic;
-    DataBroker dataBrokerMock;
-    EventSourceService eventSourceServiceMock;
-    EventSourceTopology eventSourceTopologyMock;
-
-    @BeforeClass
-    public static void initTestClass() {
-    }
-
-    @Before
-    public void setUp() {
-        final NotificationPattern notificationPattern = new NotificationPattern("value1");
-        eventSourceServiceMock = mock(EventSourceService.class);
-        doReturn(RpcResultBuilder.success(new JoinTopicOutputBuilder().setStatus(JoinTopicStatus.Up).build())
-                .buildFuture()).when(eventSourceServiceMock).joinTopic(any(JoinTopicInput.class));
-
-        eventSourceTopologyMock = mock(EventSourceTopology.class);
-        dataBrokerMock = mock(DataBroker.class);
-        doReturn(eventSourceServiceMock).when(eventSourceTopologyMock).getEventSourceService();
-        doReturn(dataBrokerMock).when(eventSourceTopologyMock).getDataBroker();
-
-        WriteTransaction writeTransactionMock = mock(WriteTransaction.class);
-        doReturn(writeTransactionMock).when(dataBrokerMock).newWriteOnlyTransaction();
-        doNothing().when(writeTransactionMock).mergeParentStructurePut(any(LogicalDatastoreType.class),
-                any(InstanceIdentifier.class), any(DataObject.class));
-        FluentFuture checkedFutureWriteMock = mock(FluentFuture.class);
-        doReturn(checkedFutureWriteMock).when(writeTransactionMock).commit();
-
-        ReadTransaction readOnlyTransactionMock = mock(ReadTransaction.class);
-        doReturn(readOnlyTransactionMock).when(dataBrokerMock).newReadOnlyTransaction();
-        FluentFuture checkedFutureReadMock = mock(FluentFuture.class);
-        doReturn(checkedFutureReadMock).when(readOnlyTransactionMock).read(LogicalDatastoreType.OPERATIONAL,
-                EventSourceTopology.EVENT_SOURCE_TOPOLOGY_PATH);
-        eventSourceTopic = EventSourceTopic.create(notificationPattern, "nodeIdPattern1", eventSourceTopologyMock);
-    }
-
-    @Test
-    public void createModuleTest() {
-        assertNotNull("Instance has not been created correctly.", eventSourceTopic);
-    }
-
-    @Test
-    public void getTopicIdTest() {
-        assertNotNull("Topic has not been created correctly.", eventSourceTopic.getTopicId());
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test
-    public void onDataTreeChangedTest() {
-        InstanceIdentifier<Node> instanceIdentifierMock = mock(InstanceIdentifier.class);
-        DataTreeModification<Node> mockDataTreeModification = mock(DataTreeModification.class);
-        DataObjectModification<Node> mockModification = mock(DataObjectModification.class);
-        doReturn(mockModification).when(mockDataTreeModification).getRootNode();
-        doReturn(DataTreeIdentifier.create(LogicalDatastoreType.OPERATIONAL, instanceIdentifierMock))
-                .when(mockDataTreeModification).getRootPath();
-        doReturn(DataObjectModification.ModificationType.WRITE).when(mockModification).getModificationType();
-
-        Node dataObjectNodeMock = mock(Node.class);
-        doReturn(getNodeKey("testNodeId01")).when(dataObjectNodeMock).key();
-        NodeId nodeIdMock = mock(NodeId.class);
-        doReturn(nodeIdMock).when(dataObjectNodeMock).getNodeId();
-        doReturn("nodeIdPattern1").when(nodeIdMock).getValue();
-
-        doReturn(dataObjectNodeMock).when(mockModification).getDataAfter();
-
-        eventSourceTopic.onDataTreeChanged(Collections.singletonList(mockDataTreeModification));
-        verify(dataObjectNodeMock).getNodeId();
-        verify(nodeIdMock).getValue();
-    }
-
-    @Test
-    public void notifyNodeTest() {
-        InstanceIdentifier instanceIdentifierMock = mock(InstanceIdentifier.class);
-        eventSourceTopic.notifyNode(instanceIdentifierMock);
-        verify(eventSourceServiceMock, times(1)).joinTopic(any(JoinTopicInput.class));
-    }
-
-    public NodeKey getNodeKey(final String nodeId) {
-        return new NodeKey(new NodeId(nodeId));
-    }
-}
diff --git a/opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopologyTest.java b/opendaylight/md-sal/messagebus-impl/src/test/java/org/opendaylight/controller/messagebus/app/impl/EventSourceTopologyTest.java
deleted file mode 100644 (file)
index 79a7a55..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.impl;
-
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.messagebus.spi.EventSource;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.api.ReadTransaction;
-import org.opendaylight.mdsal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.binding.api.RpcProviderService;
-import org.opendaylight.mdsal.binding.api.WriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.CreateTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInput;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.DestroyTopicInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.EventAggregatorService;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.NotificationPattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.Pattern;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicId;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class EventSourceTopologyTest {
-
-    EventSourceTopology eventSourceTopology;
-    DataBroker dataBrokerMock;
-    RpcProviderService rpcProviderRegistryMock;
-    RpcConsumerRegistry rpcServiceMock;
-    CreateTopicInput createTopicInputMock;
-    ListenerRegistration<?> listenerRegistrationMock;
-    NodeKey nodeKey;
-    ObjectRegistration<EventAggregatorService> aggregatorRpcReg;
-
-    @Before
-    public void setUp() {
-        dataBrokerMock = mock(DataBroker.class);
-        rpcProviderRegistryMock = mock(RpcProviderService.class);
-        rpcServiceMock = mock(RpcConsumerRegistry.class);
-    }
-
-    @Test
-    public void constructorTest() {
-        constructorTestHelper();
-        eventSourceTopology = new EventSourceTopology(dataBrokerMock, rpcProviderRegistryMock, rpcServiceMock);
-        assertNotNull("Instance has not been created correctly.", eventSourceTopology);
-    }
-
-    private void constructorTestHelper() {
-        aggregatorRpcReg = mock(ObjectRegistration.class);
-        EventSourceService eventSourceService = mock(EventSourceService.class);
-        doReturn(aggregatorRpcReg).when(rpcProviderRegistryMock).registerRpcImplementation(
-            eq(EventAggregatorService.class), any(EventSourceTopology.class));
-        doReturn(eventSourceService).when(rpcServiceMock).getRpcService(EventSourceService.class);
-        WriteTransaction writeTransactionMock = mock(WriteTransaction.class);
-        doReturn(writeTransactionMock).when(dataBrokerMock).newWriteOnlyTransaction();
-        doNothing().when(writeTransactionMock).mergeParentStructurePut(any(LogicalDatastoreType.class),
-            any(InstanceIdentifier.class), any(DataObject.class));
-        FluentFuture checkedFutureMock = mock(FluentFuture.class);
-        doReturn(checkedFutureMock).when(writeTransactionMock).commit();
-    }
-
-    @Test
-    public void createTopicTest() throws Exception {
-        topicTestHelper();
-        assertNotNull("Topic has not been created correctly.", eventSourceTopology.createTopic(createTopicInputMock));
-    }
-
-    @Test
-    public void destroyTopicTest() throws Exception {
-        topicTestHelper();
-        TopicId topicId = new TopicId("topic-id-007");
-        Map<TopicId, EventSourceTopic> localMap = eventSourceTopology.getEventSourceTopicMap();
-        EventSourceTopic eventSourceTopic = EventSourceTopic.create(new NotificationPattern("foo"),
-                "pattern", eventSourceTopology);
-        localMap.put(topicId, eventSourceTopic);
-        DestroyTopicInput input = new DestroyTopicInputBuilder().setTopicId(topicId).build();
-        eventSourceTopology.destroyTopic(input);
-        verify(listenerRegistrationMock, times(1)).close();
-    }
-
-    private void topicTestHelper() throws Exception {
-        constructorTestHelper();
-        createTopicInputMock = mock(CreateTopicInput.class);
-        eventSourceTopology = new EventSourceTopology(dataBrokerMock, rpcProviderRegistryMock, rpcServiceMock);
-
-        NotificationPattern notificationPattern = new NotificationPattern("value1");
-        doReturn(notificationPattern).when(createTopicInputMock).getNotificationPattern();
-        Pattern pattern = new Pattern("valuePattern1");
-        doReturn(pattern).when(createTopicInputMock).getNodeIdPattern();
-
-        listenerRegistrationMock = mock(ListenerRegistration.class);
-        doReturn(listenerRegistrationMock).when(dataBrokerMock).registerDataTreeChangeListener(
-                any(DataTreeIdentifier.class), any(EventSourceTopic.class));
-
-        ReadTransaction readOnlyTransactionMock = mock(ReadTransaction.class);
-        doReturn(readOnlyTransactionMock).when(dataBrokerMock).newReadOnlyTransaction();
-
-        FluentFuture checkedFutureMock = mock(FluentFuture.class);
-        doReturn(checkedFutureMock).when(readOnlyTransactionMock).read(eq(LogicalDatastoreType.OPERATIONAL),
-                any(InstanceIdentifier.class));
-        Topology topologyMock = mock(Topology.class);
-        doReturn(Optional.of(topologyMock)).when(checkedFutureMock).get();
-
-        Node nodeMock = mock(Node.class);
-        List<Node> nodeList = new ArrayList<>();
-        nodeList.add(nodeMock);
-        doReturn(nodeList).when(topologyMock).getNode();
-
-        NodeId nodeId = new NodeId("nodeIdValue1");
-        doReturn(nodeId).when(nodeMock).getNodeId();
-    }
-
-    @Test
-    public void closeTest() throws Exception {
-        constructorTestHelper();
-        topicTestHelper();
-        Map<TopicId, EventSourceTopic> localMap = eventSourceTopology.getEventSourceTopicMap();
-        TopicId topicIdMock = mock(TopicId.class);
-        EventSourceTopic eventSourceTopic = EventSourceTopic.create(new NotificationPattern("foo"),
-                "pattern", eventSourceTopology);
-        localMap.put(topicIdMock, eventSourceTopic);
-        eventSourceTopology.close();
-        verify(aggregatorRpcReg, times(1)).close();
-        verify(listenerRegistrationMock, times(1)).close();
-    }
-
-    @Test
-    public void registerTest() throws Exception {
-        topicTestHelper();
-        Node nodeMock = mock(Node.class);
-        EventSource eventSourceMock = mock(EventSource.class);
-        NodeId nodeId = new NodeId("nodeIdValue1");
-        nodeKey = new NodeKey(nodeId);
-        doReturn(nodeKey).when(nodeMock).key();
-        doReturn(nodeKey).when(eventSourceMock).getSourceNodeKey();
-        ObjectRegistration routedRpcRegistrationMock = mock(ObjectRegistration.class);
-        doReturn(routedRpcRegistrationMock).when(rpcProviderRegistryMock).registerRpcImplementation(
-            eq(EventSourceService.class), eq(eventSourceMock), any(Set.class));
-        eventSourceTopology.register(eventSourceMock);
-        verify(rpcProviderRegistryMock, times(1)).registerRpcImplementation(eq(EventSourceService.class),
-            eq(eventSourceMock), any(Set.class));
-    }
-
-    @Test
-    public void unregisterTest() throws Exception {
-        topicTestHelper();
-        EventSource eventSourceMock = mock(EventSource.class);
-        NodeId nodeId = new NodeId("nodeIdValue1");
-        nodeKey = new NodeKey(nodeId);
-        Map<NodeKey, Registration> localMap = eventSourceTopology.getRoutedRpcRegistrations();
-        NodeKey nodeKeyMock = mock(NodeKey.class);
-        doReturn(nodeKeyMock).when(eventSourceMock).getSourceNodeKey();
-        BindingAwareBroker.RoutedRpcRegistration<EventSourceService> routedRpcRegistrationMock =
-                mock(BindingAwareBroker.RoutedRpcRegistration.class);
-        localMap.put(nodeKeyMock, routedRpcRegistrationMock);
-        eventSourceTopology.unRegister(eventSourceMock);
-        verify(routedRpcRegistrationMock, times(1)).close();
-    }
-
-    @Test
-    public void registerEventSourceTest() throws Exception {
-        topicTestHelper();
-        Node nodeMock = mock(Node.class);
-        EventSource eventSourceMock = mock(EventSource.class);
-        NodeId nodeId = new NodeId("nodeIdValue1");
-        nodeKey = new NodeKey(nodeId);
-        doReturn(nodeKey).when(nodeMock).key();
-        doReturn(nodeKey).when(eventSourceMock).getSourceNodeKey();
-        ObjectRegistration routedRpcRegistrationMock = mock(ObjectRegistration.class);
-        doReturn(routedRpcRegistrationMock).when(rpcProviderRegistryMock)
-                .registerRpcImplementation(eq(EventSourceService.class), eq(eventSourceMock), any(Set.class));
-        assertNotNull("Return value has not been created correctly.",
-                eventSourceTopology.registerEventSource(eventSourceMock));
-    }
-}
diff --git a/opendaylight/md-sal/messagebus-spi/pom.xml b/opendaylight/md-sal/messagebus-spi/pom.xml
deleted file mode 100644 (file)
index 07c6d12..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>messagebus-spi</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <name>${project.artifactId}</name>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>messagebus-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-model-api</artifactId>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-</project>
diff --git a/opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSource.java b/opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSource.java
deleted file mode 100644 (file)
index 3221e13..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.spi;
-
-import java.util.List;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventsource.rev141202.EventSourceService;
-import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * An event source is a node in the topology which is able to produce notifications.
- * To register an event source, use {@link EventSourceRegistry#registerEventSource(EventSource)}.
- * The EventSourceRegistry will ask a registered event source to publish notifications
- * whenever it has been asked to publish a certain type of notification. It does so by
- * calling the JoinTopic method, which every event source must implement (it is inherited
- * from the superinterface {@link EventSourceService}).
- */
-
-public interface EventSource extends EventSourceService, AutoCloseable {
-
-    /**
-     * Identifier of node associated with event source.
-     *
-     * @return instance of NodeKey
-     */
-    NodeKey getSourceNodeKey();
-
-    /**
-     * Lists the types of notifications which the source can produce.
-     *
-     * @return list of available notifications
-     */
-    List<SchemaPath> getAvailableNotifications();
-}
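The interface above only pins down two methods on top of the inherited EventSourceService RPCs. As a hedged illustration, an implementation skeleton might have looked like the following; the RPC methods are not shown in this patch, so they are left abstract, and the class name is invented:

    import java.util.List;
    import org.opendaylight.controller.messagebus.spi.EventSource;
    import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
    import org.opendaylight.yangtools.yang.model.api.SchemaPath;

    // Hypothetical skeleton covering only the two methods documented above.
    abstract class AbstractNodeEventSource implements EventSource {
        private final NodeKey sourceNodeKey;
        private final List<SchemaPath> availableNotifications;

        AbstractNodeEventSource(final NodeKey sourceNodeKey, final List<SchemaPath> availableNotifications) {
            this.sourceNodeKey = sourceNodeKey;
            this.availableNotifications = List.copyOf(availableNotifications);
        }

        @Override
        public NodeKey getSourceNodeKey() {
            return sourceNodeKey;
        }

        @Override
        public List<SchemaPath> getAvailableNotifications() {
            return availableNotifications;
        }

        @Override
        public void close() {
            // Stop publishing notifications for the associated node.
        }
    }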
diff --git a/opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSourceRegistration.java b/opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSourceRegistration.java
deleted file mode 100644 (file)
index dd6cc1a..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.spi;
-
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-
-/**
- * Instance of EventSourceRegistration is returned by {@link EventSourceRegistry#registerEventSource(EventSource)}
- * and is used to unregister the EventSource.
- */
-public interface EventSourceRegistration<T extends EventSource> extends ObjectRegistration<T> {
-
-    @Override
-    void close();
-}
diff --git a/opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSourceRegistry.java b/opendaylight/md-sal/messagebus-spi/src/main/java/org/opendaylight/controller/messagebus/spi/EventSourceRegistry.java
deleted file mode 100644 (file)
index 893ea16..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.spi;
-
-/**
- * EventSourceRegistry is used to register {@link EventSource}.
- */
-public interface EventSourceRegistry extends AutoCloseable {
-
-    /**
-     * Registers the given EventSource for public consumption. The EventSource is
-     * associated with the node identified via {@link EventSource#getSourceNodeKey}.
-     *
-     * @param eventSource the EventSource instance to register
-     * @return an EventSourceRegistration instance that is used to unregister the EventSource
-     * via {@link EventSourceRegistration#close()}.
-     */
-    <T extends EventSource> EventSourceRegistration<T> registerEventSource(T eventSource);
-}
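Taken together, EventSource, EventSourceRegistration and EventSourceRegistry formed a small registration contract. A hypothetical usage sketch, assuming a registry and a source instance are obtained elsewhere:

    import org.opendaylight.controller.messagebus.spi.EventSource;
    import org.opendaylight.controller.messagebus.spi.EventSourceRegistration;
    import org.opendaylight.controller.messagebus.spi.EventSourceRegistry;

    final class EventSourceLifecycleSketch {
        static void publishWhileNeeded(final EventSourceRegistry registry, final EventSource source) {
            // Advertise the source; the registry associates it with source.getSourceNodeKey().
            final EventSourceRegistration<EventSource> registration = registry.registerEventSource(source);
            try {
                // ... the registry may now ask the source to join topics and publish notifications ...
            } finally {
                // Undo the registration once the node disappears or the source shuts down.
                registration.close();
            }
        }
    }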
diff --git a/opendaylight/md-sal/messagebus-util/pom.xml b/opendaylight/md-sal/messagebus-util/pom.xml
deleted file mode 100644 (file)
index d3c29a0..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
-
-This program and the accompanying materials are made available under the
-terms of the Eclipse Public License v1.0 which accompanies this distribution,
-and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-
-    <parent>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
-        <relativePath>../parent</relativePath>
-    </parent>
-
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>messagebus-util</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>bundle</packaging>
-    <name>${project.artifactId}</name>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-core-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-binding-api</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>messagebus-api</artifactId>
-        </dependency>
-        <!-- Testing Dependencies -->
-        <dependency>
-              <groupId>org.glassfish.jersey.test-framework.providers</groupId>
-              <artifactId>jersey-test-framework-provider-grizzly2</artifactId>
-              <version>2.4</version>
-              <scope>test</scope>
-        </dependency>
-        <dependency>
-              <groupId>org.mockito</groupId>
-              <artifactId>mockito-core</artifactId>
-              <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-</project>
diff --git a/opendaylight/md-sal/messagebus-util/src/main/java/org/opendaylight/controller/messagebus/app/util/TopicDOMNotification.java b/opendaylight/md-sal/messagebus-util/src/main/java/org/opendaylight/controller/messagebus/app/util/TopicDOMNotification.java
deleted file mode 100644 (file)
index b6c6cdf..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.messagebus.app.util;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicNotification;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class TopicDOMNotification implements DOMNotification {
-
-    private static final SchemaPath TOPIC_NOTIFICATION_ID = SchemaPath.create(true, TopicNotification.QNAME);
-    private final ContainerNode body;
-
-    public TopicDOMNotification(final ContainerNode body) {
-        this.body = body;
-    }
-
-    @Override
-    public SchemaPath getType() {
-        return TOPIC_NOTIFICATION_ID;
-    }
-
-    @Override
-    public ContainerNode getBody() {
-        return body;
-    }
-
-    @Override
-    public String toString() {
-        return "TopicDOMNotification [body=" + body + "]";
-    }
-}
diff --git a/opendaylight/md-sal/messagebus-util/src/main/java/org/opendaylight/controller/messagebus/app/util/Util.java b/opendaylight/md-sal/messagebus-util/src/main/java/org/opendaylight/controller/messagebus/app/util/Util.java
deleted file mode 100644 (file)
index 0b28e55..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.messagebus.app.util;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.regex.Pattern;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public final class Util {
-    private Util() {
-    }
-
-    public static <T> ListenableFuture<RpcResult<T>> resultRpcSuccessFor(final T output) {
-        return Futures.immediateFuture(RpcResultBuilder.success(output).build());
-    }
-
-    /**
-     * Filters the supplied SchemaPaths, keeping those whose last component's namespace matches the pattern.
-     *
-     * @param list list of SchemaPaths to filter
-     * @param pattern regular expression the namespace has to match
-     * @return list of matching SchemaPaths
-     */
-    public static List<SchemaPath> expandQname(final List<SchemaPath> list, final Pattern pattern) {
-        final List<SchemaPath> matchingQnames = new ArrayList<>();
-
-        for (final SchemaPath notification : list) {
-            final String namespace = notification.getLastComponent().getNamespace().toString();
-            if (pattern.matcher(namespace).matches()) {
-                matchingQnames.add(notification);
-            }
-        }
-        return matchingQnames;
-    }
-
-    /**
-     * CREDIT to http://www.rgagnon.com/javadetails/java-0515.html.
-     */
-    public static String wildcardToRegex(final String wildcard) {
-        final StringBuilder s = new StringBuilder(wildcard.length());
-        s.append('^');
-        for (final char c : wildcard.toCharArray()) {
-            switch (c) {
-                case '*':
-                    s.append(".*");
-                    break;
-                case '?':
-                    s.append('.');
-                    break;
-                // escape special regexp-characters
-                case '(':
-                case ')':
-                case '[':
-                case ']':
-                case '$':
-                case '^':
-                case '.':
-                case '{':
-                case '}':
-                case '|':
-                case '\\':
-                    s.append("\\");
-                    s.append(c);
-                    break;
-                default:
-                    s.append(c);
-                    break;
-            }
-        }
-        s.append('$');
-        return s.toString();
-    }
-}
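wildcardToRegex() and expandQname() above boil down to anchoring a wildcard as a regular expression and filtering namespaces with it. A self-contained sketch of the same idea using only java.util.regex (the sample namespaces are invented):

    import java.util.List;
    import java.util.regex.Pattern;

    final class WildcardMatchSketch {
        // Same translation the removed Util performed: '*' -> ".*", '?' -> '.',
        // everything else quoted, whole expression anchored with '^' and '$'.
        static Pattern wildcardToPattern(final String wildcard) {
            final StringBuilder sb = new StringBuilder("^");
            for (final char c : wildcard.toCharArray()) {
                switch (c) {
                    case '*' -> sb.append(".*");
                    case '?' -> sb.append('.');
                    default -> sb.append(Pattern.quote(String.valueOf(c)));
                }
            }
            return Pattern.compile(sb.append('$').toString());
        }

        public static void main(final String[] args) {
            final Pattern pattern = wildcardToPattern("urn:odl:*");
            // Keeps only the namespaces matching the wildcard: [urn:odl:xxx, urn:odl:yyy]
            System.out.println(List.of("urn:odl:xxx", "urn:odl:yyy", "urn:other:zzz").stream()
                .filter(ns -> pattern.matcher(ns).matches())
                .toList());
        }
    }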
diff --git a/opendaylight/md-sal/messagebus-util/src/test/java/org/opendaylight/controller/messagebus/app/util/TopicDOMNotificationTest.java b/opendaylight/md-sal/messagebus-util/src/test/java/org/opendaylight/controller/messagebus/app/util/TopicDOMNotificationTest.java
deleted file mode 100644 (file)
index a366c69..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.yang.gen.v1.urn.cisco.params.xml.ns.yang.messagebus.eventaggregator.rev141202.TopicNotification;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class TopicDOMNotificationTest {
-
-    private static final String CONTAINER_NODE_BODY_MOCK_TO_STRING = "containerNodeBodyMock";
-    ContainerNode containerNodeBodyMock;
-    TopicDOMNotification topicDOMNotification;
-
-    @BeforeClass
-    public static void initTestClass() {
-    }
-
-    @Before
-    public void setUp() {
-        containerNodeBodyMock = mock(ContainerNode.class);
-        doReturn(CONTAINER_NODE_BODY_MOCK_TO_STRING).when(containerNodeBodyMock).toString();
-        topicDOMNotification = new TopicDOMNotification(containerNodeBodyMock);
-    }
-
-    @Test
-    public void constructorTest() {
-        assertNotNull("Instance has not been created correctly.", topicDOMNotification);
-    }
-
-    @Test
-    public void getTypeTest() {
-        SchemaPath topicNotificationId = SchemaPath.create(true, TopicNotification.QNAME);
-        assertEquals("Type has not been created correctly.", topicNotificationId, topicDOMNotification.getType());
-    }
-
-    @Test
-    public void getBodyTest() {
-        assertEquals("String has not been created correctly.", containerNodeBodyMock, topicDOMNotification.getBody());
-    }
-
-    @Test
-    public void getToStringTest() {
-        String bodyString = "TopicDOMNotification [body=" + CONTAINER_NODE_BODY_MOCK_TO_STRING + "]";
-        assertEquals("String has not been created correctly.", bodyString, topicDOMNotification.toString());
-    }
-}
diff --git a/opendaylight/md-sal/messagebus-util/src/test/java/org/opendaylight/controller/messagebus/app/util/UtilTest.java b/opendaylight/md-sal/messagebus-util/src/test/java/org/opendaylight/controller/messagebus/app/util/UtilTest.java
deleted file mode 100644 (file)
index 1a2d66c..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.messagebus.app.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.regex.Pattern;
-import org.junit.Test;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Unit tests for Util.
- *
- * @author ppalmar
- */
-public class UtilTest {
-
-    @Test
-    public void testResultFor() throws Exception {
-        {
-            final String expectedResult = "dummy string";
-            RpcResult<String> rpcResult = Util.resultRpcSuccessFor(expectedResult).get();
-            assertEquals(expectedResult, rpcResult.getResult());
-            assertTrue(rpcResult.isSuccessful());
-            assertTrue(rpcResult.getErrors().isEmpty());
-        }
-        {
-            final Integer expectedResult = 42;
-            RpcResult<Integer> rpcResult = Util.resultRpcSuccessFor(expectedResult).get();
-            assertEquals(expectedResult, rpcResult.getResult());
-            assertTrue(rpcResult.isSuccessful());
-            assertTrue(rpcResult.getErrors().isEmpty());
-        }
-    }
-
-    @Test
-    public void testExpandQname() {
-        // match no path because the list of the allowed paths is empty
-        {
-            final List<SchemaPath> paths = new ArrayList<>();
-            final Pattern regexPattern = Pattern.compile(".*"); // match everything
-            final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
-            assertTrue(matchingPaths.isEmpty());
-        }
-
-        // match no path because of regex pattern
-        {
-            final List<SchemaPath> paths = createSchemaPathList();
-            final Pattern regexPattern = Pattern.compile("^@.*");
-            final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
-            assertTrue(matchingPaths.isEmpty());
-        }
-
-        // match all paths
-        {
-            final List<SchemaPath> paths = createSchemaPathList();
-            final Pattern regexPattern = Pattern.compile(".*");
-            final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
-            assertTrue(matchingPaths.contains(paths.get(0)));
-            assertTrue(matchingPaths.contains(paths.get(1)));
-            assertEquals(paths.size(), matchingPaths.size());
-        }
-
-        // match one path only
-        {
-            final List<SchemaPath> paths = createSchemaPathList();
-            final Pattern regexPattern = Pattern.compile(".*yyy$");
-            final List<SchemaPath> matchingPaths = Util.expandQname(paths, regexPattern);
-            assertTrue(matchingPaths.contains(paths.get(1)));
-            assertEquals(1, matchingPaths.size());
-        }
-    }
-
-    private static List<SchemaPath> createSchemaPathList() {
-        final QName qname1 = QName.create("urn:odl:xxx", "2015-01-01", "localName");
-        final QName qname2 = QName.create("urn:odl:yyy", "2015-01-01", "localName");
-        final SchemaPath path1 = SchemaPath.create(true, qname1);
-        final SchemaPath path2 = SchemaPath.create(true, qname2);
-        return Arrays.asList(path1, path2);
-    }
-}
index a074df2d43bb3d54f9fa9a1842c637668a97fe14..2cd8d0ac1bdb5c90799694b41a02e850fc2ca529 100644 (file)
   <parent>
     <groupId>org.opendaylight.mdsal</groupId>
     <artifactId>binding-parent</artifactId>
-    <version>3.0.6</version>
+    <version>13.0.1</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>mdsal-parent</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <dependencyManagement>
     <dependencies>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <artifactId>bundle-parent</artifactId>
+        <version>9.0.3-SNAPSHOT</version>
         <type>pom</type>
         <scope>import</scope>
       </dependency>
     </dependencies>
   </dependencyManagement>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <propertyExpansion>checkstyle.violationSeverity=error</propertyExpansion>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <failOnError>true</failOnError>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
 </project>
index 7e07837c28a8bf184ff2e55604e0e5752cfbd99b..86f5e203f2323fdbf6b8c551a0431ac01243a09d 100644 (file)
@@ -5,52 +5,37 @@
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-parent</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
+  <artifactId>mdsal-aggregator</artifactId>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
+  <properties>
+    <maven.deploy.skip>true</maven.deploy.skip>
+    <maven.install.skip>true</maven.install.skip>
+  </properties>
+
   <modules>
-    <module>mdsal-artifacts</module>
     <module>parent</module>
 
     <!-- Common APIs & Implementation -->
-    <module>sal-common-api</module>
-    <module>sal-common-impl</module>
     <module>sal-common-util</module>
 
-    <!-- Binding Independent -->
-    <module>sal-dom-api</module>
-    <module>sal-dom-broker</module>
-    <module>sal-dom-spi</module>
-    <module>sal-dom-compat</module>
-
-    <!-- Binding Aware -->
-    <module>sal-binding-api</module>
-    <module>sal-binding-broker</module>
-
-    <module>sal-binding-util</module>
-
     <!-- Samples -->
     <module>samples</module>
 
-    <!-- Connectors -->
-    <module>sal-connector-api</module>
-
     <module>sal-akka-raft</module>
     <module>sal-akka-raft-example</module>
 
-    <!--InMemory DOM DataStore-->
-    <module>sal-inmemory-datastore</module>
-
     <module>sal-clustering-commons</module>
     <module>cds-access-api</module>
     <module>cds-access-client</module>
     <module>cds-dom-api</module>
+    <module>cds-mgmt-api</module>
     <module>sal-akka-segmented-journal</module>
 
     <!-- sal clustering configuration -->
     <module>sal-dummy-distributed-datastore</module>
     <module>sal-cluster-admin-api</module>
     <module>sal-cluster-admin-impl</module>
+    <module>sal-cluster-admin-karaf-cli</module>
+
+    <!-- Entity Ownership Service on top of Akka Distributed Data/Singleton -->
+    <module>eos-dom-akka</module>
 
     <!-- Yang Test Models for MD-SAL -->
     <module>sal-test-model</module>
     <!-- Clustering -->
     <module>sal-remoterpc-connector</module>
 
-    <!-- Message Bus -->
-    <module>messagebus-api</module>
-    <module>messagebus-spi</module>
-    <module>messagebus-impl</module>
-    <module>messagebus-util</module>
-
     <!-- PAX EXAM ITs -->
     <module>sal-binding-it</module>
-    <module>sal-binding-dom-it</module>
 
     <!-- IT Base and Parent -->
     <module>mdsal-it-base</module>
     <module>mdsal-it-parent</module>
-
-    <!-- Debug utils -->
-    <module>mdsal-trace</module>
-
   </modules>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-  <profiles>
-    <profile>
-      <id>benchmarks</id>
-      <activation>
-        <activeByDefault>false</activeByDefault>
-      </activation>
-      <modules>
-        <module>benchmark-data-store</module>
-      </modules>
-    </profile>
-  </profiles>
 </project>
index a62a3e043499517b24f964b9bbd5c638a016a5b7..295d0d0552a6fbad351d43e89deb126c85439f7d 100644 (file)
@@ -1,19 +1,29 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2014, 2018 Cisco Systems, Inc. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-akka-raft-example</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-akka-raft</artifactId>
@@ -46,5 +56,4 @@
     <tag>HEAD</tag>
     <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
   </scm>
-
 </project>
index a2bcc8a724f68ec05f2c9c57440084fb8368dcb6..052a48940c2cbb8db823832565d8a006c0c07d51 100644 (file)
@@ -5,19 +5,17 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.example;
 
 import akka.actor.ActorRef;
 import akka.actor.Props;
-import com.google.common.base.Optional;
-import com.google.common.base.Throwables;
 import com.google.common.io.ByteSource;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.Serializable;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 import org.apache.commons.lang3.SerializationUtils;
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
 import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
@@ -31,8 +29,8 @@ import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
 import org.opendaylight.controller.cluster.raft.behaviors.Leader;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.util.AbstractStringIdentifier;
 
@@ -53,8 +51,8 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
 
     private long persistIdentifier = 1;
 
-    public ExampleActor(String id, Map<String, String> peerAddresses,
-        Optional<ConfigParams> configParams) {
+    public ExampleActor(final String id, final Map<String, String> peerAddresses,
+        final Optional<ConfigParams> configParams) {
         super(id, peerAddresses, configParams, (short)0);
         setPersistence(true);
         roleChangeNotifier = createRoleChangeNotifier(id);
@@ -66,14 +64,12 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     @Override
-    protected void handleNonRaftCommand(Object message) {
+    protected void handleNonRaftCommand(final Object message) {
         if (message instanceof KeyValue) {
             if (isLeader()) {
                 persistData(getSender(), new PayloadIdentifier(persistIdentifier++), (Payload) message, false);
-            } else {
-                if (getLeader() != null) {
-                    getLeader().forward(message, getContext());
-                }
+            } else if (getLeader() != null) {
+                getLeader().forward(message, getContext());
             }
 
         } else if (message instanceof PrintState) {
@@ -85,7 +81,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
         } else if (message instanceof PrintRole) {
             if (LOG.isDebugEnabled()) {
                 if (getRaftState() == RaftState.Leader || getRaftState() == RaftState.IsolatedLeader) {
-                    final String followers = ((Leader)this.getCurrentBehavior()).printFollowerStates();
+                    final String followers = ((Leader)getCurrentBehavior()).printFollowerStates();
                     LOG.debug("{} = {}, Peers={}, followers={}", getId(), getRaftState(),
                         getRaftActorContext().getPeerIds(), followers);
                 } else {
@@ -107,8 +103,8 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
             + ", im-mem journal size=" + getRaftActorContext().getReplicatedLog().size();
     }
 
-    public Optional<ActorRef> createRoleChangeNotifier(String actorId) {
-        ActorRef exampleRoleChangeNotifier = this.getContext().actorOf(
+    public Optional<ActorRef> createRoleChangeNotifier(final String actorId) {
+        ActorRef exampleRoleChangeNotifier = getContext().actorOf(
             RoleChangeNotifier.getProps(actorId), actorId + "-notifier");
         return Optional.<ActorRef>of(exampleRoleChangeNotifier);
     }
@@ -120,8 +116,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
 
     @Override
     protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
-        if (data instanceof KeyValue) {
-            KeyValue kv = (KeyValue) data;
+        if (data instanceof KeyValue kv) {
             state.put(kv.getKey(), kv.getValue());
             if (clientActor != null) {
                 clientActor.tell(new KeyValueSaved(), getSelf());
@@ -131,10 +126,10 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
 
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public void createSnapshot(ActorRef actorRef, java.util.Optional<OutputStream> installSnapshotStream) {
+    public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
         try {
             if (installSnapshotStream.isPresent()) {
-                SerializationUtils.serialize((Serializable) state, installSnapshotStream.get());
+                SerializationUtils.serialize((Serializable) state, installSnapshotStream.orElseThrow());
             }
         } catch (RuntimeException e) {
             LOG.error("Exception in creating snapshot", e);
@@ -144,7 +139,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     @Override
-    public void applySnapshot(Snapshot.State snapshotState) {
+    public void applySnapshot(final Snapshot.State snapshotState) {
         state.clear();
         state.putAll(((MapState)snapshotState).state);
 
@@ -169,11 +164,11 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     @Override
-    public void startLogRecoveryBatch(int maxBatchSize) {
+    public void startLogRecoveryBatch(final int maxBatchSize) {
     }
 
     @Override
-    public void appendRecoveredLogEntry(Payload data) {
+    public void appendRecoveredLogEntry(final Payload data) {
     }
 
     @Override
@@ -185,7 +180,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     @Override
-    public void applyRecoverySnapshot(Snapshot.State snapshotState) {
+    public void applyRecoverySnapshot(final Snapshot.State snapshotState) {
     }
 
     @Override
@@ -200,11 +195,11 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
 
     @SuppressWarnings("unchecked")
     @Override
-    public Snapshot.State deserializeSnapshot(ByteSource snapshotBytes) {
+    public Snapshot.State deserializeSnapshot(final ByteSource snapshotBytes) {
         try {
             return new MapState((Map<String, String>) SerializationUtils.deserialize(snapshotBytes.read()));
         } catch (IOException e) {
-            throw Throwables.propagate(e);
+            throw new IllegalStateException(e);
         }
     }
 
@@ -213,7 +208,7 @@ public class ExampleActor extends RaftActor implements RaftActorRecoveryCohort,
 
         Map<String, String> state;
 
-        MapState(Map<String, String> state) {
+        MapState(final Map<String, String> state) {
             this.state = state;
         }
     }
index 65d2109b30277f2a70efea3c4a3b219c8efa9b9f..312615671391e8db04eb33c0e0d51c452cf0202a 100644 (file)
@@ -19,7 +19,7 @@ public class ExampleConfigParamsImpl extends DefaultConfigParamsImpl {
     }
 
     @Override
-    public int getSnapshotChunkSize() {
+    public int getMaximumMessageSliceSize() {
         return 50;
     }
 }
index d3bf544ef94d60365b1ae6cfb4046b5168110755..6b4c17addc4b202cf262f6bc30999158e7d606f7 100644 (file)
@@ -5,12 +5,12 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.example;
 
 import akka.actor.ActorRef;
 import akka.actor.Cancellable;
 import akka.actor.Props;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -47,7 +47,7 @@ public class ExampleRoleChangeListener extends AbstractUntypedActor implements A
     private static final FiniteDuration SCHEDULER_DURATION = new FiniteDuration(1, TimeUnit.SECONDS);
     private static final String[] SHARDS_TO_MONITOR = new String[] {"example"};
 
-    public ExampleRoleChangeListener(String memberName) {
+    public ExampleRoleChangeListener(final String memberName) {
         scheduleRegistrationListener(SCHEDULER_DURATION);
         populateRegistry(memberName);
     }
@@ -57,7 +57,7 @@ public class ExampleRoleChangeListener extends AbstractUntypedActor implements A
     }
 
     @Override
-    protected void handleReceive(Object message) {
+    protected void handleReceive(final Object message) {
         if (message instanceof RegisterListener) {
             // called by the scheduler at intervals to register any unregistered notifiers
             sendRegistrationRequests();
@@ -79,7 +79,7 @@ public class ExampleRoleChangeListener extends AbstractUntypedActor implements A
         }
     }
 
-    private void scheduleRegistrationListener(FiniteDuration interval) {
+    private void scheduleRegistrationListener(final FiniteDuration interval) {
         LOG.debug("--->scheduleRegistrationListener called.");
         registrationSchedule = getContext().system().scheduler().schedule(
             interval, interval, getSelf(), new RegisterListener(),
@@ -87,7 +87,7 @@ public class ExampleRoleChangeListener extends AbstractUntypedActor implements A
 
     }
 
-    private void populateRegistry(String memberName) {
+    private void populateRegistry(final String memberName) {
         String notifier = new StringBuilder().append(NOTIFIER_AKKA_URL).append(memberName)
                 .append("/").append(memberName).append("-notifier").toString();
 
@@ -101,6 +101,7 @@ public class ExampleRoleChangeListener extends AbstractUntypedActor implements A
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
+    @SuppressFBWarnings("REC_CATCH_EXCEPTION")
     private void sendRegistrationRequests() {
         for (Map.Entry<String, Boolean> entry : notifierRegistrationStatus.entrySet()) {
             if (!entry.getValue()) {
@@ -112,13 +113,13 @@ public class ExampleRoleChangeListener extends AbstractUntypedActor implements A
                     notifier.tell(new RegisterRoleChangeListener(), getSelf());
 
                 } catch (Exception e) {
-                    LOG.error("ERROR!! Unable to send registration request to notifier {}", entry.getKey());
+                    LOG.error("ERROR!! Unable to send registration request to notifier {}", entry.getKey(), e);
                 }
             }
         }
     }
 
-    private void handleRegisterRoleChangeListenerReply(String senderId) {
+    private void handleRegisterRoleChangeListenerReply(final String senderId) {
         if (notifierRegistrationStatus.containsKey(senderId)) {
             notifierRegistrationStatus.put(senderId, true);
 
index 96712eef4159542dd30726a2e9389f26821a5cb9..9559f1cff422c75fc116ffbfa321338eda320175 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.example;
 
 import akka.actor.ActorRef;
@@ -13,49 +12,53 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Created by kramesha on 7/16/14.
  */
 public class LogGenerator {
+    private static final Logger LOG = LoggerFactory.getLogger(LogGenerator.class);
+
     private final Map<ActorRef, LoggingThread> clientToLoggingThread = new HashMap<>();
 
-    public void startLoggingForClient(ActorRef client) {
+    public void startLoggingForClient(final ActorRef client) {
         LoggingThread lt = new LoggingThread(client);
         clientToLoggingThread.put(client, lt);
         new Thread(lt).start();
     }
 
-    public void stopLoggingForClient(ActorRef client) {
+    public void stopLoggingForClient(final ActorRef client) {
         clientToLoggingThread.get(client).stopLogging();
         clientToLoggingThread.remove(client);
     }
 
     public static class LoggingThread implements Runnable {
-
+        private final Random random = new Random();
         private final ActorRef clientActor;
+
         private volatile boolean stopLogging = false;
 
-        public LoggingThread(ActorRef clientActor) {
+        public LoggingThread(final ActorRef clientActor) {
             this.clientActor = clientActor;
         }
 
         @Override
-        @SuppressWarnings("checkstyle:RegexpSingleLineJava")
         public void run() {
-            Random random = new Random();
             while (true) {
                 if (stopLogging) {
-                    System.out.println("Logging stopped for client:" + clientActor.path());
+                    LOG.info("Logging stopped for client: {}", clientActor.path());
                     break;
                 }
                 String key = clientActor.path().name();
                 int randomInt = random.nextInt(100);
                 clientActor.tell(new KeyValue(key + "-key-" + randomInt, "value-" + randomInt), null);
+
                 try {
                     Thread.sleep(randomInt % 10 * 1000L);
                 } catch (InterruptedException e) {
-                    e.printStackTrace();
+                    LOG.info("Interrupted while sleeping", e);
                 }
             }
         }
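The hunks above route output through SLF4J instead of System.out/printStackTrace(). Passing the exception as the last argument, after any '{}' placeholders have been satisfied, is what makes the logger attach the stack trace. A minimal illustration (class name and message are made up):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class Slf4jThrowableSketch {
        private static final Logger LOG = LoggerFactory.getLogger(Slf4jThrowableSketch.class);

        static void report(final String notifier, final Exception cause) {
            // One '{}' placeholder, two extra arguments: the trailing Throwable is not
            // formatted into the message, it is logged together with its stack trace.
            LOG.error("Unable to send registration request to notifier {}", notifier, cause);
        }
    }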
index 54ff92993a9ad23e2236b26c2944a28c944f9ba3..871d3dfc2c9342b222b70972963913f3f92f7939 100644 (file)
@@ -5,22 +5,19 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.example;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.PoisonPill;
-import com.google.common.base.Optional;
 import java.io.BufferedReader;
 import java.io.InputStreamReader;
 import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
-import org.opendaylight.controller.cluster.raft.ConfigParams;
 
 public final class Main {
     private static final ActorSystem ACTOR_SYSTEM = ActorSystem.create();
@@ -37,25 +34,24 @@ public final class Main {
     }
 
     @SuppressWarnings("checkstyle:RegexpSingleLineJava")
-    public static void main(String[] args) throws Exception {
+    public static void main(final String[] args) throws Exception {
         ActorRef example1Actor =
             ACTOR_SYSTEM.actorOf(ExampleActor.props("example-1",
-                withoutPeer("example-1"), Optional.<ConfigParams>absent()), "example-1");
+                withoutPeer("example-1"), Optional.empty()), "example-1");
 
         ActorRef example2Actor =
             ACTOR_SYSTEM.actorOf(ExampleActor.props("example-2",
-                withoutPeer("example-2"), Optional.<ConfigParams>absent()), "example-2");
+                withoutPeer("example-2"), Optional.empty()), "example-2");
 
         ActorRef example3Actor =
             ACTOR_SYSTEM.actorOf(ExampleActor.props("example-3",
-                withoutPeer("example-3"), Optional.<ConfigParams>absent()), "example-3");
+                withoutPeer("example-3"), Optional.empty()), "example-3");
 
 
-        List<ActorRef> examples = Arrays.asList(example1Actor, example2Actor, example3Actor);
+        final var examples = Arrays.asList(example1Actor, example2Actor, example3Actor);
 
-        ActorRef clientActor = ACTOR_SYSTEM.actorOf(ClientActor.props(example1Actor));
-        BufferedReader br =
-            new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
+        final var clientActor = ACTOR_SYSTEM.actorOf(ClientActor.props(example1Actor));
+        final var br = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
 
         System.out.println("Usage :");
         System.out.println("s <1-3> to start a peer");
@@ -82,8 +78,7 @@ public final class Main {
                         String actorName = "example-" + num;
                         examples.add(num - 1,
                             ACTOR_SYSTEM.actorOf(ExampleActor.props(actorName,
-                                withoutPeer(actorName), Optional.<ConfigParams>absent()),
-                                actorName));
+                                withoutPeer(actorName), Optional.empty()), actorName));
                         System.out.println("Created actor : " + actorName);
                         continue;
                     }
@@ -100,7 +95,7 @@ public final class Main {
         }
     }
 
-    private static Map<String, String> withoutPeer(String peerId) {
+    private static Map<String, String> withoutPeer(final String peerId) {
         Map<String, String> without = new HashMap<>(allPeers);
         without.remove(peerId);
         return without;
index 8f414c7a6b2dd636818463fb29d8280b858835df..b8be5d368012d53721c2e981ac5a5c4f6e2b0352 100644 (file)
@@ -5,12 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.example;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
-import com.google.common.base.Optional;
 import com.google.common.collect.Lists;
 import com.typesafe.config.ConfigFactory;
 import java.io.BufferedReader;
@@ -19,6 +17,7 @@ import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import org.opendaylight.controller.cluster.example.messages.PrintRole;
 import org.opendaylight.controller.cluster.example.messages.PrintState;
@@ -63,7 +62,7 @@ public class TestDriver {
      *  AbstractUptypedActor and AbstractUptypedPersistentActor would need to be commented out.
      *  Also RaftActor handleCommand(), debug log which prints for every command other than AE/AER
      */
-    public static void main(String[] args) throws Exception {
+    public static void main(final String[] args) throws Exception {
 
         actorSystem = ActorSystem.create("raft-test", ConfigFactory
             .load().getConfig("raft-test"));
@@ -135,7 +134,7 @@ public class TestDriver {
     }
 
     // create the listener using a separate actor system for each example actor
-    private static void createClusterRoleChangeListener(List<String> memberIds) {
+    private static void createClusterRoleChangeListener(final List<String> memberIds) {
         System.out.println("memberIds=" + memberIds);
         for (String memberId : memberIds) {
             ActorRef listenerActor = listenerActorSystem.actorOf(
@@ -144,12 +143,12 @@ public class TestDriver {
         }
     }
 
-    public static ActorRef createExampleActor(String name) {
+    public static ActorRef createExampleActor(final String name) {
         return actorSystem.actorOf(ExampleActor.props(name, withoutPeer(name),
             Optional.of(configParams)), name);
     }
 
-    public void createNodes(int num) {
+    public void createNodes(final int num) {
         for (int i = 0; i < num; i++)  {
             nameCounter = nameCounter + 1;
             allPeers.put("example-" + nameCounter, "akka://raft-test/user/example-" + nameCounter);
@@ -165,7 +164,7 @@ public class TestDriver {
     }
 
     // add num clients to all nodes in the system
-    public void addClients(int num) {
+    public void addClients(final int num) {
         for (Map.Entry<String, ActorRef> actorRefEntry : actorRefs.entrySet()) {
             for (int i = 0; i < num; i++) {
                 String clientName = "client-" + i + "-" + actorRefEntry.getKey();
@@ -178,7 +177,7 @@ public class TestDriver {
     }
 
     // add num clients to a node
-    public void addClientsToNode(String actorName, int num) {
+    public void addClientsToNode(final String actorName, final int num) {
         ActorRef actorRef = actorRefs.get(actorName);
         for (int i = 0; i < num; i++) {
             String clientName = "client-" + i + "-" + actorName;
@@ -188,7 +187,7 @@ public class TestDriver {
         }
     }
 
-    public void stopNode(String actorName) {
+    public void stopNode(final String actorName) {
         ActorRef actorRef = actorRefs.get(actorName);
 
         for (Map.Entry<String,ActorRef> entry : clientActorRefs.entrySet()) {
@@ -202,7 +201,7 @@ public class TestDriver {
         allPeers.remove(actorName);
     }
 
-    public void reinstateNode(String actorName) {
+    public void reinstateNode(final String actorName) {
         String address = "akka://default/user/" + actorName;
         allPeers.put(actorName, address);
 
@@ -225,7 +224,7 @@ public class TestDriver {
         }
     }
 
-    public void startLoggingForClient(ActorRef client) {
+    public void startLoggingForClient(final ActorRef client) {
         logGenerator.startLoggingForClient(client);
     }
 
@@ -235,7 +234,7 @@ public class TestDriver {
         }
     }
 
-    public void stopLoggingForClient(ActorRef client) {
+    public void stopLoggingForClient(final ActorRef client) {
         logGenerator.stopLoggingForClient(client);
     }
 
@@ -256,7 +255,7 @@ public class TestDriver {
     }
 
 
-    private static Map<String, String> withoutPeer(String peerId) {
+    private static Map<String, String> withoutPeer(final String peerId) {
         Map<String, String> without = new ConcurrentHashMap<>(allPeers);
         without.remove(peerId);
 
diff --git a/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java b/opendaylight/md-sal/sal-akka-raft-example/src/main/java/org/opendaylight/controller/cluster/example/messages/KVv1.java
new file mode 100644 (file)
index 0000000..7721a8b
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.example.messages;
+
+import java.io.Serializable;
+
+final class KVv1 implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final String key;
+    private final String value;
+
+    KVv1(String key, String value) {
+        this.key = key;
+        this.value = value;
+    }
+
+    Object readResolve() {
+        return new KeyValue(key, value);
+    }
+}
index 520188b8cd4dd91ecc8a4649ab5e3dab42744443..78eea5cd862a8f26dd7d1b73df156266f58fe8e3 100644 (file)
@@ -5,21 +5,20 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.example.messages;
 
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
-public class KeyValue extends Payload implements Serializable {
+public final class KeyValue extends Payload {
     private static final long serialVersionUID = 1L;
+
     private String key;
     private String value;
 
     public KeyValue() {
     }
 
-    public KeyValue(String key, String value) {
+    public KeyValue(final String key, final String value) {
         this.key = key;
         this.value = value;
     }
@@ -32,12 +31,15 @@ public class KeyValue extends Payload implements Serializable {
         return value;
     }
 
-    public void setKey(String key) {
-        this.key = key;
+    @Override
+    public int size() {
+        return value.length() + key.length();
     }
 
-    public void setValue(String value) {
-        this.value = value;
+    @Override
+    public int serializedSize() {
+        // TODO: provide a better estimate of the serialized size
+        return size();
     }
 
     @Override
@@ -46,8 +48,7 @@ public class KeyValue extends Payload implements Serializable {
     }
 
     @Override
-    public int size() {
-        return this.value.length() + this.key.length();
+    protected Object writeReplace() {
+        return new KVv1(value, key);
     }
-
 }
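
The KVv1 addition above pairs with KeyValue's new writeReplace() to form the classic Java serialization-proxy pattern: the payload serializes a small proxy object, and the proxy's readResolve() rebuilds the payload when it is read back. A minimal, self-contained sketch of the same round trip, using hypothetical Pair/PairProxy names rather than the classes in this change:

    import java.io.Serializable;

    final class Pair implements Serializable {
        private static final long serialVersionUID = 1L;

        final String key;
        final String value;

        Pair(final String key, final String value) {
            this.key = key;
            this.value = value;
        }

        // Substitute a compact proxy into the serialization stream
        private Object writeReplace() {
            return new PairProxy(key, value);
        }
    }

    final class PairProxy implements Serializable {
        private static final long serialVersionUID = 1L;

        private final String key;
        private final String value;

        PairProxy(final String key, final String value) {
            this.key = key;
            this.value = value;
        }

        // Re-create the real object when the proxy is deserialized
        private Object readResolve() {
            return new Pair(key, value);
        }
    }
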
index 52a8c1548bf65a37e5b63fa1322b0da17abf5203..306e7561a567c488017a62a3928708599b7970c7 100644 (file)
@@ -4,71 +4,64 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-akka-raft</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-clustering-commons</artifactId>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
     </dependency>
-
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-actor_2.12</artifactId>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
     </dependency>
-
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-cluster_2.12</artifactId>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
     </dependency>
-
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-persistence_2.12</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
     </dependency>
-
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-remote_2.12</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>util</artifactId>
     </dependency>
-
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-testkit_2.12</artifactId>
-      <scope>test</scope>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-clustering-commons</artifactId>
     </dependency>
-
     <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>cds-mgmt-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>repackaged-akka</artifactId>
     </dependency>
-
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-library</artifactId>
     </dependency>
-
     <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-lang3</artifactId>
     </dependency>
 
+    <!-- Test Dependencies -->
     <dependency>
       <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-slf4j_2.12</artifactId>
+      <artifactId>akka-testkit_2.13</artifactId>
     </dependency>
-
-    <!-- Test Dependencies -->
     <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
+      <groupId>org.awaitility</groupId>
+      <artifactId>awaitility</artifactId>
       <scope>test</scope>
     </dependency>
     <dependency>
       <artifactId>commons-io</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
-    </dependency>
   </dependencies>
 
   <build>
     <plugins>
-
       <plugin>
         <groupId>org.apache.felix</groupId>
         <artifactId>maven-bundle-plugin</artifactId>
           </instructions>
         </configuration>
       </plugin>
-
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
           <execution>
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <artifactId>maven-source-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar-no-fork</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
 
     <tag>HEAD</tag>
     <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
   </scm>
-
 </project>
index 86ba5ecc97aee9e7bbef8c103c08dcac6e5da36f..53d317fba1148d7c10093d443d4e0ea7a80dfbeb 100644 (file)
@@ -12,6 +12,7 @@ import com.google.common.base.Preconditions;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,23 +37,23 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     private long previousSnapshotTerm = -1;
     private int dataSize = 0;
 
-    protected AbstractReplicatedLogImpl(long snapshotIndex, long snapshotTerm,
-            List<ReplicatedLogEntry> unAppliedEntries, String logContext) {
+    protected AbstractReplicatedLogImpl(final long snapshotIndex, final long snapshotTerm,
+            final List<ReplicatedLogEntry> unAppliedEntries, final String logContext) {
         this.snapshotIndex = snapshotIndex;
         this.snapshotTerm = snapshotTerm;
         this.logContext = logContext;
 
-        this.journal = new ArrayList<>(unAppliedEntries.size());
+        journal = new ArrayList<>(unAppliedEntries.size());
         for (ReplicatedLogEntry entry: unAppliedEntries) {
             append(entry);
         }
     }
 
     protected AbstractReplicatedLogImpl() {
-        this(-1L, -1L, Collections.<ReplicatedLogEntry>emptyList(), "");
+        this(-1L, -1L, Collections.emptyList(), "");
     }
 
-    protected int adjustedIndex(long logEntryIndex) {
+    protected int adjustedIndex(final long logEntryIndex) {
         if (snapshotIndex < 0) {
             return (int) logEntryIndex;
         }
@@ -60,7 +61,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public ReplicatedLogEntry get(long logEntryIndex) {
+    public ReplicatedLogEntry get(final long logEntryIndex) {
         int adjustedIndex = adjustedIndex(logEntryIndex);
 
         if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
@@ -101,7 +102,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public long removeFrom(long logEntryIndex) {
+    public long removeFrom(final long logEntryIndex) {
         int adjustedIndex = adjustedIndex(logEntryIndex);
         if (adjustedIndex < 0 || adjustedIndex >= journal.size()) {
             // physical index should be less than list size and >= 0
@@ -118,7 +119,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public boolean append(ReplicatedLogEntry replicatedLogEntry) {
+    public boolean append(final ReplicatedLogEntry replicatedLogEntry) {
         if (replicatedLogEntry.getIndex() > lastIndex()) {
             journal.add(replicatedLogEntry);
             dataSize += replicatedLogEntry.size();
@@ -131,17 +132,17 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public void increaseJournalLogCapacity(int amount) {
+    public void increaseJournalLogCapacity(final int amount) {
         journal.ensureCapacity(journal.size() + amount);
     }
 
     @Override
-    public List<ReplicatedLogEntry> getFrom(long logEntryIndex) {
+    public List<ReplicatedLogEntry> getFrom(final long logEntryIndex) {
         return getFrom(logEntryIndex, journal.size(), NO_MAX_SIZE);
     }
 
     @Override
-    public List<ReplicatedLogEntry> getFrom(long logEntryIndex, int maxEntries, long maxDataSize) {
+    public List<ReplicatedLogEntry> getFrom(final long logEntryIndex, final int maxEntries, final long maxDataSize) {
         int adjustedIndex = adjustedIndex(logEntryIndex);
         int size = journal.size();
         if (adjustedIndex >= 0 && adjustedIndex < size) {
@@ -161,12 +162,13 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
         }
     }
 
-    private List<ReplicatedLogEntry> copyJournalEntries(int fromIndex, int toIndex, long maxDataSize) {
+    private @NonNull List<ReplicatedLogEntry> copyJournalEntries(final int fromIndex, final int toIndex,
+            final long maxDataSize) {
         List<ReplicatedLogEntry> retList = new ArrayList<>(toIndex - fromIndex);
         long totalSize = 0;
         for (int i = fromIndex; i < toIndex; i++) {
             ReplicatedLogEntry entry = journal.get(i);
-            totalSize += entry.size();
+            totalSize += entry.serializedSize();
             if (totalSize <= maxDataSize) {
                 retList.add(entry);
             } else {
@@ -194,7 +196,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public boolean isPresent(long logEntryIndex) {
+    public boolean isPresent(final long logEntryIndex) {
         if (logEntryIndex > lastIndex()) {
             // if the request logical index is less than the last present in the list
             return false;
@@ -204,7 +206,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public boolean isInSnapshot(long logEntryIndex) {
+    public boolean isInSnapshot(final long logEntryIndex) {
         return logEntryIndex >= 0 && logEntryIndex <= snapshotIndex && snapshotIndex != -1;
     }
 
@@ -219,22 +221,22 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public void setSnapshotIndex(long snapshotIndex) {
+    public void setSnapshotIndex(final long snapshotIndex) {
         this.snapshotIndex = snapshotIndex;
     }
 
     @Override
-    public void setSnapshotTerm(long snapshotTerm) {
+    public void setSnapshotTerm(final long snapshotTerm) {
         this.snapshotTerm = snapshotTerm;
     }
 
     @Override
-    public void clear(int startIndex, int endIndex) {
+    public void clear(final int startIndex, final int endIndex) {
         journal.subList(startIndex, endIndex).clear();
     }
 
     @Override
-    public void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm) {
+    public void snapshotPreCommit(final long snapshotCapturedIndex, final long snapshotCapturedTerm) {
         Preconditions.checkArgument(snapshotCapturedIndex >= snapshotIndex,
                 "snapshotCapturedIndex must be greater than or equal to snapshotIndex");
 
@@ -254,16 +256,20 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @Override
-    public void snapshotCommit() {
+    public void snapshotCommit(final boolean updateDataSize) {
         snapshottedJournal = null;
         previousSnapshotIndex = -1;
         previousSnapshotTerm = -1;
-        dataSize = 0;
-        // need to recalc the datasize based on the entries left after precommit.
-        for (ReplicatedLogEntry logEntry : journal) {
-            dataSize += logEntry.size();
-        }
 
+        if (updateDataSize) {
+            // need to recalc the datasize based on the entries left after precommit.
+            int newDataSize = 0;
+            for (ReplicatedLogEntry logEntry : journal) {
+                newDataSize += logEntry.size();
+            }
+            LOG.trace("{}: Updated dataSize from {} to {}", logContext, dataSize, newDataSize);
+            dataSize = newDataSize;
+        }
     }
 
     @Override
@@ -280,7 +286,7 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     }
 
     @VisibleForTesting
-    ReplicatedLogEntry getAtPhysicalIndex(int index) {
+    ReplicatedLogEntry getAtPhysicalIndex(final int index) {
         return journal.get(index);
     }
 }
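
Note that copyJournalEntries() now caps the batch by Payload.serializedSize(), the estimated on-the-wire size, rather than size(), the in-memory estimate, while dataSize bookkeeping and snapshotCommit(updateDataSize) continue to use size(). A hedged sketch of the capping loop in isolation, assuming only the ReplicatedLogEntry interface from this package:

    import java.util.ArrayList;
    import java.util.List;

    final class BatchCapSketch {
        // Stop copying entries once their estimated serialized size exceeds maxDataSize
        static List<ReplicatedLogEntry> capBySerializedSize(final List<ReplicatedLogEntry> entries,
                final long maxDataSize) {
            final List<ReplicatedLogEntry> retList = new ArrayList<>(entries.size());
            long totalSize = 0;
            for (ReplicatedLogEntry entry : entries) {
                totalSize += entry.serializedSize();
                if (totalSize > maxDataSize) {
                    break;
                }
                retList.add(entry);
            }
            return retList;
        }
    }
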
index 0f14844d5666e5ee6f73f3d7cb005b607c41b3a7..c69decdd14d8578dcba92d608ca55018a8b34b21 100644 (file)
@@ -5,33 +5,19 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft;
 
 import akka.actor.ActorRef;
 import org.opendaylight.yangtools.concepts.Identifier;
 
-public interface ClientRequestTracker {
-    /**
-     * Returns the client actor that should be sent a response when consensus is achieved.
-     *
-     * @return the client actor
-     */
-    ActorRef getClientActor();
-
-    /**
-     * Returns the identifier of the object that is to be replicated. For example a transaction identifier in the case
-     * of a transaction.
-     *
-     * @return the identifier
-     */
-    Identifier getIdentifier();
-
-    /**
-     * Returns the index of the log entry that is to be replicated.
-     *
-     * @return the index
-     */
-    long getIndex();
+/**
+ * Consensus forwarding tracker.
+ *
+ * @param clientActor the client actor that should be sent a response when consensus is achieved
+ * @param identifier the identifier of the object that is to be replicated. For example a transaction identifier in the
+ *        case of a transaction
+ * @param logIndex the index of the log entry that is to be replicated
+ */
+public record ClientRequestTracker(long logIndex, ActorRef clientActor, Identifier identifier) {
 
 }
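
Collapsing the ClientRequestTracker interface and ClientRequestTrackerImpl (removed below) into a record keeps the same data but changes the call sites: construction goes through the canonical constructor and reads go through the generated component accessors. A small sketch of the new shape, assuming it sits alongside the record in the same package:

    import akka.actor.ActorRef;
    import org.opendaylight.yangtools.concepts.Identifier;

    final class TrackerUsageSketch {
        // was: new ClientRequestTrackerImpl(clientActor, identifier, logIndex)
        static ClientRequestTracker track(final long logIndex, final ActorRef clientActor, final Identifier identifier) {
            return new ClientRequestTracker(logIndex, clientActor, identifier);
        }

        // was: tracker.getIndex(); the record exposes the component accessor logIndex() instead
        static boolean isCommitted(final ClientRequestTracker tracker, final long commitIndex) {
            return tracker.logIndex() <= commitIndex;
        }
    }
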
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ClientRequestTrackerImpl.java
deleted file mode 100644 (file)
index 6ffb922..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft;
-
-import akka.actor.ActorRef;
-import org.opendaylight.yangtools.concepts.Identifier;
-
-public class ClientRequestTrackerImpl implements ClientRequestTracker {
-
-    private final ActorRef clientActor;
-    private final Identifier identifier;
-    private final long logIndex;
-
-    public ClientRequestTrackerImpl(ActorRef clientActor, Identifier identifier, long logIndex) {
-
-        this.clientActor = clientActor;
-
-        this.identifier = identifier;
-
-        this.logIndex = logIndex;
-    }
-
-    @Override
-    public ActorRef getClientActor() {
-        return clientActor;
-    }
-
-    @Override
-    public long getIndex() {
-        return logIndex;
-    }
-
-    @Override
-    public Identifier getIdentifier() {
-        return identifier;
-    }
-}
index 070218e92e46448a74f82fd2416c1458b2f320c5..7f98295a300c6861e78fe383713801aada7d62ca 100644 (file)
@@ -12,14 +12,9 @@ import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
- * Configuration Parameter interface for configuring the Raft consensus system
- *
- * <p>
- * Any component using this implementation might want to provide an implementation of
- * this interface to configure
- *
- * <p>
- * A default implementation will be used if none is provided.
+ * Configuration Parameter interface for configuring the Raft consensus system. A component using this Raft
+ * implementation may provide its own implementation of this interface to tune these parameters; a default
+ * implementation is used if none is provided.
  *
  * @author Kamal Rameshan
  */
@@ -35,11 +30,28 @@ public interface ConfigParams {
 
     /**
      * Returns the percentage of total memory used in the in-memory Raft log before a snapshot should be taken.
+     * Disabled when direct threshold is enabled.
      *
      * @return the percentage.
      */
     int getSnapshotDataThresholdPercentage();
 
+    /**
+     * Returns the max size of memory used in the in-memory Raft log before a snapshot should be taken. 0 means that
+     * direct threshold is disabled and percentage is used instead.
+     *
+     * @return maximum journal size (in MiB).
+     */
+    int getSnapshotDataThreshold();
+
+    /**
+     * Returns the interval (in seconds) after which a snapshot should be taken during recovery. A negative value
+     * means that no snapshots are taken.
+     *
+     * @return the interval of recovery snapshot in seconds
+     */
+    int getRecoverySnapshotIntervalSeconds();
+
     /**
      * Returns the interval at which a heart beat message should be sent to remote followers.
      *
@@ -75,7 +87,7 @@ public interface ConfigParams {
      *
      * @return the maximum size (in bytes).
      */
-    int getSnapshotChunkSize();
+    int getMaximumMessageSliceSize();
 
     /**
      * Returns the maximum number of journal log entries to batch on recovery before applying.
@@ -91,7 +103,6 @@ public interface ConfigParams {
      */
     long getIsolatedCheckIntervalInMillis();
 
-
     /**
      * Returns the multiplication factor to be used to determine the shard election timeout. The election timeout
      * is determined by multiplying the election timeout factor with the heart beat duration.
@@ -100,7 +111,6 @@ public interface ConfigParams {
      */
     long getElectionTimeoutFactor();
 
-
     /**
      * Returns the RaftPolicy used to determine certain Raft behaviors.
      *
index 4185754da462651e8c121f5b6d3cf312f5f5ace6..c83f90ec430e9d4ec0228b53e65b4cd9d9d726b4 100644 (file)
@@ -11,9 +11,9 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.base.Strings;
-import com.google.common.base.Supplier;
 import com.google.common.base.Suppliers;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.raft.policy.DefaultRaftPolicy;
 import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
@@ -29,6 +29,10 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     private static final Logger LOG = LoggerFactory.getLogger(DefaultConfigParamsImpl.class);
 
     private static final int SNAPSHOT_BATCH_COUNT = 20000;
+    /**
+     * Interval after which a snapshot should be taken during the recovery process. 0 means never.
+     */
+    private static final int RECOVERY_SNAPSHOT_INTERVAL_SECONDS = 0;
 
     private static final int JOURNAL_RECOVERY_LOG_BATCH_SIZE = 1000;
 
@@ -37,7 +41,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
      */
     private static final int ELECTION_TIME_MAX_VARIANCE = 100;
 
-    private static final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
+    private static final int MAXIMUM_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB
 
 
     /**
@@ -56,6 +60,7 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     private FiniteDuration heartBeatInterval = HEART_BEAT_INTERVAL;
     private long snapshotBatchCount = SNAPSHOT_BATCH_COUNT;
     private int journalRecoveryLogBatchSize = JOURNAL_RECOVERY_LOG_BATCH_SIZE;
+    private int recoverySnapshotIntervalSeconds = RECOVERY_SNAPSHOT_INTERVAL_SECONDS;
     private long isolatedLeaderCheckInterval = HEART_BEAT_INTERVAL.$times(1000).toMillis();
     private FiniteDuration electionTimeOutInterval;
 
@@ -63,7 +68,11 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     // in-memory journal can use before it needs to snapshot
     private int snapshotDataThresholdPercentage = 12;
 
-    private int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE;
+    // max size of in-memory journal in MiB
+    // 0 means the direct threshold is disabled and the percentage is used instead
+    private int snapshotDataThreshold = 0;
+
+    private int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE;
 
     private long electionTimeoutFactor = 2;
     private long candidateElectionTimeoutDivisor = 1;
@@ -86,12 +95,21 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         this.snapshotBatchCount = snapshotBatchCount;
     }
 
+    public void setRecoverySnapshotIntervalSeconds(final int recoverySnapshotInterval) {
+        checkArgument(recoverySnapshotInterval >= 0);
+        recoverySnapshotIntervalSeconds = recoverySnapshotInterval;
+    }
+
     public void setSnapshotDataThresholdPercentage(final int snapshotDataThresholdPercentage) {
         this.snapshotDataThresholdPercentage = snapshotDataThresholdPercentage;
     }
 
-    public void setSnapshotChunkSize(final int snapshotChunkSize) {
-        this.snapshotChunkSize = snapshotChunkSize;
+    public void setSnapshotDataThreshold(final int snapshotDataThreshold) {
+        this.snapshotDataThreshold = snapshotDataThreshold;
+    }
+
+    public void setMaximumMessageSliceSize(final int maximumMessageSliceSize) {
+        this.maximumMessageSliceSize = maximumMessageSliceSize;
     }
 
     public void setJournalRecoveryLogBatchSize(final int journalRecoveryLogBatchSize) {
@@ -138,6 +156,16 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         return snapshotDataThresholdPercentage;
     }
 
+    @Override
+    public int getSnapshotDataThreshold() {
+        return snapshotDataThreshold;
+    }
+
+    @Override
+    public int getRecoverySnapshotIntervalSeconds() {
+        return recoverySnapshotIntervalSeconds;
+    }
+
     @Override
     public FiniteDuration getHeartBeatInterval() {
         return heartBeatInterval;
@@ -163,8 +191,8 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     }
 
     @Override
-    public int getSnapshotChunkSize() {
-        return snapshotChunkSize;
+    public int getMaximumMessageSliceSize() {
+        return maximumMessageSliceSize;
     }
 
     @Override
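
Taken together, the ConfigParams and DefaultConfigParamsImpl changes replace the 2 MB snapshot-chunk setting with a 480 KiB message-slice limit and add a direct journal-size threshold plus a recovery-snapshot interval. A hedged configuration sketch; the values are illustrative only, not recommendations:

    import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;

    final class RaftConfigSketch {
        static DefaultConfigParamsImpl newConfigParams() {
            final DefaultConfigParamsImpl params = new DefaultConfigParamsImpl();
            // Snapshot once the in-memory journal exceeds 128 MiB, bypassing the percentage-based check
            params.setSnapshotDataThreshold(128);
            // During recovery, also take a snapshot every 600 seconds of replay
            params.setRecoverySnapshotIntervalSeconds(600);
            // Replaces setSnapshotChunkSize(): cap sliced messages at 256 KiB
            params.setMaximumMessageSliceSize(256 * 1024);
            return params;
        }
    }
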
index a5f24990f6a54b4542bcc67dee35f2cdd4c56467..f5c94fbf4cb26aaf8d61f42f7804b9d660d664fd 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkState;
 import static java.util.Objects.requireNonNull;
 
@@ -36,15 +37,14 @@ public final class FollowerLogInformation {
 
     private long lastReplicatedIndex = -1L;
 
+    private long sentCommitIndex = -1L;
+
     private final Stopwatch lastReplicatedStopwatch = Stopwatch.createUnstarted();
 
     private short payloadVersion = -1;
 
-    // Assume the HELIUM_VERSION version initially for backwards compatibility until we obtain the follower's
-    // actual version via AppendEntriesReply. Although we no longer support the Helium version, a pre-Boron
-    // follower will not have the version field in AppendEntriesReply so it will be set to 0 which is
-    // HELIUM_VERSION.
-    private short raftVersion = RaftVersions.HELIUM_VERSION;
+    // Assume the FLUORINE_VERSION version initially, as we no longer support pre-Fluorine versions.
+    private short raftVersion = RaftVersions.FLUORINE_VERSION;
 
     private final PeerInfo peerInfo;
 
@@ -63,7 +63,7 @@ public final class FollowerLogInformation {
      */
     @VisibleForTesting
     FollowerLogInformation(final PeerInfo peerInfo, final long matchIndex, final RaftActorContext context) {
-        this.nextIndex = context.getCommitIndex();
+        nextIndex = context.getCommitIndex();
         this.matchIndex = matchIndex;
         this.context = context;
         this.peerInfo = requireNonNull(peerInfo);
@@ -236,15 +236,18 @@ public final class FollowerLogInformation {
      * sending duplicate message too frequently if the last replicate message was sent and no reply has been received
      * yet within the current heart beat interval
      *
+     * @param commitIndex current commitIndex
      * @return true if it is OK to replicate, false otherwise
      */
-    public boolean okToReplicate() {
+    public boolean okToReplicate(final long commitIndex) {
         if (peerInfo.getVotingState() == VotingState.VOTING_NOT_INITIALIZED) {
             return false;
         }
 
-        // Return false if we are trying to send duplicate data before the heartbeat interval
-        if (getNextIndex() == lastReplicatedIndex && lastReplicatedStopwatch.elapsed(TimeUnit.MILLISECONDS)
+        // Return false if we are trying to send duplicate data before the heartbeat interval. This check includes
+        // also our commitIndex, as followers need to be told of new commitIndex as soon as possible.
+        if (getNextIndex() == lastReplicatedIndex && !hasStaleCommitIndex(commitIndex)
+                && lastReplicatedStopwatch.elapsed(TimeUnit.MILLISECONDS)
                 < context.getConfigParams().getHeartBeatInterval().toMillis()) {
             return false;
         }
@@ -294,6 +297,7 @@ public final class FollowerLogInformation {
      * @param raftVersion the raft version.
      */
     public void setRaftVersion(final short raftVersion) {
+        checkArgument(raftVersion >= RaftVersions.FLUORINE_VERSION, "Unexpected version %s", raftVersion);
         this.raftVersion = raftVersion;
     }
 
@@ -312,8 +316,8 @@ public final class FollowerLogInformation {
      * @param state the LeaderInstallSnapshotState
      */
     public void setLeaderInstallSnapshotState(final @NonNull LeaderInstallSnapshotState state) {
-        if (this.installSnapshotState == null) {
-            this.installSnapshotState = requireNonNull(state);
+        if (installSnapshotState == null) {
+            installSnapshotState = requireNonNull(state);
         }
     }
 
@@ -345,19 +349,28 @@ public final class FollowerLogInformation {
         return slicedLogEntryIndex != NO_INDEX;
     }
 
-    public void setNeedsLeaderAddress(boolean value) {
+    public void setNeedsLeaderAddress(final boolean value) {
         needsLeaderAddress = value;
     }
 
-    public @Nullable String needsLeaderAddress(String leaderId) {
+    public @Nullable String needsLeaderAddress(final String leaderId) {
         return needsLeaderAddress ? context.getPeerAddress(leaderId) : null;
     }
 
+    public boolean hasStaleCommitIndex(final long commitIndex) {
+        return sentCommitIndex != commitIndex;
+    }
+
+    public void setSentCommitIndex(final long commitIndex) {
+        sentCommitIndex = commitIndex;
+    }
+
     @Override
     public String toString() {
         return "FollowerLogInformation [id=" + getId() + ", nextIndex=" + nextIndex + ", matchIndex=" + matchIndex
-                + ", lastReplicatedIndex=" + lastReplicatedIndex + ", votingState=" + peerInfo.getVotingState()
-                + ", stopwatch=" + stopwatch.elapsed(TimeUnit.MILLISECONDS) + ", followerTimeoutMillis="
-                + context.getConfigParams().getElectionTimeOutInterval().toMillis() + "]";
+                + ", lastReplicatedIndex=" + lastReplicatedIndex + ", commitIndex=" + sentCommitIndex
+                + ", votingState=" + peerInfo.getVotingState()
+                + ", stopwatch=" + stopwatch.elapsed(TimeUnit.MILLISECONDS)
+                + ", followerTimeoutMillis=" + context.getConfigParams().getElectionTimeOutInterval().toMillis() + "]";
     }
 }
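
okToReplicate() now takes the leader's current commitIndex so that a follower which has not yet been told about it is contacted immediately instead of waiting out the heartbeat interval, and setSentCommitIndex() records what was advertised. A hedged leader-side sketch; sendAppendEntries() is a hypothetical placeholder for the real AbstractLeader logic:

    final class ReplicationSketch {
        void maybeReplicate(final FollowerLogInformation follower, final long commitIndex) {
            // True when new entries are pending, the heartbeat interval has elapsed,
            // or the follower has a stale view of commitIndex
            if (follower.okToReplicate(commitIndex)) {
                sendAppendEntries(follower, commitIndex);
                // Remember what was advertised so the stale-commit check can suppress duplicates
                follower.setSentCommitIndex(commitIndex);
            }
        }

        private void sendAppendEntries(final FollowerLogInformation follower, final long commitIndex) {
            // placeholder for building and sending the actual AppendEntries message
        }
    }
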
index a533168bb21d35631f6384dd93fa278f2ae22da8..6febb902517d08d21076b6f02fc8003b214bbaba 100644 (file)
@@ -7,12 +7,13 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.actor.ReceiveTimeout;
 import akka.actor.UntypedAbstractActor;
-import com.google.common.base.Preconditions;
 import java.util.concurrent.TimeoutException;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
@@ -28,7 +29,7 @@ import scala.concurrent.duration.FiniteDuration;
  *
  * @author Thomas Pantelis
  */
-class GetSnapshotReplyActor extends UntypedAbstractActor {
+final class GetSnapshotReplyActor extends UntypedAbstractActor {
     private static final Logger LOG = LoggerFactory.getLogger(GetSnapshotReplyActor.class);
 
     private final Params params;
@@ -82,12 +83,12 @@ class GetSnapshotReplyActor extends UntypedAbstractActor {
 
         Params(final CaptureSnapshot captureSnapshot, final ElectionTerm electionTerm, final ActorRef replyToActor,
                 final FiniteDuration receiveTimeout, final String id, final ServerConfigurationPayload peerInfo) {
-            this.captureSnapshot = Preconditions.checkNotNull(captureSnapshot);
-            this.electionTerm = Preconditions.checkNotNull(electionTerm);
-            this.replyToActor = Preconditions.checkNotNull(replyToActor);
-            this.receiveTimeout = Preconditions.checkNotNull(receiveTimeout);
-            this.id = Preconditions.checkNotNull(id);
-            this.peerInformation = peerInfo;
+            this.captureSnapshot = requireNonNull(captureSnapshot);
+            this.electionTerm = requireNonNull(electionTerm);
+            this.replyToActor = requireNonNull(replyToActor);
+            this.receiveTimeout = requireNonNull(receiveTimeout);
+            this.id = requireNonNull(id);
+            peerInformation = peerInfo;
         }
     }
 }
old mode 100755 (executable)
new mode 100644 (file)
index 775ab0c..d71d879
@@ -15,14 +15,15 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.PoisonPill;
 import akka.actor.Status;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import com.google.common.collect.Lists;
-import java.util.Collection;
+import com.google.common.collect.ImmutableList;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.eclipse.jdt.annotation.NonNull;
@@ -32,6 +33,7 @@ import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
 import org.opendaylight.controller.cluster.NonPersistentDataProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
 import org.opendaylight.controller.cluster.notifications.RoleChanged;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
@@ -46,16 +48,15 @@ import org.opendaylight.controller.cluster.raft.behaviors.Follower;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.NoopPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.concepts.Immutable;
 
@@ -100,8 +101,7 @@ import org.opendaylight.yangtools.concepts.Immutable;
  * </ul>
  */
 public abstract class RaftActor extends AbstractUntypedPersistentActor {
-
-    private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
+    private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50);
 
     /**
      * This context should NOT be passed directly to any other actor it is
@@ -123,17 +123,17 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
     private boolean shuttingDown;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected RaftActor(final String id, final Map<String, String> peerAddresses,
          final Optional<ConfigParams> configParams, final short payloadVersion) {
 
         persistentProvider = new PersistentDataProvider(this);
         delegatingPersistenceProvider = new RaftActorDelegatingPersistentDataProvider(null, persistentProvider);
 
-        context = new RaftActorContextImpl(this.getSelf(),
-            this.getContext(), id, new ElectionTermImpl(persistentProvider, id, LOG),
-            -1, -1, peerAddresses,
-            configParams.isPresent() ? configParams.get() : new DefaultConfigParamsImpl(),
-            delegatingPersistenceProvider, this::handleApplyState, LOG);
+        context = new RaftActorContextImpl(getSelf(), getContext(), id,
+            new ElectionTermImpl(persistentProvider, id, LOG), -1, -1, peerAddresses,
+            configParams.isPresent() ? configParams.orElseThrow() : new DefaultConfigParamsImpl(),
+            delegatingPersistenceProvider, this::handleApplyState, LOG, this::executeInSelf);
 
         context.setPayloadVersion(payloadVersion);
         context.setReplicatedLog(ReplicatedLogImpl.newInstance(context));
@@ -151,7 +151,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
     }
 
     @Override
-    public void postStop() {
+    public void postStop() throws Exception {
         context.close();
         super.postStop();
     }
@@ -213,7 +213,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
      * Handles a message.
      *
      * @deprecated This method is not final for testing purposes. DO NOT OVERRIDE IT, override
-     * {@link #handleNonRaftCommand(Object)} instead.
+     *             {@link #handleNonRaftCommand(Object)} instead.
      */
     @Deprecated
     @Override
@@ -225,9 +225,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         if (snapshotSupport.handleSnapshotMessage(message, getSender())) {
             return;
         }
-        if (message instanceof ApplyState) {
-            ApplyState applyState = (ApplyState) message;
-
+        if (message instanceof ApplyState applyState) {
             if (!hasFollowers()) {
                 // for single node, the capture should happen after the apply state
                 // as we delete messages from the persistent journal which have made it to the snapshot
@@ -239,35 +237,38 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             }
 
             possiblyHandleBehaviorMessage(message);
-        } else if (message instanceof ApplyJournalEntries) {
-            ApplyJournalEntries applyEntries = (ApplyJournalEntries) message;
+        } else if (message instanceof ApplyJournalEntries applyEntries) {
             LOG.debug("{}: Persisting ApplyJournalEntries with index={}", persistenceId(), applyEntries.getToIndex());
 
             persistence().persistAsync(applyEntries, NoopProcedure.instance());
-
         } else if (message instanceof FindLeader) {
-            getSender().tell(
-                new FindLeaderReply(getLeaderAddress()),
-                getSelf()
-            );
+            getSender().tell(new FindLeaderReply(getLeaderAddress()), getSelf());
         } else if (message instanceof GetOnDemandRaftState) {
             onGetOnDemandRaftStats();
         } else if (message instanceof InitiateCaptureSnapshot) {
             captureSnapshot();
-        } else if (message instanceof SwitchBehavior) {
-            switchBehavior((SwitchBehavior) message);
-        } else if (message instanceof LeaderTransitioning) {
-            onLeaderTransitioning((LeaderTransitioning)message);
+        } else if (message instanceof SwitchBehavior switchBehavior) {
+            switchBehavior(switchBehavior);
+        } else if (message instanceof LeaderTransitioning leaderTransitioning) {
+            onLeaderTransitioning(leaderTransitioning);
         } else if (message instanceof Shutdown) {
             onShutDown();
-        } else if (message instanceof Runnable) {
-            ((Runnable)message).run();
-        } else if (message instanceof NoopPayload) {
-            persistData(null, null, (NoopPayload) message, false);
-        } else if (message instanceof RequestLeadership) {
-            onRequestLeadership((RequestLeadership) message);
+        } else if (message instanceof Runnable runnable) {
+            runnable.run();
+        } else if (message instanceof NoopPayload noopPayload) {
+            persistData(null, null, noopPayload, false);
+        } else if (message instanceof RequestLeadership requestLeadership) {
+            onRequestLeadership(requestLeadership);
         } else if (!possiblyHandleBehaviorMessage(message)) {
-            handleNonRaftCommand(message);
+            if (message instanceof JournalProtocol.Response response
+                && delegatingPersistenceProvider.handleJournalResponse(response)) {
+                LOG.debug("{}: handled a journal response", persistenceId());
+            } else if (message instanceof SnapshotProtocol.Response response
+                && delegatingPersistenceProvider.handleSnapshotResponse(response)) {
+                LOG.debug("{}: handled a snapshot response", persistenceId());
+            } else {
+                handleNonRaftCommand(message);
+            }
         }
     }
 
@@ -412,7 +413,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         Optional<ActorRef> roleChangeNotifier = getRoleChangeNotifier();
         if (getRaftState() == RaftState.Follower && roleChangeNotifier.isPresent()
                 && leaderTransitioning.getLeaderId().equals(getCurrentBehavior().getLeaderId())) {
-            roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), null,
+            roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), null,
                 getCurrentBehavior().getLeaderPayloadVersion()), getSelf());
         }
     }
@@ -451,7 +452,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         }
 
         final RaftActorBehavior currentBehavior = context.getCurrentBehavior();
-        OnDemandRaftState.AbstractBuilder<?, ?> builder = newOnDemandRaftStateBuilder()
+        final var builder = newOnDemandRaftStateBuilder()
                 .commitIndex(context.getCommitIndex())
                 .currentTerm(context.getTermInformation().getCurrentTerm())
                 .inMemoryJournalDataSize(replicatedLog().dataSize())
@@ -477,19 +478,14 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             builder.lastLogTerm(lastLogEntry.getTerm());
         }
 
-        if (getCurrentBehavior() instanceof AbstractLeader) {
-            AbstractLeader leader = (AbstractLeader)getCurrentBehavior();
-            Collection<String> followerIds = leader.getFollowerIds();
-            List<FollowerInfo> followerInfoList = Lists.newArrayListWithCapacity(followerIds.size());
-            for (String id: followerIds) {
-                final FollowerLogInformation info = leader.getFollower(id);
-                followerInfoList.add(new FollowerInfo(id, info.getNextIndex(), info.getMatchIndex(),
-                        info.isFollowerActive(), DurationFormatUtils.formatDurationHMS(
-                            TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())),
-                        context.getPeerInfo(info.getId()).isVoting()));
-            }
-
-            builder.followerInfoList(followerInfoList);
+        if (getCurrentBehavior() instanceof AbstractLeader leader) {
+            builder.followerInfoList(leader.getFollowerIds().stream()
+                .map(leader::getFollower)
+                .map(info -> new FollowerInfo(info.getId(), info.getNextIndex(), info.getMatchIndex(),
+                    info.isFollowerActive(), DurationFormatUtils.formatDurationHMS(
+                        TimeUnit.NANOSECONDS.toMillis(info.nanosSinceLastActivity())),
+                    context.getPeerInfo(info.getId()).isVoting()))
+                .collect(ImmutableList.toImmutableList()));
         }
 
         sender().tell(builder.build(), self());
@@ -516,7 +512,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         if (!Objects.equals(lastLeaderId, currentBehavior.getLeaderId())
                 || oldBehaviorState.getLeaderPayloadVersion() != currentBehavior.getLeaderPayloadVersion()) {
             if (roleChangeNotifier.isPresent()) {
-                roleChangeNotifier.get().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(),
+                roleChangeNotifier.orElseThrow().tell(newLeaderStateChanged(getId(), currentBehavior.getLeaderId(),
                         currentBehavior.getLeaderPayloadVersion()), getSelf());
             }
 
@@ -533,7 +529,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
         if (roleChangeNotifier.isPresent()
                 && (oldBehavior == null || oldBehavior.state() != currentBehavior.state())) {
-            roleChangeNotifier.get().tell(new RoleChanged(getId(), oldBehaviorStateName ,
+            roleChangeNotifier.orElseThrow().tell(new RoleChanged(getId(), oldBehaviorStateName ,
                     currentBehavior.state().name()), getSelf());
         }
     }
@@ -625,8 +621,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
         if (wasAppended && hasFollowers()) {
             // Send log entry for replication.
-            getCurrentBehavior().handleMessage(getSelf(), new Replicate(clientActor, identifier, replicatedLogEntry,
-                    !batchHint));
+            getCurrentBehavior().handleMessage(getSelf(),
+                new Replicate(replicatedLogEntry.getIndex(), !batchHint, clientActor, identifier));
         }
     }
 
@@ -902,7 +898,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             LOG.debug("Take a snapshot of current state. lastReplicatedLog is {} and replicatedToAllIndex is {}",
                 replicatedLog().last(), idx);
 
-            snapshotManager.capture(replicatedLog().last(), idx);
+            snapshotManager.captureWithForcedTrim(replicatedLog().last(), idx);
         }
     }
 
@@ -963,7 +959,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             this.lastValidLeaderId = lastValidLeaderId;
             this.lastLeaderId = lastLeaderId;
             this.behavior = requireNonNull(behavior);
-            this.leaderPayloadVersion = behavior.getLeaderPayloadVersion();
+            leaderPayloadVersion = behavior.getLeaderPayloadVersion();
         }
 
         @Override
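
Much of the RaftActor churn above is a mechanical move to pattern matching for instanceof (Java 16+), which binds the cast variable in the condition and removes the explicit casts. A minimal stand-alone illustration of the idiom, with hypothetical message types unrelated to the actor code:

    final class PatternMatchSketch {
        interface Message { }

        record Apply(long toIndex) implements Message { }

        record Shutdown() implements Message { }

        static String dispatch(final Message message) {
            // The cast is bound in the condition, as in the rewritten handleCommand() above
            if (message instanceof Apply apply) {
                return "apply up to " + apply.toIndex();
            }
            if (message instanceof Shutdown) {
                return "shutting down";
            }
            return "unhandled";
        }
    }
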
index b5f3afdc16839252f2f2069986e3ef5955c995ae..e971ed4f6de95d5d53d02e7eeb9d3d6c89628db6 100644 (file)
@@ -15,6 +15,7 @@ import akka.cluster.Cluster;
 import com.google.common.annotations.VisibleForTesting;
 import java.util.Collection;
 import java.util.Optional;
+import java.util.concurrent.Executor;
 import java.util.function.Consumer;
 import java.util.function.LongSupplier;
 import org.eclipse.jdt.annotation.NonNull;
@@ -64,6 +65,13 @@ public interface RaftActorContext {
      */
     ActorRef getActor();
 
+    /**
+     * Return an Executor which is guaranteed to run tasks in the context of {@link #getActor()}.
+     *
+     * @return An executor.
+     */
+    @NonNull Executor getExecutor();
+
     /**
      * The akka Cluster singleton for the actor system if one is configured.
      *
index 634bbd4343fbba9219a8ab7a81c22c96c1ebf16c..a27bb9c395688b4c99177d942a4dcd11a1ae5b76 100644 (file)
@@ -16,14 +16,13 @@ import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.cluster.Cluster;
 import com.google.common.annotations.VisibleForTesting;
-import java.util.ArrayList;
+import com.google.common.collect.ImmutableList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.Set;
+import java.util.concurrent.Executor;
 import java.util.function.Consumer;
 import java.util.function.LongSupplier;
 import org.eclipse.jdt.annotation.NonNull;
@@ -49,6 +48,8 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     private final ActorContext context;
 
+    private final @NonNull Executor executor;
+
     private final String id;
 
     private final ElectionTerm termInformation;
@@ -96,16 +97,18 @@ public class RaftActorContextImpl implements RaftActorContext {
             final @NonNull ElectionTerm termInformation, final long commitIndex, final long lastApplied,
             final @NonNull Map<String, String> peerAddresses,
             final @NonNull ConfigParams configParams, final @NonNull DataPersistenceProvider persistenceProvider,
-            final @NonNull Consumer<ApplyState> applyStateConsumer, final @NonNull Logger logger) {
+            final @NonNull Consumer<ApplyState> applyStateConsumer, final @NonNull Logger logger,
+            final @NonNull Executor executor) {
         this.actor = actor;
         this.context = context;
         this.id = id;
         this.termInformation = requireNonNull(termInformation);
+        this.executor = requireNonNull(executor);
         this.commitIndex = commitIndex;
         this.lastApplied = lastApplied;
         this.configParams = requireNonNull(configParams);
         this.persistenceProvider = requireNonNull(persistenceProvider);
-        this.log = requireNonNull(logger);
+        log = requireNonNull(logger);
         this.applyStateConsumer = requireNonNull(applyStateConsumer);
 
         fileBackedOutputStreamFactory = new FileBackedOutputStreamFactory(
@@ -150,6 +153,11 @@ public class RaftActorContextImpl implements RaftActorContext {
         return actor;
     }
 
+    @Override
+    public final Executor getExecutor() {
+        return executor;
+    }
+
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
     public Optional<Cluster> getCluster() {
@@ -209,7 +217,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public Logger getLogger() {
-        return this.log;
+        return log;
     }
 
     @Override
@@ -246,34 +254,27 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public void updatePeerIds(final ServerConfigurationPayload serverConfig) {
-        votingMember = true;
-        boolean foundSelf = false;
-        Set<String> currentPeers = new HashSet<>(this.getPeerIds());
-        for (ServerInfo server : serverConfig.getServerConfig()) {
-            if (getId().equals(server.getId())) {
-                foundSelf = true;
-                if (!server.isVoting()) {
-                    votingMember = false;
-                }
+        boolean newVotingMember = false;
+        var currentPeers = new HashSet<>(getPeerIds());
+        for (var server : serverConfig.getServerConfig()) {
+            if (getId().equals(server.peerId())) {
+                newVotingMember = server.isVoting();
             } else {
-                VotingState votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING;
-                if (!currentPeers.contains(server.getId())) {
-                    this.addToPeers(server.getId(), null, votingState);
+                final var votingState = server.isVoting() ? VotingState.VOTING : VotingState.NON_VOTING;
+                if (currentPeers.contains(server.peerId())) {
+                    getPeerInfo(server.peerId()).setVotingState(votingState);
+                    currentPeers.remove(server.peerId());
                 } else {
-                    this.getPeerInfo(server.getId()).setVotingState(votingState);
-                    currentPeers.remove(server.getId());
+                    addToPeers(server.peerId(), null, votingState);
                 }
             }
         }
 
         for (String peerIdToRemove : currentPeers) {
-            this.removePeer(peerIdToRemove);
-        }
-
-        if (!foundSelf) {
-            votingMember = false;
+            removePeer(peerIdToRemove);
         }
 
+        votingMember = newVotingMember;
         log.debug("{}: Updated server config: isVoting: {}, peers: {}", id, votingMember, peerInfoMap.values());
 
         setDynamicServerConfigurationInUse();
@@ -357,7 +358,7 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     @Override
     public void setDynamicServerConfigurationInUse() {
-        this.dynamicServerConfiguration = true;
+        dynamicServerConfiguration = true;
     }
 
     @Override
@@ -365,9 +366,9 @@ public class RaftActorContextImpl implements RaftActorContext {
         if (!isDynamicServerConfigurationInUse()) {
             return null;
         }
-        Collection<PeerInfo> peers = getPeers();
-        List<ServerInfo> newConfig = new ArrayList<>(peers.size() + 1);
-        for (PeerInfo peer: peers) {
+        final var peers = getPeers();
+        final var newConfig = ImmutableList.<ServerInfo>builderWithExpectedSize(peers.size() + (includeSelf ? 1 : 0));
+        for (PeerInfo peer : peers) {
             newConfig.add(new ServerInfo(peer.getId(), peer.isVoting()));
         }
 
@@ -375,7 +376,7 @@ public class RaftActorContextImpl implements RaftActorContext {
             newConfig.add(new ServerInfo(getId(), votingMember));
         }
 
-        return new ServerConfigurationPayload(newConfig);
+        return new ServerConfigurationPayload(newConfig.build());
     }
 
     @Override
@@ -403,7 +404,7 @@ public class RaftActorContextImpl implements RaftActorContext {
     }
 
     void setCurrentBehavior(final RaftActorBehavior behavior) {
-        this.currentBehavior = requireNonNull(behavior);
+        currentBehavior = requireNonNull(behavior);
     }
 
     @Override
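
RaftActorContext now exposes an Executor that is guaranteed to run tasks in the context of the owning actor; RaftActorContextImpl receives it in its constructor and RaftActor wires it to executeInSelf. A hedged sketch of how asynchronous completion code might hop back onto the actor with it:

    final class ExecutorUsageSketch {
        // Run the callback on the RaftActor's own execution context,
        // e.g. after an asynchronous snapshot or I/O operation completes
        static void completeOnActor(final RaftActorContext context, final Runnable callback) {
            context.getExecutor().execute(callback);
        }
    }
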
index 238b7e77852b34f242c0d500cec206960deedba4..846ef22bb08c9cec938f02d9317f76684319188a 100644 (file)
@@ -7,13 +7,13 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.japi.Procedure;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 
 /**
  * The DelegatingPersistentDataProvider used by RaftActor to override the configured persistent provider to
@@ -24,10 +24,10 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Persis
 class RaftActorDelegatingPersistentDataProvider extends DelegatingPersistentDataProvider {
     private final PersistentDataProvider persistentProvider;
 
-    RaftActorDelegatingPersistentDataProvider(DataPersistenceProvider delegate,
-            PersistentDataProvider persistentProvider) {
+    RaftActorDelegatingPersistentDataProvider(final DataPersistenceProvider delegate,
+            final PersistentDataProvider persistentProvider) {
         super(delegate);
-        this.persistentProvider = Preconditions.checkNotNull(persistentProvider);
+        this.persistentProvider = requireNonNull(persistentProvider);
     }
 
     @Override
@@ -36,38 +36,24 @@ class RaftActorDelegatingPersistentDataProvider extends DelegatingPersistentData
     }
 
     @Override
-    public <T> void persistAsync(T entry, Procedure<T> procedure) {
+    public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
         doPersist(entry, procedure, true);
     }
 
     private <T> void doPersist(final T entry, final Procedure<T> procedure, final boolean async) {
-        if (getDelegate().isRecoveryApplicable()) {
-            persistSuper(entry, procedure, async);
-        } else {
-            if (entry instanceof ReplicatedLogEntry) {
-                Payload payload = ((ReplicatedLogEntry)entry).getData();
-                if (payload instanceof PersistentPayload) {
-                    // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes
-                    // on recovery if data persistence is later enabled.
-                    if (async) {
-                        persistentProvider.persistAsync(payload, p -> procedure.apply(entry));
-                    } else {
-                        persistentProvider.persist(payload, p -> procedure.apply(entry));
-                    }
-                } else {
-                    persistSuper(entry, procedure, async);
-                }
+        if (!getDelegate().isRecoveryApplicable() && entry instanceof ReplicatedLogEntry replicatedLogEntry
+            && replicatedLogEntry.getData() instanceof PersistentPayload payload) {
+            // We persist the Payload but not the ReplicatedLogEntry to avoid gaps in the journal indexes on recovery
+            // if data persistence is later enabled.
+            if (async) {
+                persistentProvider.persistAsync(payload, p -> procedure.apply(entry));
             } else {
-                persistSuper(entry, procedure, async);
+                persistentProvider.persist(payload, p -> procedure.apply(entry));
             }
-        }
-    }
-
-    private <T> void persistSuper(final T object, final Procedure<T> procedure, final boolean async) {
-        if (async) {
-            super.persistAsync(object, procedure);
+        } else if (async) {
+            super.persistAsync(entry, procedure);
         } else {
-            super.persist(object, procedure);
+            super.persist(entry, procedure);
         }
     }
 }
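
The flattened doPersist() keeps the original behaviour: when persistence is disabled, a ReplicatedLogEntry whose data is a PersistentPayload still has its payload written through the persistent provider, so journal indexes stay gap-free if persistence is later enabled. A hedged sketch of just that decision, with the Akka Procedure plumbing elided and ReplicatedLogEntry assumed from the same package:

    import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;

    final class PersistDecisionSketch {
        // Mirrors the condition in doPersist(): persist only the payload when recovery is not applicable
        static boolean persistPayloadOnly(final boolean recoveryApplicable, final Object entry) {
            return !recoveryApplicable && entry instanceof ReplicatedLogEntry logEntry
                && logEntry.getData() instanceof PersistentPayload;
        }
    }
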
index a572f308e5605a13a55576d5033000f985a2031a..3aeaff6d89f95876db6cbc365b39b017bcc134ef 100644 (file)
@@ -11,10 +11,10 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
 import com.google.common.base.Stopwatch;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.raft.base.messages.LeaderTransitioning;
@@ -89,7 +89,7 @@ public class RaftActorLeadershipTransferCohort {
 
         Optional<ActorRef> roleChangeNotifier = raftActor.getRoleChangeNotifier();
         if (roleChangeNotifier.isPresent()) {
-            roleChangeNotifier.get().tell(raftActor.newLeaderStateChanged(context.getId(), null,
+            roleChangeNotifier.orElseThrow().tell(raftActor.newLeaderStateChanged(context.getId(), null,
                     currentBehavior.getLeaderPayloadVersion()), raftActor.self());
         }
 
@@ -122,9 +122,9 @@ public class RaftActorLeadershipTransferCohort {
     void doTransfer() {
         RaftActorBehavior behavior = raftActor.getCurrentBehavior();
         // Sanity check...
-        if (behavior instanceof Leader) {
+        if (behavior instanceof Leader leader) {
             isTransferring = true;
-            ((Leader)behavior).transferLeadership(this);
+            leader.transferLeadership(this);
         } else {
             LOG.debug("{}: No longer the leader - skipping transfer", raftActor.persistenceId());
             finish(true);
@@ -206,7 +206,7 @@ public class RaftActorLeadershipTransferCohort {
     }
 
     public Optional<String> getRequestedFollowerId() {
-        return Optional.fromNullable(requestedFollowerId);
+        return Optional.ofNullable(requestedFollowerId);
     }
 
     interface OnComplete {
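
This file also completes the move from Guava's Optional to java.util.Optional: fromNullable() becomes ofNullable() and get() becomes the more explicit orElseThrow(). A tiny self-contained sketch of those equivalents, with a made-up follower id:

import java.util.Optional;

final class OptionalMigrationSketch {
    public static void main(final String[] args) {
        // Guava Optional.fromNullable(value)  ->  java.util Optional.ofNullable(value)
        final Optional<String> requestedFollower = Optional.ofNullable("member-2");

        // Guava optional.get()  ->  java.util optional.orElseThrow(), which names the
        // failure mode explicitly and is what the updated checks prefer.
        if (requestedFollower.isPresent()) {
            System.out.println("transfer leadership to " + requestedFollower.orElseThrow());
        }
    }
}
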
index 1f9b93acd7e507f3f837f550697b1fde8eb67ee5..4df0e7b58b5eb4aff84a03ac21f4865987b7c264 100644 (file)
@@ -8,8 +8,8 @@
 package org.opendaylight.controller.cluster.raft;
 
 import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 
 /**
  * Interface for a class that participates in raft actor persistence recovery.
index 564d64ca2d3fd5575489613de714406da848e703..389e8dfd8ff942a090a9201fa5432b944402d9d8 100644 (file)
@@ -11,16 +11,18 @@ import akka.persistence.RecoveryCompleted;
 import akka.persistence.SnapshotOffer;
 import com.google.common.base.Stopwatch;
 import java.util.Collections;
+import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
 import org.opendaylight.controller.cluster.raft.persisted.EmptyState;
 import org.opendaylight.controller.cluster.raft.persisted.MigratedSerializable;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
 import org.slf4j.Logger;
 
 /**
@@ -38,12 +40,13 @@ class RaftActorRecoverySupport {
     private boolean hasMigratedDataRecovered;
 
     private Stopwatch recoveryTimer;
+    private Stopwatch recoverySnapshotTimer;
     private final Logger log;
 
     RaftActorRecoverySupport(final RaftActorContext context, final RaftActorRecoveryCohort cohort) {
         this.context = context;
         this.cohort = cohort;
-        this.log = context.getLogger();
+        log = context.getLogger();
     }
 
     boolean handleRecoveryMessage(final Object message, final PersistentDataProvider persistentProvider) {
@@ -56,19 +59,18 @@ class RaftActorRecoverySupport {
         }
 
         boolean recoveryComplete = false;
-        if (message instanceof UpdateElectionTerm) {
-            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
-                    ((UpdateElectionTerm) message).getVotedFor());
-        } else if (message instanceof SnapshotOffer) {
-            onRecoveredSnapshot((SnapshotOffer) message);
-        } else if (message instanceof ReplicatedLogEntry) {
-            onRecoveredJournalLogEntry((ReplicatedLogEntry) message);
-        } else if (message instanceof ApplyJournalEntries) {
-            onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
-        } else if (message instanceof DeleteEntries) {
-            onDeleteEntries((DeleteEntries) message);
-        } else if (message instanceof ServerConfigurationPayload) {
-            context.updatePeerIds((ServerConfigurationPayload)message);
+        if (message instanceof UpdateElectionTerm updateElectionTerm) {
+            context.getTermInformation().update(updateElectionTerm.getCurrentTerm(), updateElectionTerm.getVotedFor());
+        } else if (message instanceof SnapshotOffer snapshotOffer) {
+            onRecoveredSnapshot(snapshotOffer);
+        } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) {
+            onRecoveredJournalLogEntry(replicatedLogEntry);
+        } else if (message instanceof ApplyJournalEntries applyJournalEntries) {
+            onRecoveredApplyLogEntries(applyJournalEntries.getToIndex());
+        } else if (message instanceof DeleteEntries deleteEntries) {
+            onDeleteEntries(deleteEntries);
+        } else if (message instanceof ServerConfigurationPayload serverConfigurationPayload) {
+            context.updatePeerIds(serverConfigurationPayload);
         } else if (message instanceof RecoveryCompleted) {
             recoveryComplete = true;
             onRecoveryCompletedMessage(persistentProvider);
@@ -99,16 +101,19 @@ class RaftActorRecoverySupport {
         return context.getReplicatedLog();
     }
 
-    private void initRecoveryTimer() {
+    private void initRecoveryTimers() {
         if (recoveryTimer == null) {
             recoveryTimer = Stopwatch.createStarted();
         }
+        if (recoverySnapshotTimer == null && context.getConfigParams().getRecoverySnapshotIntervalSeconds() > 0) {
+            recoverySnapshotTimer = Stopwatch.createStarted();
+        }
     }
 
     private void onRecoveredSnapshot(final SnapshotOffer offer) {
         log.debug("{}: SnapshotOffer called.", context.getId());
 
-        initRecoveryTimer();
+        initRecoveryTimers();
 
         Snapshot snapshot = (Snapshot) offer.snapshot();
 
@@ -136,11 +141,15 @@ class RaftActorRecoverySupport {
         context.setCommitIndex(snapshot.getLastAppliedIndex());
         context.getTermInformation().update(snapshot.getElectionTerm(), snapshot.getElectionVotedFor());
 
-        Stopwatch timer = Stopwatch.createStarted();
+        final Stopwatch timer = Stopwatch.createStarted();
 
         // Apply the snapshot to the actors state
-        if (!(snapshot.getState() instanceof EmptyState)) {
-            cohort.applyRecoverySnapshot(snapshot.getState());
+        final State snapshotState = snapshot.getState();
+        if (snapshotState.needsMigration()) {
+            hasMigratedDataRecovered = true;
+        }
+        if (!(snapshotState instanceof EmptyState)) {
+            cohort.applyRecoverySnapshot(snapshotState);
         }
 
         if (snapshot.getServerConfiguration() != null) {
@@ -149,8 +158,8 @@ class RaftActorRecoverySupport {
 
         timer.stop();
         log.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size={}",
-                context.getId(), timer.toString(), replicatedLog().getSnapshotIndex(),
-                replicatedLog().getSnapshotTerm(), replicatedLog().size());
+                context.getId(), timer, replicatedLog().getSnapshotIndex(), replicatedLog().getSnapshotTerm(),
+                replicatedLog().size());
     }
 
     private void onRecoveredJournalLogEntry(final ReplicatedLogEntry logEntry) {
@@ -195,6 +204,14 @@ class RaftActorRecoverySupport {
             if (logEntry != null) {
                 lastApplied++;
                 batchRecoveredLogEntry(logEntry);
+                if (shouldTakeRecoverySnapshot() && !context.getSnapshotManager().isCapturing()) {
+                    if (currentRecoveryBatchCount > 0) {
+                        endCurrentLogRecoveryBatch();
+                    }
+                    context.setLastApplied(lastApplied);
+                    context.setCommitIndex(lastApplied);
+                    takeRecoverySnapshot(logEntry);
+                }
             } else {
                 // Shouldn't happen but cover it anyway.
                 log.error("{}: Log entry not found for index {}", context.getId(), i);
@@ -215,7 +232,7 @@ class RaftActorRecoverySupport {
     }
 
     private void batchRecoveredLogEntry(final ReplicatedLogEntry logEntry) {
-        initRecoveryTimer();
+        initRecoveryTimers();
 
         int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
         if (!isServerConfigurationPayload(logEntry)) {
@@ -231,6 +248,23 @@ class RaftActorRecoverySupport {
         }
     }
 
+    private void takeRecoverySnapshot(final ReplicatedLogEntry logEntry) {
+        log.info("Time for recovery snapshot on entry with index {}", logEntry.getIndex());
+        final SnapshotManager snapshotManager = context.getSnapshotManager();
+        if (snapshotManager.capture(logEntry, -1)) {
+            log.info("Capturing snapshot, resetting timer for the next recovery snapshot interval.");
+            recoverySnapshotTimer.reset().start();
+        } else {
+            log.info("SnapshotManager is not able to capture snapshot at this time. It will be retried "
+                + "again with the next recovered entry.");
+        }
+    }
+
+    private boolean shouldTakeRecoverySnapshot() {
+        return recoverySnapshotTimer != null && recoverySnapshotTimer.elapsed(TimeUnit.SECONDS)
+            >= context.getConfigParams().getRecoverySnapshotIntervalSeconds();
+    }
+
     private void endCurrentLogRecoveryBatch() {
         cohort.applyCurrentLogRecoveryBatch();
         currentRecoveryBatchCount = 0;
@@ -241,11 +275,17 @@ class RaftActorRecoverySupport {
             endCurrentLogRecoveryBatch();
         }
 
-        String recoveryTime = "";
+        final String recoveryTime;
         if (recoveryTimer != null) {
-            recoveryTimer.stop();
-            recoveryTime = " in " + recoveryTimer.toString();
+            recoveryTime = " in " + recoveryTimer.stop();
             recoveryTimer = null;
+        } else {
+            recoveryTime = "";
+        }
+
+        if (recoverySnapshotTimer != null) {
+            recoverySnapshotTimer.stop();
+            recoverySnapshotTimer = null;
         }
 
         log.info("{}: Recovery completed {} - Switching actor to Follower - last log index = {}, last log term = {}, "
@@ -297,6 +337,6 @@ class RaftActorRecoverySupport {
     }
 
     private static boolean isMigratedSerializable(final Object message) {
-        return message instanceof MigratedSerializable && ((MigratedSerializable)message).isMigrated();
+        return message instanceof MigratedSerializable migrated && migrated.isMigrated();
     }
 }
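
The new recoverySnapshotTimer drives periodic snapshots while the journal is still being replayed, so long journals no longer have to be fully recovered before the first snapshot. A simplified standalone sketch of the timing logic; the Guava Stopwatch usage mirrors the code above, while the interval value and reset-on-every-capture behaviour are illustrative only:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

final class RecoverySnapshotTimerSketch {
    private final long intervalSeconds;
    private Stopwatch recoverySnapshotTimer;

    RecoverySnapshotTimerSketch(final long intervalSeconds) {
        this.intervalSeconds = intervalSeconds;
    }

    void onRecoveredEntry(final long index) {
        // The timer is only armed when the feature is enabled (interval > 0)
        if (recoverySnapshotTimer == null && intervalSeconds > 0) {
            recoverySnapshotTimer = Stopwatch.createStarted();
        }
        if (recoverySnapshotTimer != null
                && recoverySnapshotTimer.elapsed(TimeUnit.SECONDS) >= intervalSeconds) {
            System.out.println("taking recovery snapshot at index " + index);
            recoverySnapshotTimer.reset().start();
        }
    }
}
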
index 29641cb00e441d5dedc5e7cccccd69bb0b0e6d3a..fd2cd419d77344fc9284d044d089d0ca4cd21489 100644 (file)
@@ -12,11 +12,10 @@ import static java.util.Objects.requireNonNull;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
+import com.google.common.collect.ImmutableList;
 import java.util.ArrayDeque;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Queue;
 import java.util.UUID;
@@ -28,6 +27,7 @@ import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader;
 import org.opendaylight.controller.cluster.raft.messages.AddServer;
 import org.opendaylight.controller.cluster.raft.messages.AddServerReply;
 import org.opendaylight.controller.cluster.raft.messages.ChangeServersVotingStatus;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RemoveServer;
 import org.opendaylight.controller.cluster.raft.messages.RemoveServerReply;
 import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
@@ -36,7 +36,6 @@ import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
 import org.opendaylight.controller.cluster.raft.messages.UnInitializedFollowerSnapshotReply;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.util.AbstractUUIDIdentifier;
 import org.slf4j.Logger;
@@ -64,27 +63,27 @@ class RaftActorServerConfigurationSupport {
 
     RaftActorServerConfigurationSupport(final RaftActor raftActor) {
         this.raftActor = raftActor;
-        this.raftContext = raftActor.getRaftActorContext();
+        raftContext = raftActor.getRaftActorContext();
     }
 
     boolean handleMessage(final Object message, final ActorRef sender) {
-        if (message instanceof AddServer) {
-            onAddServer((AddServer) message, sender);
+        if (message instanceof AddServer addServer) {
+            onAddServer(addServer, sender);
             return true;
-        } else if (message instanceof RemoveServer) {
-            onRemoveServer((RemoveServer) message, sender);
+        } else if (message instanceof RemoveServer removeServer) {
+            onRemoveServer(removeServer, sender);
             return true;
-        } else if (message instanceof ChangeServersVotingStatus) {
-            onChangeServersVotingStatus((ChangeServersVotingStatus) message, sender);
+        } else if (message instanceof ChangeServersVotingStatus changeServersVotingStatus) {
+            onChangeServersVotingStatus(changeServersVotingStatus, sender);
             return true;
-        } else if (message instanceof ServerOperationTimeout) {
-            currentOperationState.onServerOperationTimeout((ServerOperationTimeout) message);
+        } else if (message instanceof ServerOperationTimeout serverOperationTimeout) {
+            currentOperationState.onServerOperationTimeout(serverOperationTimeout);
             return true;
-        } else if (message instanceof UnInitializedFollowerSnapshotReply) {
-            currentOperationState.onUnInitializedFollowerSnapshotReply((UnInitializedFollowerSnapshotReply) message);
+        } else if (message instanceof UnInitializedFollowerSnapshotReply uninitFollowerSnapshotReply) {
+            currentOperationState.onUnInitializedFollowerSnapshotReply(uninitFollowerSnapshotReply);
             return true;
-        } else if (message instanceof ApplyState) {
-            return onApplyState((ApplyState) message);
+        } else if (message instanceof ApplyState applyState) {
+            return onApplyState(applyState);
         } else if (message instanceof SnapshotComplete) {
             currentOperationState.onSnapshotComplete();
             return false;
@@ -748,7 +747,7 @@ class RaftActorServerConfigurationSupport {
         }
 
         private boolean updateLocalPeerInfo() {
-            List<ServerInfo> newServerInfoList = newServerInfoList();
+            final var newServerInfoList = newServerInfoList();
 
             // Check if new voting state would leave us with no voting members.
             boolean atLeastOneVoting = false;
@@ -765,28 +764,28 @@ class RaftActorServerConfigurationSupport {
             }
 
             raftContext.updatePeerIds(new ServerConfigurationPayload(newServerInfoList));
-            if (raftActor.getCurrentBehavior() instanceof AbstractLeader) {
-                AbstractLeader leader = (AbstractLeader) raftActor.getCurrentBehavior();
+            if (raftActor.getCurrentBehavior() instanceof AbstractLeader leader) {
                 leader.updateMinReplicaCount();
             }
 
             return true;
         }
 
-        private List<ServerInfo> newServerInfoList() {
-            Map<String, Boolean> serverVotingStatusMap = changeVotingStatusContext.getOperation()
-                    .getServerVotingStatusMap();
-            List<ServerInfo> newServerInfoList = new ArrayList<>();
-            for (String peerId: raftContext.getPeerIds()) {
-                newServerInfoList.add(new ServerInfo(peerId, serverVotingStatusMap.containsKey(peerId)
-                        ? serverVotingStatusMap.get(peerId) : raftContext.getPeerInfo(peerId).isVoting()));
+        private ImmutableList<ServerInfo> newServerInfoList() {
+            final var serverVotingStatusMap = changeVotingStatusContext.getOperation().getServerVotingStatusMap();
+            final var peerInfos = raftContext.getPeers();
+            final var newServerInfoList = ImmutableList.<ServerInfo>builderWithExpectedSize(peerInfos.size() + 1);
+            for (var peerInfo : peerInfos) {
+                final var peerId = peerInfo.getId();
+                final var voting = serverVotingStatusMap.get(peerId);
+                newServerInfoList.add(new ServerInfo(peerId, voting != null ? voting : peerInfo.isVoting()));
             }
 
-            newServerInfoList.add(new ServerInfo(raftContext.getId(), serverVotingStatusMap.containsKey(
-                    raftContext.getId()) ? serverVotingStatusMap.get(raftContext.getId())
-                            : raftContext.isVotingMember()));
+            final var myId = raftContext.getId();
+            final var myVoting = serverVotingStatusMap.get(myId);
+            newServerInfoList.add(new ServerInfo(myId, myVoting != null ? myVoting : raftContext.isVotingMember()));
 
-            return newServerInfoList;
+            return newServerInfoList.build();
         }
     }
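
newServerInfoList() above now performs a single map lookup per peer (a null result meaning "keep the current voting state") and collects into a pre-sized ImmutableList. A standalone sketch of that pattern with hypothetical peer data and a stand-in ServerInfo record:

import com.google.common.collect.ImmutableList;
import java.util.Map;

final class VotingStatusSketch {
    record ServerInfo(String peerId, boolean isVoting) { }

    static ImmutableList<ServerInfo> newServerInfoList(final Map<String, Boolean> votingOverrides,
            final Map<String, Boolean> currentVoting) {
        final var builder = ImmutableList.<ServerInfo>builderWithExpectedSize(currentVoting.size());
        for (var entry : currentVoting.entrySet()) {
            // One lookup instead of containsKey() + get(); null means "keep current state"
            final Boolean override = votingOverrides.get(entry.getKey());
            builder.add(new ServerInfo(entry.getKey(), override != null ? override : entry.getValue()));
        }
        return builder.build();
    }

    public static void main(final String[] args) {
        System.out.println(newServerInfoList(Map.of("member-2", false),
            Map.of("member-1", true, "member-2", true)));
    }
}
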
 
index 3b4c08c405bf02dace5565602d663166e4643e3f..e7344d9b4f17f74e347cf0fc339ff211243a46b5 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.raft;
 import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotFailure;
 import akka.persistence.SaveSnapshotSuccess;
+import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
 import java.util.Collections;
 import java.util.Optional;
@@ -17,6 +18,7 @@ import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.base.messages.SnapshotComplete;
 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshotReply;
 import org.opendaylight.controller.cluster.raft.persisted.EmptyState;
@@ -46,7 +48,7 @@ class RaftActorSnapshotMessageSupport {
     RaftActorSnapshotMessageSupport(final RaftActorContext context, final RaftActorSnapshotCohort cohort) {
         this.context = context;
         this.cohort = cohort;
-        this.log = context.getLogger();
+        log = context.getLogger();
 
         context.getSnapshotManager().setCreateSnapshotConsumer(
             outputStream -> cohort.createSnapshot(context.getActor(), outputStream));
@@ -57,19 +59,21 @@ class RaftActorSnapshotMessageSupport {
         return cohort;
     }
 
-    boolean handleSnapshotMessage(Object message, ActorRef sender) {
-        if (message instanceof ApplySnapshot) {
-            onApplySnapshot((ApplySnapshot) message);
-        } else if (message instanceof SaveSnapshotSuccess) {
-            onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
-        } else if (message instanceof SaveSnapshotFailure) {
-            onSaveSnapshotFailure((SaveSnapshotFailure) message);
-        } else if (message instanceof CaptureSnapshotReply) {
-            onCaptureSnapshotReply((CaptureSnapshotReply) message);
+    boolean handleSnapshotMessage(final Object message, final ActorRef sender) {
+        if (message instanceof ApplySnapshot applySnapshot) {
+            onApplySnapshot(applySnapshot);
+        } else if (message instanceof SaveSnapshotSuccess saveSnapshotSuccess) {
+            onSaveSnapshotSuccess(saveSnapshotSuccess);
+        } else if (message instanceof SaveSnapshotFailure saveSnapshotFailure) {
+            onSaveSnapshotFailure(saveSnapshotFailure);
+        } else if (message instanceof CaptureSnapshotReply captureSnapshotReply) {
+            onCaptureSnapshotReply(captureSnapshotReply);
         } else if (COMMIT_SNAPSHOT.equals(message)) {
             context.getSnapshotManager().commit(-1, -1);
-        } else if (message instanceof GetSnapshot) {
-            onGetSnapshot(sender);
+        } else if (message instanceof GetSnapshot getSnapshot) {
+            onGetSnapshot(sender, getSnapshot);
+        } else if (message instanceof SnapshotComplete) {
+            log.debug("{}: SnapshotComplete received", context.getId());
         } else {
             return false;
         }
@@ -77,21 +81,21 @@ class RaftActorSnapshotMessageSupport {
         return true;
     }
 
-    private void onCaptureSnapshotReply(CaptureSnapshotReply reply) {
+    private void onCaptureSnapshotReply(final CaptureSnapshotReply reply) {
         log.debug("{}: CaptureSnapshotReply received by actor", context.getId());
 
         context.getSnapshotManager().persist(reply.getSnapshotState(), reply.getInstallSnapshotStream(),
                 context.getTotalMemory());
     }
 
-    private void onSaveSnapshotFailure(SaveSnapshotFailure saveSnapshotFailure) {
+    private void onSaveSnapshotFailure(final SaveSnapshotFailure saveSnapshotFailure) {
         log.error("{}: SaveSnapshotFailure received for snapshot Cause:",
                 context.getId(), saveSnapshotFailure.cause());
 
         context.getSnapshotManager().rollback();
     }
 
-    private void onSaveSnapshotSuccess(SaveSnapshotSuccess success) {
+    private void onSaveSnapshotSuccess(final SaveSnapshotSuccess success) {
         long sequenceNumber = success.metadata().sequenceNr();
 
         log.info("{}: SaveSnapshotSuccess received for snapshot, sequenceNr: {}", context.getId(), sequenceNumber);
@@ -99,22 +103,26 @@ class RaftActorSnapshotMessageSupport {
         context.getSnapshotManager().commit(sequenceNumber, success.metadata().timestamp());
     }
 
-    private void onApplySnapshot(ApplySnapshot message) {
+    private void onApplySnapshot(final ApplySnapshot message) {
         log.info("{}: Applying snapshot on follower:  {}", context.getId(), message.getSnapshot());
 
         context.getSnapshotManager().apply(message);
     }
 
-    private void onGetSnapshot(ActorRef sender) {
+    private void onGetSnapshot(final ActorRef sender, final GetSnapshot getSnapshot) {
         log.debug("{}: onGetSnapshot", context.getId());
 
+
         if (context.getPersistenceProvider().isRecoveryApplicable()) {
             CaptureSnapshot captureSnapshot = context.getSnapshotManager().newCaptureSnapshot(
-                    context.getReplicatedLog().last(), -1);
+                    context.getReplicatedLog().last(), -1, true);
+
+            final FiniteDuration timeout =
+                    getSnapshot.getTimeout().map(Timeout::duration).orElse(snapshotReplyActorTimeout);
 
             ActorRef snapshotReplyActor = context.actorOf(GetSnapshotReplyActor.props(captureSnapshot,
-                    ImmutableElectionTerm.copyOf(context.getTermInformation()), sender,
-                    snapshotReplyActorTimeout, context.getId(), context.getPeerServerInfo(true)));
+                    ImmutableElectionTerm.copyOf(context.getTermInformation()), sender, timeout, context.getId(),
+                    context.getPeerServerInfo(true)));
 
             cohort.createSnapshot(snapshotReplyActor, Optional.empty());
         } else {
@@ -129,7 +137,7 @@ class RaftActorSnapshotMessageSupport {
     }
 
     @VisibleForTesting
-    void setSnapshotReplyActorTimeout(FiniteDuration snapshotReplyActorTimeout) {
+    void setSnapshotReplyActorTimeout(final FiniteDuration snapshotReplyActorTimeout) {
         this.snapshotReplyActorTimeout = snapshotReplyActorTimeout;
     }
 }
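
onGetSnapshot() now honours a per-request timeout carried by GetSnapshot and falls back to the configured default when none is supplied. A sketch of the same map()/orElse() fallback using java.time.Duration in place of Akka's Timeout and FiniteDuration; the 30-second default is an arbitrary example value:

import java.time.Duration;
import java.util.Optional;

final class TimeoutFallbackSketch {
    private static final Duration DEFAULT_REPLY_TIMEOUT = Duration.ofSeconds(30);

    static Duration effectiveTimeout(final Optional<Long> requestedSeconds) {
        // Same shape as getTimeout().map(Timeout::duration).orElse(snapshotReplyActorTimeout)
        return requestedSeconds.map(Duration::ofSeconds).orElse(DEFAULT_REPLY_TIMEOUT);
    }

    public static void main(final String[] args) {
        System.out.println(effectiveTimeout(Optional.empty())); // falls back to the 30s default
        System.out.println(effectiveTimeout(Optional.of(5L)));  // honours the per-request 5s
    }
}
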
index 5ec376412be1f4e31daffdea6f53760db7a7b8be..a09a4aa2cbb94f19760dc6fe87744cda2f2a64f0 100644 (file)
@@ -12,10 +12,15 @@ package org.opendaylight.controller.cluster.raft;
  *
  * @author Thomas Pantelis
  */
-public interface RaftVersions {
-    short HELIUM_VERSION = 0;
-    short LITHIUM_VERSION = 1;
-    short BORON_VERSION = 3;
-    short FLUORINE_VERSION = 4;
-    short CURRENT_VERSION = FLUORINE_VERSION;
+public final class RaftVersions {
+    // HELIUM_VERSION = 0
+    // LITHIUM_VERSION = 1
+    // BORON_VERSION = 3
+    public static final short FLUORINE_VERSION = 4;
+    public static final short ARGON_VERSION = 5;
+    public static final short CURRENT_VERSION = ARGON_VERSION;
+
+    private RaftVersions() {
+        // Hidden on purpose
+    }
 }
index 095e85cbe568ca3935c9a977af73c0564dea9370..8cf133c2ab73ba2c62a9b177d3ea26d802e06abe 100644 (file)
@@ -7,8 +7,8 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
-import akka.japi.Procedure;
 import java.util.List;
+import java.util.function.Consumer;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 
@@ -88,14 +88,15 @@ public interface ReplicatedLog {
      * Appends an entry to the in-memory log and persists it as well.
      *
      * @param replicatedLogEntry the entry to append
-     * @param callback the Procedure to be notified when persistence is complete (optional).
+     * @param callback the callback to be notified when persistence is complete (optional).
      * @param doAsync if true, the persistent actor can receive subsequent messages to process in between the persist
      *        call and the execution of the associated callback. If false, subsequent messages are stashed and get
-     *        delivered after persistence is complete and the associated callback is executed.
+     *        delivered after persistence is complete and the associated callback is executed. In either case the
+     *        callback is guaranteed to execute in the context of the actor associated with this log.
      * @return true if the entry was successfully appended, false otherwise.
      */
     boolean appendAndPersist(@NonNull ReplicatedLogEntry replicatedLogEntry,
-            @Nullable Procedure<ReplicatedLogEntry> callback, boolean doAsync);
+            @Nullable Consumer<ReplicatedLogEntry> callback, boolean doAsync);
 
     /**
      * Returns a list of log entries starting from the given index to the end of the log.
@@ -185,9 +186,20 @@ public interface ReplicatedLog {
     void snapshotPreCommit(long snapshotCapturedIndex, long snapshotCapturedTerm);
 
     /**
-     * Sets the Replicated log to state after snapshot success.
+     * Sets the Replicated log to state after snapshot success. This method is equivalent to
+     * {@code snapshotCommit(true)}.
      */
-    void snapshotCommit();
+    default void snapshotCommit() {
+        snapshotCommit(true);
+    }
+
+    /**
+     * Sets the Replicated log to state after snapshot success. Most users will want to use {@link #snapshotCommit()}
+     * instead.
+     *
+     * @param updateDataSize true if {@link #dataSize()} should also be updated
+     */
+    void snapshotCommit(boolean updateDataSize);
 
     /**
      * Restores the replicated log to a state in the event of a save snapshot failure.
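
Keeping the no-argument snapshotCommit() as a default method lets existing callers compile unchanged while new callers can skip the dataSize() update. A generic sketch of that interface-evolution pattern on a hypothetical interface:

// The old no-arg method becomes a default that delegates to the new, more general overload,
// so existing callers keep working without changes.
interface CommitLog {
    /** Equivalent to {@code commitSnapshot(true)}; retained for existing callers. */
    default void commitSnapshot() {
        commitSnapshot(true);
    }

    /** New entry point; {@code updateDataSize} controls whether bookkeeping is refreshed. */
    void commitSnapshot(boolean updateDataSize);
}

The same file also swaps akka.japi.Procedure for java.util.function.Consumer in appendAndPersist(), trading a callback type whose apply() throws Exception for the standard functional interface.
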
index 1348ffca9163adf786d4f715fce3ddf858605548..360f6b690376c1c413b1a2f04bc8a7e946400758 100644 (file)
@@ -8,7 +8,7 @@
 
 package org.opendaylight.controller.cluster.raft;
 
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
 /**
  * Represents one entry in the replicated log.
@@ -42,6 +42,15 @@ public interface ReplicatedLogEntry {
      */
     int size();
 
+    /**
+     * Return an estimate of the serialized size of this entry when passed through serialization. The estimate needs
+     * to be reasonably accurate and should err on the side of caution, reporting a slightly higher size in the face
+     * of uncertainty.
+     *
+     * @return An estimate of serialized size.
+     */
+    int serializedSize();
+
     /**
      * Checks if persistence is pending for this entry.
      *
index 45b0b3898e20cc3b3a5acbe523944bbfc0ee6c6a..6167aac6d2ad71da7225c388ce4bb68bbb139070 100644 (file)
@@ -9,9 +9,9 @@ package org.opendaylight.controller.cluster.raft;
 
 import static java.util.Objects.requireNonNull;
 
-import akka.japi.Procedure;
 import java.util.Collections;
 import java.util.List;
+import java.util.function.Consumer;
 import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 
@@ -42,10 +42,9 @@ final class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
 
     @Override
     public boolean removeFromAndPersist(final long logEntryIndex) {
-        // FIXME: Maybe this should be done after the command is saved
         long adjustedIndex = removeFrom(logEntryIndex);
         if (adjustedIndex >= 0) {
-            context.getPersistenceProvider().persist(new DeleteEntries(adjustedIndex), NoopProcedure.instance());
+            context.getPersistenceProvider().persist(new DeleteEntries(logEntryIndex), NoopProcedure.instance());
             return true;
         }
 
@@ -55,10 +54,14 @@ final class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
     @Override
     public boolean shouldCaptureSnapshot(final long logIndex) {
         final ConfigParams config = context.getConfigParams();
-        final long journalSize = logIndex + 1;
-        final long dataThreshold = context.getTotalMemory() * config.getSnapshotDataThresholdPercentage() / 100;
+        if ((logIndex + 1) % config.getSnapshotBatchCount() == 0) {
+            return true;
+        }
 
-        return journalSize % config.getSnapshotBatchCount() == 0 || getDataSizeForSnapshotCheck() > dataThreshold;
+        final long absoluteThreshold = config.getSnapshotDataThreshold();
+        final long dataThreshold = absoluteThreshold != 0 ? absoluteThreshold * ConfigParams.MEGABYTE
+                : context.getTotalMemory() * config.getSnapshotDataThresholdPercentage() / 100;
+        return getDataSizeForSnapshotCheck() > dataThreshold;
     }
 
     @Override
@@ -93,7 +96,7 @@ final class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
 
     @Override
     public boolean appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
-            final Procedure<ReplicatedLogEntry> callback, final boolean doAsync)  {
+            final Consumer<ReplicatedLogEntry> callback, final boolean doAsync)  {
 
         context.getLogger().debug("{}: Append log entry and persist {} ", context.getId(), replicatedLogEntry);
 
@@ -101,22 +104,29 @@ final class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
             return false;
         }
 
-        Procedure<ReplicatedLogEntry> persistCallback = persistedLogEntry -> {
-            context.getLogger().debug("{}: persist complete {}", context.getId(), persistedLogEntry);
-
-            dataSizeSinceLastSnapshot += persistedLogEntry.size();
-
-            if (callback != null) {
-                callback.apply(persistedLogEntry);
-            }
-        };
-
         if (doAsync) {
-            context.getPersistenceProvider().persistAsync(replicatedLogEntry, persistCallback);
+            context.getPersistenceProvider().persistAsync(replicatedLogEntry,
+                entry -> persistCallback(entry, callback));
         } else {
-            context.getPersistenceProvider().persist(replicatedLogEntry, persistCallback);
+            context.getPersistenceProvider().persist(replicatedLogEntry, entry -> syncPersistCallback(entry, callback));
         }
 
         return true;
     }
+
+    private void persistCallback(final ReplicatedLogEntry persistedLogEntry,
+            final Consumer<ReplicatedLogEntry> callback) {
+        context.getExecutor().execute(() -> syncPersistCallback(persistedLogEntry, callback));
+    }
+
+    private void syncPersistCallback(final ReplicatedLogEntry persistedLogEntry,
+            final Consumer<ReplicatedLogEntry> callback) {
+        context.getLogger().debug("{}: persist complete {}", context.getId(), persistedLogEntry);
+
+        dataSizeSinceLastSnapshot += persistedLogEntry.size();
+
+        if (callback != null) {
+            callback.accept(persistedLogEntry);
+        }
+    }
 }
index 8674ef4f2b910a26ac7eb76e90f9d594fe456ca0..57e6140fc9efc06bd0b343adce8ee852b896cbd4 100644 (file)
@@ -55,7 +55,7 @@ public class SnapshotManager implements SnapshotState {
     private CaptureSnapshot captureSnapshot;
     private long lastSequenceNumber = -1;
 
-    private Consumer<Optional<OutputStream>> createSnapshotProcedure;
+    private Consumer<Optional<OutputStream>> createSnapshotProcedure = null;
 
     private ApplySnapshot applySnapshot;
     private RaftActorSnapshotCohort snapshotCohort = NoopRaftActorSnapshotCohort.INSTANCE;
@@ -68,7 +68,7 @@ public class SnapshotManager implements SnapshotState {
      */
     public SnapshotManager(final RaftActorContext context, final Logger logger) {
         this.context = context;
-        this.log = logger;
+        log = logger;
     }
 
     public boolean isApplying() {
@@ -91,6 +91,11 @@ public class SnapshotManager implements SnapshotState {
         return currentState.capture(lastLogEntry, replicatedToAllIndex);
     }
 
+    @Override
+    public boolean captureWithForcedTrim(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex) {
+        return currentState.captureWithForcedTrim(lastLogEntry, replicatedToAllIndex);
+    }
+
     @Override
     public void apply(final ApplySnapshot snapshot) {
         currentState.apply(snapshot);
@@ -154,7 +159,8 @@ public class SnapshotManager implements SnapshotState {
      * @param replicatedToAllIndex the index of the last entry replicated to all followers.
      * @return a new CaptureSnapshot instance.
      */
-    public CaptureSnapshot newCaptureSnapshot(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex) {
+    public CaptureSnapshot newCaptureSnapshot(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex,
+                                              final boolean mandatoryTrim) {
         TermInformationReader lastAppliedTermInfoReader =
                 lastAppliedTermInformationReader.init(context.getReplicatedLog(), context.getLastApplied(),
                         lastLogEntry, hasFollowers());
@@ -170,21 +176,26 @@ public class SnapshotManager implements SnapshotState {
 
         List<ReplicatedLogEntry> unAppliedEntries = context.getReplicatedLog().getFrom(lastAppliedIndex + 1);
 
-        long lastLogEntryIndex = lastAppliedIndex;
-        long lastLogEntryTerm = lastAppliedTerm;
-        if (lastLogEntry != null) {
+        final long lastLogEntryIndex;
+        final long lastLogEntryTerm;
+        if (lastLogEntry == null) {
+            // When no journal entries are present (for example, two captureSnapshots executed right after one
+            // another with no new journal), we still want to preserve the index and term in the snapshot.
+            lastAppliedIndex = lastLogEntryIndex = context.getReplicatedLog().getSnapshotIndex();
+            lastAppliedTerm = lastLogEntryTerm = context.getReplicatedLog().getSnapshotTerm();
+
+            log.debug("{}: Capturing Snapshot : lastLogEntry is null. Using snapshot values lastAppliedIndex {} and "
+                    + "lastAppliedTerm {} instead.", persistenceId(), lastAppliedIndex, lastAppliedTerm);
+        } else {
             lastLogEntryIndex = lastLogEntry.getIndex();
             lastLogEntryTerm = lastLogEntry.getTerm();
-        } else {
-            log.debug("{}: Capturing Snapshot : lastLogEntry is null. Using lastAppliedIndex {} and "
-                    + "lastAppliedTerm {} instead.", persistenceId(), lastAppliedIndex, lastAppliedTerm);
         }
 
         return new CaptureSnapshot(lastLogEntryIndex, lastLogEntryTerm, lastAppliedIndex, lastAppliedTerm,
-                newReplicatedToAllIndex, newReplicatedToAllTerm, unAppliedEntries);
+                newReplicatedToAllIndex, newReplicatedToAllTerm, unAppliedEntries, mandatoryTrim);
     }
 
-    private class AbstractSnapshotState implements SnapshotState {
+    private abstract class AbstractSnapshotState implements SnapshotState {
 
         @Override
         public boolean isCapturing() {
@@ -204,6 +215,12 @@ public class SnapshotManager implements SnapshotState {
             return false;
         }
 
+        @Override
+        public boolean captureWithForcedTrim(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex) {
+            log.debug("captureWithForcedTrim should not be called in state {}", this);
+            return false;
+        }
+
         @Override
         public void apply(final ApplySnapshot snapshot) {
             log.debug("apply should not be called in state {}", this);
@@ -248,7 +265,7 @@ public class SnapshotManager implements SnapshotState {
                 //use the term of the temp-min, since we check for isPresent, entry will not be null
                 ReplicatedLogEntry entry = context.getReplicatedLog().get(tempMin);
                 context.getReplicatedLog().snapshotPreCommit(tempMin, entry.getTerm());
-                context.getReplicatedLog().snapshotCommit();
+                context.getReplicatedLog().snapshotCommit(false);
                 return tempMin;
             }
 
@@ -265,8 +282,7 @@ public class SnapshotManager implements SnapshotState {
         }
     }
 
-    private class Idle extends AbstractSnapshotState {
-
+    private final class Idle extends AbstractSnapshotState {
         @Override
         public boolean isCapturing() {
             return false;
@@ -274,8 +290,8 @@ public class SnapshotManager implements SnapshotState {
 
         @SuppressWarnings("checkstyle:IllegalCatch")
         private boolean capture(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex,
-                final String targetFollower) {
-            captureSnapshot = newCaptureSnapshot(lastLogEntry, replicatedToAllIndex);
+                final String targetFollower, final boolean mandatoryTrim) {
+            captureSnapshot = newCaptureSnapshot(lastLogEntry, replicatedToAllIndex, mandatoryTrim);
 
             OutputStream installSnapshotStream = null;
             if (targetFollower != null) {
@@ -290,12 +306,12 @@ public class SnapshotManager implements SnapshotState {
 
             log.debug("{}: lastSequenceNumber prior to capture: {}", persistenceId(), lastSequenceNumber);
 
-            SnapshotManager.this.currentState = CREATING;
+            currentState = CREATING;
 
             try {
                 createSnapshotProcedure.accept(Optional.ofNullable(installSnapshotStream));
             } catch (Exception e) {
-                SnapshotManager.this.currentState = IDLE;
+                currentState = IDLE;
                 log.error("Error creating snapshot", e);
                 return false;
             }
@@ -305,18 +321,23 @@ public class SnapshotManager implements SnapshotState {
 
         @Override
         public boolean capture(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex) {
-            return capture(lastLogEntry, replicatedToAllIndex, null);
+            return capture(lastLogEntry, replicatedToAllIndex, null, false);
         }
 
         @Override
         public boolean captureToInstall(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex,
                 final String targetFollower) {
-            return capture(lastLogEntry, replicatedToAllIndex, targetFollower);
+            return capture(lastLogEntry, replicatedToAllIndex, targetFollower, false);
+        }
+
+        @Override
+        public boolean captureWithForcedTrim(final ReplicatedLogEntry lastLogEntry, final long replicatedToAllIndex) {
+            return capture(lastLogEntry, replicatedToAllIndex, null, true);
         }
 
         @Override
         public void apply(final ApplySnapshot toApply) {
-            SnapshotManager.this.applySnapshot = toApply;
+            applySnapshot = toApply;
 
             lastSequenceNumber = context.getPersistenceProvider().getLastSequenceNumber();
 
@@ -324,7 +345,7 @@ public class SnapshotManager implements SnapshotState {
 
             context.getPersistenceProvider().saveSnapshot(toApply.getSnapshot());
 
-            SnapshotManager.this.currentState = PERSISTING;
+            currentState = PERSISTING;
         }
 
         @Override
@@ -338,8 +359,7 @@ public class SnapshotManager implements SnapshotState {
         }
     }
 
-    private class Creating extends AbstractSnapshotState {
-
+    private final class Creating extends AbstractSnapshotState {
         @Override
         public void persist(final Snapshot.State snapshotState, final Optional<OutputStream> installSnapshotStream,
                 final long totalMemory) {
@@ -357,24 +377,29 @@ public class SnapshotManager implements SnapshotState {
 
             log.info("{}: Persisting of snapshot done: {}", persistenceId(), snapshot);
 
-            long dataThreshold = totalMemory * context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
-            boolean dataSizeThresholdExceeded = context.getReplicatedLog().dataSize() > dataThreshold;
+            final ConfigParams config = context.getConfigParams();
+            final long absoluteThreshold = config.getSnapshotDataThreshold();
+            final long dataThreshold = absoluteThreshold != 0 ? absoluteThreshold * ConfigParams.MEGABYTE
+                    : totalMemory * config.getSnapshotDataThresholdPercentage() / 100;
 
-            boolean logSizeExceededSnapshotBatchCount =
-                    context.getReplicatedLog().size() >= context.getConfigParams().getSnapshotBatchCount();
+            final boolean dataSizeThresholdExceeded = context.getReplicatedLog().dataSize() > dataThreshold;
+            final boolean logSizeExceededSnapshotBatchCount =
+                    context.getReplicatedLog().size() >= config.getSnapshotBatchCount();
 
             final RaftActorBehavior currentBehavior = context.getCurrentBehavior();
-            if (dataSizeThresholdExceeded || logSizeExceededSnapshotBatchCount) {
+            if (dataSizeThresholdExceeded || logSizeExceededSnapshotBatchCount || captureSnapshot.isMandatoryTrim()) {
                 if (log.isDebugEnabled()) {
                     if (dataSizeThresholdExceeded) {
                         log.debug("{}: log data size {} exceeds the memory threshold {} - doing snapshotPreCommit "
                                 + "with index {}", context.getId(), context.getReplicatedLog().dataSize(),
                                 dataThreshold, captureSnapshot.getLastAppliedIndex());
-                    } else {
+                    } else if (logSizeExceededSnapshotBatchCount) {
                         log.debug("{}: log size {} exceeds the snapshot batch count {} - doing snapshotPreCommit with "
                                 + "index {}", context.getId(), context.getReplicatedLog().size(),
-                                context.getConfigParams().getSnapshotBatchCount(),
-                                captureSnapshot.getLastAppliedIndex());
+                                config.getSnapshotBatchCount(), captureSnapshot.getLastAppliedIndex());
+                    } else {
+                        log.debug("{}: user triggered or root overwrite snapshot encountered, trimming log up to "
+                                + "last applied index {}", context.getId(), captureSnapshot.getLastAppliedIndex());
                     }
                 }
 
@@ -413,7 +438,8 @@ public class SnapshotManager implements SnapshotState {
             if (installSnapshotStream.isPresent()) {
                 if (context.getId().equals(currentBehavior.getLeaderId())) {
                     try {
-                        ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.get()).asByteSource();
+                        ByteSource snapshotBytes = ((FileBackedOutputStream)installSnapshotStream.orElseThrow())
+                            .asByteSource();
                         currentBehavior.handleMessage(context.getActor(),
                                 new SendInstallSnapshot(snapshot, snapshotBytes));
                     } catch (IOException e) {
@@ -421,12 +447,12 @@ public class SnapshotManager implements SnapshotState {
                                 context.getId(), e);
                     }
                 } else {
-                    ((FileBackedOutputStream)installSnapshotStream.get()).cleanup();
+                    ((FileBackedOutputStream)installSnapshotStream.orElseThrow()).cleanup();
                 }
             }
 
             captureSnapshot = null;
-            SnapshotManager.this.currentState = PERSISTING;
+            currentState = PERSISTING;
         }
 
         @Override
@@ -436,8 +462,7 @@ public class SnapshotManager implements SnapshotState {
 
     }
 
-    private class Persisting extends AbstractSnapshotState {
-
+    private final class Persisting extends AbstractSnapshotState {
         @Override
         @SuppressWarnings("checkstyle:IllegalCatch")
         public void commit(final long sequenceNumber, final long timeStamp) {
@@ -469,7 +494,7 @@ public class SnapshotManager implements SnapshotState {
                 context.getReplicatedLog().snapshotCommit();
             }
 
-            context.getPersistenceProvider().deleteSnapshots(new SnapshotSelectionCriteria(sequenceNumber,
+            context.getPersistenceProvider().deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(),
                     timeStamp - 1, 0L, 0L));
 
             context.getPersistenceProvider().deleteMessages(lastSequenceNumber);
@@ -498,7 +523,7 @@ public class SnapshotManager implements SnapshotState {
         private void snapshotComplete() {
             lastSequenceNumber = -1;
             applySnapshot = null;
-            SnapshotManager.this.currentState = IDLE;
+            currentState = IDLE;
 
             context.getActor().tell(SnapshotComplete.INSTANCE, context.getActor());
         }
@@ -516,15 +541,15 @@ public class SnapshotManager implements SnapshotState {
         long getTerm();
     }
 
-    static class LastAppliedTermInformationReader implements TermInformationReader {
+    static final class LastAppliedTermInformationReader implements TermInformationReader {
         private long index;
         private long term;
 
         LastAppliedTermInformationReader init(final ReplicatedLog log, final long originalIndex,
                 final ReplicatedLogEntry lastLogEntry, final boolean hasFollowers) {
             ReplicatedLogEntry entry = log.get(originalIndex);
-            this.index = -1L;
-            this.term = -1L;
+            index = -1L;
+            term = -1L;
             if (!hasFollowers) {
                 if (lastLogEntry != null) {
                     // since we have persisted the last-log-entry to persistent journal before the capture,
@@ -544,23 +569,23 @@ public class SnapshotManager implements SnapshotState {
 
         @Override
         public long getIndex() {
-            return this.index;
+            return index;
         }
 
         @Override
         public long getTerm() {
-            return this.term;
+            return term;
         }
     }
 
-    private static class ReplicatedToAllTermInformationReader implements TermInformationReader {
+    private static final class ReplicatedToAllTermInformationReader implements TermInformationReader {
         private long index;
         private long term;
 
         ReplicatedToAllTermInformationReader init(final ReplicatedLog log, final long originalIndex) {
             ReplicatedLogEntry entry = log.get(originalIndex);
-            this.index = -1L;
-            this.term = -1L;
+            index = -1L;
+            term = -1L;
 
             if (entry != null) {
                 index = entry.getIndex();
@@ -572,12 +597,12 @@ public class SnapshotManager implements SnapshotState {
 
         @Override
         public long getIndex() {
-            return this.index;
+            return index;
         }
 
         @Override
         public long getTerm() {
-            return this.term;
+            return term;
         }
     }
 }
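
Within the Creating state, the log is now trimmed when any of three conditions holds, the new one being the mandatoryTrim flag that captureWithForcedTrim() sets for user-triggered or root-overwrite snapshots. A compact sketch of that predicate; the method itself is illustrative, while the parameter names mirror the hunk above:

final class TrimDecisionSketch {
    // Trim the in-memory log when the data size or entry count exceeds its threshold,
    // or when the capture explicitly demands a trim.
    static boolean shouldTrimLog(final boolean dataSizeThresholdExceeded,
            final boolean logSizeExceededSnapshotBatchCount, final boolean mandatoryTrim) {
        return dataSizeThresholdExceeded || logSizeExceededSnapshotBatchCount || mandatoryTrim;
    }

    public static void main(final String[] args) {
        // A forced-trim capture trims even when neither size-based condition is met
        System.out.println(shouldTrimLog(false, false, true));  // true
    }
}
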
index 0a702741d8894ceb46386fba63d28ca39e1e51af..acb6e01230ecd64b8b6625f3f1c54eae227bd629 100644 (file)
@@ -46,6 +46,14 @@ public interface SnapshotState {
      */
     boolean captureToInstall(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex, String targetFollower);
 
+    /**
+     * Initiates a capture snapshot, while enforcing trimming of the log up to lastAppliedIndex.
+     * @param lastLogEntry the last entry in the replicated log
+     * @param replicatedToAllIndex the current replicatedToAllIndex
+     * @return true if capture was started
+     */
+    boolean captureWithForcedTrim(ReplicatedLogEntry lastLogEntry, long replicatedToAllIndex);
+
     /**
      * Applies a snapshot on a follower that was installed by the leader.
      *
index 95606657562ce865e3664d2973e248a1f069a3ca..93b5f04df33d8ee92ccef6c6b4a007ad56189859 100644 (file)
@@ -7,8 +7,10 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.Cancellable;
-import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -26,11 +28,12 @@ abstract class TimedRunnable implements Runnable {
     private final Cancellable cancelTimer;
     private boolean canRun = true;
 
-    TimedRunnable(FiniteDuration timeout, RaftActor actor) {
-        Preconditions.checkNotNull(timeout);
-        Preconditions.checkNotNull(actor);
-        cancelTimer = actor.getContext().system().scheduler().scheduleOnce(timeout, actor.self(),
-                (Runnable) this::cancel, actor.getContext().system().dispatcher(), actor.self());
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "https://github.com/spotbugs/spotbugs/issues/1867")
+    TimedRunnable(final FiniteDuration timeout, final RaftActor actor) {
+        cancelTimer = requireNonNull(actor).getContext().system().scheduler()
+            .scheduleOnce(requireNonNull(timeout), actor.self(), (Runnable) this::cancel,
+                actor.getContext().system().dispatcher(), actor.self());
     }
 
     @Override
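
TimedRunnable now uses requireNonNull() and suppresses a SpotBugs false positive triggered by the this::cancel method reference in the constructor. A rough, self-contained analogue of the same shape built on a plain ScheduledExecutorService instead of the Akka scheduler; the class and method names are stand-ins:

import static java.util.Objects.requireNonNull;

import java.time.Duration;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

abstract class TimedTaskSketch implements Runnable {
    private final Future<?> cancelTimer;
    private boolean canRun = true;

    TimedTaskSketch(final Duration timeout, final ScheduledExecutorService scheduler) {
        // requireNonNull() replaces Preconditions.checkNotNull(); the this::cancel method
        // reference is what trips the SpotBugs overridable-call-in-constructor check.
        cancelTimer = requireNonNull(scheduler).schedule((Runnable) this::cancel,
            requireNonNull(timeout).toMillis(), TimeUnit.MILLISECONDS);
    }

    @Override
    public void run() {
        if (canRun) {
            cancelTimer.cancel(false);
            doRun();
        }
    }

    private void cancel() {
        canRun = false;
    }

    protected abstract void doRun();
}
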
index b06f8f295ef6598200d840c988acd0842f7c3e5d..0fd48edf811923d5d96f7f2cb301a193f0da01a8 100644 (file)
@@ -21,10 +21,11 @@ public class CaptureSnapshot implements ControlMessage {
     private final long replicatedToAllIndex;
     private final long replicatedToAllTerm;
     private final List<ReplicatedLogEntry> unAppliedEntries;
+    private final boolean mandatoryTrim;
 
     public CaptureSnapshot(long lastIndex, long lastTerm, long lastAppliedIndex,
             long lastAppliedTerm, long replicatedToAllIndex, long replicatedToAllTerm,
-            List<ReplicatedLogEntry> unAppliedEntries) {
+            List<ReplicatedLogEntry> unAppliedEntries, boolean mandatoryTrim) {
         this.lastIndex = lastIndex;
         this.lastTerm = lastTerm;
         this.lastAppliedIndex = lastAppliedIndex;
@@ -33,6 +34,7 @@ public class CaptureSnapshot implements ControlMessage {
         this.replicatedToAllTerm = replicatedToAllTerm;
         this.unAppliedEntries = unAppliedEntries != null ? unAppliedEntries :
             Collections.<ReplicatedLogEntry>emptyList();
+        this.mandatoryTrim = mandatoryTrim;
     }
 
     public long getLastAppliedIndex() {
@@ -63,6 +65,10 @@ public class CaptureSnapshot implements ControlMessage {
         return unAppliedEntries;
     }
 
+    public boolean isMandatoryTrim() {
+        return mandatoryTrim;
+    }
+
     @Override
     public String toString() {
         return "CaptureSnapshot [lastAppliedIndex=" + lastAppliedIndex
@@ -72,7 +78,8 @@ public class CaptureSnapshot implements ControlMessage {
                 + ", installSnapshotInitiated="
                 + ", replicatedToAllIndex=" + replicatedToAllIndex
                 + ", replicatedToAllTerm=" + replicatedToAllTerm
-                + ", unAppliedEntries size=" + unAppliedEntries.size() + "]";
+                + ", unAppliedEntries size=" + unAppliedEntries.size()
+                + ", mandatoryTrim=" + mandatoryTrim + "]";
     }
 
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/EmptyExternalizableProxy.java
deleted file mode 100644 (file)
index 091237a..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.raft.base.messages;
-
-import com.google.common.base.Preconditions;
-import java.io.Externalizable;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-/**
- * Abstract base that implements Externalizable with no-op methods that is intended for classes that use the
- * externalizable proxy pattern but have no data to serialize and read-resolve to a static instance.
- *
- * @author Thomas Pantelis
- */
-public abstract class EmptyExternalizableProxy implements Externalizable {
-    private static final long serialVersionUID = 1L;
-
-    private final Object readResolveTo;
-
-    protected EmptyExternalizableProxy(Object readResolveTo) {
-        this.readResolveTo = Preconditions.checkNotNull(readResolveTo);
-    }
-
-    @Override
-    public void writeExternal(ObjectOutput out) {
-    }
-
-    @Override
-    public void readExternal(ObjectInput in) {
-    }
-
-    protected Object readResolve() {
-        return readResolveTo;
-    }
-}
index c58d86354a917d9fc968de3eff2b97ba6107d356..edd4986a47b4b36b036f2f0ea209ac32aab69b0c 100644 (file)
@@ -5,40 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.base.messages;
 
 import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.yangtools.concepts.Identifier;
 
-public class Replicate {
-    private final ActorRef clientActor;
-    private final Identifier identifier;
-    private final ReplicatedLogEntry replicatedLogEntry;
-    private final boolean sendImmediate;
-
-    public Replicate(ActorRef clientActor, Identifier identifier, ReplicatedLogEntry replicatedLogEntry,
-            boolean sendImmediate) {
-        this.clientActor = clientActor;
-        this.identifier = identifier;
-        this.replicatedLogEntry = replicatedLogEntry;
-        this.sendImmediate = sendImmediate;
-    }
-
-    public ActorRef getClientActor() {
-        return clientActor;
-    }
-
-    public Identifier getIdentifier() {
-        return identifier;
-    }
-
-    public ReplicatedLogEntry getReplicatedLogEntry() {
-        return replicatedLogEntry;
-    }
-
-    public boolean isSendImmediate() {
-        return sendImmediate;
-    }
+public record Replicate(long logIndex, boolean sendImmediate, ActorRef clientActor, Identifier identifier) {
+    // Nothing else here
 }
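
For reference, a minimal sketch of how a caller constructs and reads the record-based Replicate (the 42L index and the null client actor/identifier are illustrative values only; a null client actor simply means the leader will not create a ClientRequestTracker for this entry):

    package org.opendaylight.controller.cluster.raft.base.messages;

    // Hypothetical demonstration class, not part of the change.
    final class ReplicateSketch {
        private ReplicateSketch() {
            // utility class
        }

        static void demo() {
            // Build a replication request for log index 42 that should be sent out immediately.
            final Replicate replicate = new Replicate(42L, true, null, null);

            // Components are read through the generated record accessors.
            final long index = replicate.logIndex();              // 42
            final boolean immediate = replicate.sendImmediate();  // true
            assert index == 42L && immediate;
        }
    }
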
index b212250dd4984828d3c57f8a70f0a177da5cff5c..2b7684481955110bab875403db0447554583cc7e 100644 (file)
@@ -16,25 +16,18 @@ import java.io.Serializable;
  * @author Thomas Pantelis
  */
 public final class TimeoutNow implements Serializable, ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     public static final TimeoutNow INSTANCE = new TimeoutNow();
 
     private TimeoutNow() {
         // Hidden on purpose
     }
 
-    private Object writeReplace() {
-        return new Proxy();
-    }
-
-    private static class Proxy extends EmptyExternalizableProxy {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            super(INSTANCE);
-        }
+    @java.io.Serial
+    @SuppressWarnings("static-method")
+    private Object readResolve() {
+        return INSTANCE;
     }
 }
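
A minimal round-trip sketch of the property the new readResolve() provides: Java serialization funnels deserialized instances back to the shared singleton, so reference equality holds (the wrapper class below is hypothetical and only illustrates the contract):

    package org.opendaylight.controller.cluster.raft.base.messages;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    // Hypothetical demonstration class, not part of the change.
    final class TimeoutNowRoundTrip {
        private TimeoutNowRoundTrip() {
            // utility class
        }

        static void demo() throws IOException, ClassNotFoundException {
            final ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
                oos.writeObject(TimeoutNow.INSTANCE);
            }
            try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
                // readResolve() returns INSTANCE, hence reference equality, not just equals()
                assert ois.readObject() == TimeoutNow.INSTANCE;
            }
        }
    }
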
index fef6cc8b7c57a142d683d1e0f27214ad751fd9d3..7514dccff40c53aab2621cb56314859baee80d89 100644 (file)
@@ -13,7 +13,6 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
 import com.google.common.io.ByteSource;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.IOException;
@@ -21,11 +20,12 @@ import java.io.ObjectOutputStream;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.OptionalInt;
 import java.util.Queue;
 import java.util.concurrent.TimeUnit;
 import org.eclipse.jdt.annotation.Nullable;
@@ -33,19 +33,21 @@ import org.opendaylight.controller.cluster.io.SharedFileBackedOutputStream;
 import org.opendaylight.controller.cluster.messaging.MessageSlicer;
 import org.opendaylight.controller.cluster.messaging.SliceOptions;
 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
-import org.opendaylight.controller.cluster.raft.ClientRequestTrackerImpl;
 import org.opendaylight.controller.cluster.raft.FollowerLogInformation;
 import org.opendaylight.controller.cluster.raft.PeerInfo;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.VotingState;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.CheckConsensusReached;
 import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
 import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
@@ -98,7 +100,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     private final MessageSlicer appendEntriesMessageSlicer;
 
     private Cancellable heartbeatSchedule = null;
-    private Optional<SnapshotHolder> snapshotHolder = Optional.absent();
+    private Optional<SnapshotHolder> snapshotHolder = Optional.empty();
     private int minReplicationCount;
 
     protected AbstractLeader(final RaftActorContext context, final RaftState state,
@@ -106,7 +108,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         super(context, state);
 
         appendEntriesMessageSlicer = MessageSlicer.builder().logContext(logName())
-            .messageSliceSize(context.getConfigParams().getSnapshotChunkSize())
+            .messageSliceSize(context.getConfigParams().getMaximumMessageSliceSize())
             .expireStateAfterInactivity(context.getConfigParams().getElectionTimeOutInterval().toMillis() * 3,
                     TimeUnit.MILLISECONDS).build();
 
@@ -162,7 +164,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         followerToLog.remove(followerId);
     }
 
-    public void updateMinReplicaCount() {
+    public final void updateMinReplicaCount() {
         int numVoting = 0;
         for (PeerInfo peer: context.getPeers()) {
             if (peer.isVoting()) {
@@ -186,7 +188,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
 
     @VisibleForTesting
     void setSnapshotHolder(final @Nullable SnapshotHolder snapshotHolder) {
-        this.snapshotHolder = Optional.fromNullable(snapshotHolder);
+        this.snapshotHolder = Optional.ofNullable(snapshotHolder);
     }
 
     @VisibleForTesting
@@ -217,6 +219,13 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             return this;
         }
 
+        final var followerRaftVersion = appendEntriesReply.getRaftVersion();
+        if (followerRaftVersion < RaftVersions.FLUORINE_VERSION) {
+            log.warn("{}: handleAppendEntriesReply - ignoring reply from follower {} raft version {}", logName(),
+                followerId, followerRaftVersion);
+            return this;
+        }
+
         final long lastActivityNanos = followerLogInformation.nanosSinceLastActivity();
         if (lastActivityNanos > context.getConfigParams().getElectionTimeOutInterval().toNanos()) {
             log.warn("{} : handleAppendEntriesReply delayed beyond election timeout, "
@@ -227,7 +236,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
 
         followerLogInformation.markFollowerActive();
         followerLogInformation.setPayloadVersion(appendEntriesReply.getPayloadVersion());
-        followerLogInformation.setRaftVersion(appendEntriesReply.getRaftVersion());
+        followerLogInformation.setRaftVersion(followerRaftVersion);
         followerLogInformation.setNeedsLeaderAddress(appendEntriesReply.isNeedsLeaderAddress());
 
         long followerLastLogIndex = appendEntriesReply.getLogLastIndex();
@@ -432,27 +441,52 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         super.performSnapshotWithoutCapture(minReplicatedToAllIndex);
     }
 
-    @Override
-    protected ClientRequestTracker removeClientRequestTracker(final long logIndex) {
-        final Iterator<ClientRequestTracker> it = trackers.iterator();
+    /**
+     * Removes and returns the ClientRequestTracker for the specified log index.
+     * @param logIndex the log index
+     * @return the ClientRequestTracker or null if none available
+     */
+    private ClientRequestTracker removeClientRequestTracker(final long logIndex) {
+        final var it = trackers.iterator();
         while (it.hasNext()) {
-            final ClientRequestTracker t = it.next();
-            if (t.getIndex() == logIndex) {
+            final var tracker = it.next();
+            if (tracker.logIndex() == logIndex) {
                 it.remove();
-                return t;
+                return tracker;
             }
         }
-
         return null;
     }
 
     @Override
-    protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender,
-        final RequestVoteReply requestVoteReply) {
+    final ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
+        // first check whether a ClientRequestTracker exists for this entry.
+        // If it does that means the leader wasn't dropped before the transaction applied.
+        // That means that this transaction can be safely applied as a local transaction since we
+        // have the ClientRequestTracker.
+        final var tracker = removeClientRequestTracker(entry.getIndex());
+        if (tracker != null) {
+            return new ApplyState(tracker.clientActor(), tracker.identifier(), entry);
+        }
+
+        // Tracker is missing, which means we switched behaviours between replicate and applystate
+        // and became the leader again. We still want to apply this as a local modification because
+        // we have resumed leadership with that log entry having been committed.
+        if (entry.getData() instanceof IdentifiablePayload<?> identifiable) {
+            return new ApplyState(null, identifiable.getIdentifier(), entry);
+        }
+
+        return new ApplyState(null, null, entry);
+    }
+
+    @Override
+    protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender, final RequestVoteReply requestVoteReply) {
         return this;
     }
 
-    protected void beforeSendHeartbeat(){}
+    protected void beforeSendHeartbeat() {
+        // No-op
+    }
 
     @Override
     public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
@@ -462,47 +496,45 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             return this;
         }
 
-        if (message instanceof RaftRPC) {
-            RaftRPC rpc = (RaftRPC) message;
-            // If RPC request or response contains term T > currentTerm:
-            // set currentTerm = T, convert to follower (§5.1)
-            // This applies to all RPC messages and responses
-            if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
-                log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
-                        logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
-
-                context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
-
-                // This is a special case. Normally when stepping down as leader we don't process and reply to the
-                // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
-                // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
-                // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
-                // state and starting a new election and grabbing leadership back before the other candidate node can
-                // start a new election due to lack of responses. This case would only occur if there isn't a majority
-                // of other nodes available that can elect the requesting candidate. Since we're transferring
-                // leadership, we should make every effort to get the requesting node elected.
-                if (message instanceof RequestVote && context.getRaftActorLeadershipTransferCohort() != null) {
-                    log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
-                    super.handleMessage(sender, message);
-                }
-
-                return internalSwitchBehavior(RaftState.Follower);
+        // If RPC request or response contains term T > currentTerm:
+        // set currentTerm = T, convert to follower (§5.1)
+        // This applies to all RPC messages and responses
+        if (message instanceof RaftRPC rpc && rpc.getTerm() > context.getTermInformation().getCurrentTerm()
+                && shouldUpdateTerm(rpc)) {
+
+            log.info("{}: Term {} in \"{}\" message is greater than leader's term {} - switching to Follower",
+                logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
+
+            context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
+
+            // This is a special case. Normally when stepping down as leader we don't process and reply to the
+            // RaftRPC as per raft. But if we're in the process of transferring leadership and we get a
+            // RequestVote, process the RequestVote before switching to Follower. This enables the requesting
+            // candidate node to be elected the leader faster and avoids us possibly timing out in the Follower
+            // state and starting a new election and grabbing leadership back before the other candidate node can
+            // start a new election due to lack of responses. This case would only occur if there isn't a majority
+            // of other nodes available that can elect the requesting candidate. Since we're transferring
+            // leadership, we should make every effort to get the requesting node elected.
+            if (rpc instanceof RequestVote requestVote && context.getRaftActorLeadershipTransferCohort() != null) {
+                log.debug("{}: Leadership transfer in progress - processing RequestVote", logName());
+                requestVote(sender, requestVote);
             }
+
+            return internalSwitchBehavior(RaftState.Follower);
         }
 
         if (message instanceof SendHeartBeat) {
             beforeSendHeartbeat();
             sendHeartBeat();
             scheduleHeartBeat(context.getConfigParams().getHeartBeatInterval());
-        } else if (message instanceof SendInstallSnapshot) {
-            SendInstallSnapshot sendInstallSnapshot = (SendInstallSnapshot) message;
+        } else if (message instanceof SendInstallSnapshot sendInstallSnapshot) {
             setSnapshotHolder(new SnapshotHolder(sendInstallSnapshot.getSnapshot(),
                 sendInstallSnapshot.getSnapshotBytes()));
             sendInstallSnapshot();
-        } else if (message instanceof Replicate) {
-            replicate((Replicate) message);
-        } else if (message instanceof InstallSnapshotReply) {
-            handleInstallSnapshotReply((InstallSnapshotReply) message);
+        } else if (message instanceof Replicate replicate) {
+            replicate(replicate);
+        } else if (message instanceof InstallSnapshotReply installSnapshotReply) {
+            handleInstallSnapshotReply(installSnapshotReply);
         } else if (message instanceof CheckConsensusReached) {
             possiblyUpdateCommitIndex();
         } else {
@@ -533,6 +565,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             return;
         }
 
+        installSnapshotState.resetChunkTimer();
         followerLogInformation.markFollowerActive();
 
         if (installSnapshotState.getChunkIndex() == reply.getChunkIndex()) {
@@ -541,7 +574,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                 if (installSnapshotState.isLastChunk(reply.getChunkIndex())) {
                     //this was the last chunk reply
 
-                    long followerMatchIndex = snapshotHolder.get().getLastIncludedIndex();
+                    long followerMatchIndex = snapshotHolder.orElseThrow().getLastIncludedIndex();
                     followerLogInformation.setMatchIndex(followerMatchIndex);
                     followerLogInformation.setNextIndex(followerMatchIndex + 1);
                     followerLogInformation.clearLeaderInstallSnapshotState();
@@ -609,17 +642,16 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
     }
 
     private void replicate(final Replicate replicate) {
-        long logIndex = replicate.getReplicatedLogEntry().getIndex();
+        final long logIndex = replicate.logIndex();
 
-        log.debug("{}: Replicate message: identifier: {}, logIndex: {}, payload: {}, isSendImmediate: {}", logName(),
-                replicate.getIdentifier(), logIndex, replicate.getReplicatedLogEntry().getData().getClass(),
-                replicate.isSendImmediate());
+        log.debug("{}: Replicate message: identifier: {}, logIndex: {}, isSendImmediate: {}", logName(),
+                replicate.identifier(), logIndex, replicate.sendImmediate());
 
         // Create a tracker entry; we will use it later to notify the
         // client actor
-        if (replicate.getClientActor() != null) {
-            trackers.add(new ClientRequestTrackerImpl(replicate.getClientActor(), replicate.getIdentifier(),
-                    logIndex));
+        final var clientActor = replicate.clientActor();
+        if (clientActor != null) {
+            trackers.add(new ClientRequestTracker(logIndex, clientActor, replicate.identifier()));
         }
 
         boolean applyModificationToState = !context.anyVotingPeers()
@@ -630,7 +662,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
             applyLogToStateMachine(logIndex);
         }
 
-        if (replicate.isSendImmediate() && !followerToLog.isEmpty()) {
+        if (replicate.sendImmediate() && !followerToLog.isEmpty()) {
             sendAppendEntries(0, false);
         }
     }
@@ -664,10 +696,19 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
 
             LeaderInstallSnapshotState installSnapshotState = followerLogInformation.getInstallSnapshotState();
             if (installSnapshotState != null) {
+
                 // if install snapshot is in process, then send the next chunk if possible
-                if (isFollowerActive && installSnapshotState.canSendNextChunk()) {
-                    sendSnapshotChunk(followerActor, followerLogInformation);
-                } else if (sendHeartbeat) {
+                if (isFollowerActive) {
+                    // 30 seconds with default settings, can be modified via heartbeat or election timeout factor
+                    FiniteDuration snapshotReplyTimeout = context.getConfigParams().getHeartBeatInterval()
+                            .$times(context.getConfigParams().getElectionTimeoutFactor() * 3);
+
+                    if (installSnapshotState.isChunkTimedOut(snapshotReplyTimeout)) {
+                        sendAppendEntries = !resendSnapshotChunk(followerActor, followerLogInformation);
+                    } else if (installSnapshotState.canSendNextChunk()) {
+                        sendSnapshotChunk(followerActor, followerLogInformation);
+                    }
+                } else if (sendHeartbeat || followerLogInformation.hasStaleCommitIndex(context.getCommitIndex())) {
                     // we send a heartbeat even if we have not received a reply for the last chunk
                     sendAppendEntries = true;
                 }
@@ -688,7 +729,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                     log.debug("{}: sendAppendEntries: {} is present for follower {}", logName(),
                             followerNextIndex, followerId);
 
-                    if (followerLogInformation.okToReplicate()) {
+                    if (followerLogInformation.okToReplicate(context.getCommitIndex())) {
                         entries = getEntriesToSend(followerLogInformation, followerActor);
                         sendAppendEntries = true;
                     }
@@ -716,7 +757,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                                 context.getReplicatedLog().size());
                     }
 
-                } else if (sendHeartbeat) {
+                } else if (sendHeartbeat || followerLogInformation.hasStaleCommitIndex(context.getCommitIndex())) {
                     // we send an AppendEntries, even if the follower is inactive
                     // in-order to update the followers timestamp, in case it becomes active again
                     sendAppendEntries = true;
@@ -735,14 +776,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         // Try to get all the entries in the journal but not exceeding the max data size for a single AppendEntries
         // message.
         int maxEntries = (int) context.getReplicatedLog().size();
-        final int maxDataSize = context.getConfigParams().getSnapshotChunkSize();
+        final int maxDataSize = context.getConfigParams().getMaximumMessageSliceSize();
         final long followerNextIndex = followerLogInfo.getNextIndex();
         List<ReplicatedLogEntry> entries = context.getReplicatedLog().getFrom(followerNextIndex,
                 maxEntries, maxDataSize);
 
         // If the first entry's size exceeds the max data size threshold, it will be returned from the call above. If
         // that is the case, then we need to slice it into smaller chunks.
-        if (!(entries.size() == 1 && entries.get(0).getData().size() > maxDataSize)) {
+        if (entries.size() != 1 || entries.get(0).getData().serializedSize() <= maxDataSize) {
             // Don't need to slice.
             return entries;
         }
@@ -827,6 +868,7 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                     appendEntries);
         }
 
+        followerLogInformation.setSentCommitIndex(leaderCommitIndex);
         followerActor.tell(appendEntries, actor());
     }
 
@@ -862,10 +904,10 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         }
 
         boolean captureInitiated = context.getSnapshotManager().captureToInstall(context.getReplicatedLog().last(),
-            this.getReplicatedToAllIndex(), followerId);
+            getReplicatedToAllIndex(), followerId);
         if (captureInitiated) {
             followerLogInfo.setLeaderInstallSnapshotState(new LeaderInstallSnapshotState(
-                context.getConfigParams().getSnapshotChunkSize(), logName()));
+                context.getConfigParams().getMaximumMessageSliceSize(), logName()));
         }
 
         return captureInitiated;
@@ -907,14 +949,14 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         if (snapshotHolder.isPresent()) {
             LeaderInstallSnapshotState installSnapshotState = followerLogInfo.getInstallSnapshotState();
             if (installSnapshotState == null) {
-                installSnapshotState = new LeaderInstallSnapshotState(context.getConfigParams().getSnapshotChunkSize(),
-                        logName());
+                installSnapshotState = new LeaderInstallSnapshotState(
+                        context.getConfigParams().getMaximumMessageSliceSize(), logName());
                 followerLogInfo.setLeaderInstallSnapshotState(installSnapshotState);
             }
 
             try {
                 // Ensure the snapshot bytes are set - this is a no-op.
-                installSnapshotState.setSnapshotBytes(snapshotHolder.get().getSnapshotBytes());
+                installSnapshotState.setSnapshotBytes(snapshotHolder.orElseThrow().getSnapshotBytes());
 
                 if (!installSnapshotState.canSendNextChunk()) {
                     return;
@@ -926,31 +968,63 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
                         nextSnapshotChunk.length);
 
                 int nextChunkIndex = installSnapshotState.incrementChunkIndex();
-                Optional<ServerConfigurationPayload> serverConfig = Optional.absent();
+                Optional<ServerConfigurationPayload> serverConfig = Optional.empty();
                 if (installSnapshotState.isLastChunk(nextChunkIndex)) {
-                    serverConfig = Optional.fromNullable(context.getPeerServerInfo(true));
+                    serverConfig = Optional.ofNullable(context.getPeerServerInfo(true));
                 }
 
-                followerActor.tell(
-                    new InstallSnapshot(currentTerm(), context.getId(),
-                        snapshotHolder.get().getLastIncludedIndex(),
-                        snapshotHolder.get().getLastIncludedTerm(),
-                        nextSnapshotChunk,
-                        nextChunkIndex,
-                        installSnapshotState.getTotalChunks(),
-                        Optional.of(installSnapshotState.getLastChunkHashCode()),
-                        serverConfig
-                    ).toSerializable(followerLogInfo.getRaftVersion()),
-                    actor()
-                );
+                sendSnapshotChunk(followerActor, followerLogInfo, nextSnapshotChunk, nextChunkIndex, serverConfig);
+
+                log.debug("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}", logName(), followerActor.path(),
+                        installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks());
 
             } catch (IOException e) {
-                throw new RuntimeException(e);
+                log.warn("{}: Unable to send chunk: {}/{}. Reseting snapshot progress. Snapshot state: {}", logName(),
+                        installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks(),
+                        installSnapshotState, e);
+                installSnapshotState.reset();
             }
+        }
+    }
+
+    private void sendSnapshotChunk(final ActorSelection followerActor, final FollowerLogInformation followerLogInfo,
+                                   final byte[] snapshotChunk, final int chunkIndex,
+                                   final Optional<ServerConfigurationPayload> serverConfig) {
+        LeaderInstallSnapshotState installSnapshotState = followerLogInfo.getInstallSnapshotState();
+
+        installSnapshotState.startChunkTimer();
+        followerActor.tell(
+                new InstallSnapshot(currentTerm(), context.getId(),
+                        snapshotHolder.orElseThrow().getLastIncludedIndex(),
+                        snapshotHolder.orElseThrow().getLastIncludedTerm(),
+                        snapshotChunk,
+                        chunkIndex,
+                        installSnapshotState.getTotalChunks(),
+                        OptionalInt.of(installSnapshotState.getLastChunkHashCode()),
+                        serverConfig,
+                        followerLogInfo.getRaftVersion()),
+                actor()
+        );
+    }
 
-            log.debug("{}: InstallSnapshot sent to follower {}, Chunk: {}/{}", logName(), followerActor.path(),
-                installSnapshotState.getChunkIndex(), installSnapshotState.getTotalChunks());
+    private boolean resendSnapshotChunk(final ActorSelection followerActor,
+                                        final FollowerLogInformation followerLogInfo) {
+        if (!snapshotHolder.isPresent()) {
+            // Seems like we should never hit this case, but just in case we do, reset the snapshot progress so that it
+            // can restart from the next AppendEntries.
+            log.warn("{}: Attempting to resend snapshot with no snapshot holder present.", logName());
+            followerLogInfo.clearLeaderInstallSnapshotState();
+            return false;
         }
+
+        LeaderInstallSnapshotState installSnapshotState = followerLogInfo.getInstallSnapshotState();
+        // we are resending, timer needs to be reset
+        installSnapshotState.resetChunkTimer();
+        installSnapshotState.markSendStatus(false);
+
+        sendSnapshotChunk(followerActor, followerLogInfo);
+
+        return true;
     }
 
     private void sendHeartBeat() {
@@ -1050,8 +1124,8 @@ public abstract class AbstractLeader extends AbstractRaftActorBehavior {
         private final ByteSource snapshotBytes;
 
         SnapshotHolder(final Snapshot snapshot, final ByteSource snapshotBytes) {
-            this.lastIncludedTerm = snapshot.getLastAppliedTerm();
-            this.lastIncludedIndex = snapshot.getLastAppliedIndex();
+            lastIncludedTerm = snapshot.getLastAppliedTerm();
+            lastIncludedIndex = snapshot.getLastAppliedIndex();
             this.snapshotBytes = snapshotBytes;
         }
 
index 087c656b1836daaf9c05ac7b91e32eba1057c02c..055a0535001f56f996cedc8bff349ae403aeb9b3 100644 (file)
@@ -5,16 +5,19 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Cancellable;
-import com.google.common.base.Preconditions;
+import akka.cluster.Cluster;
+import akka.cluster.Member;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Random;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
@@ -22,6 +25,7 @@ import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
@@ -64,28 +68,21 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     private long replicatedToAllIndex = -1;
 
     AbstractRaftActorBehavior(final RaftActorContext context, final RaftState state) {
-        this.context = Preconditions.checkNotNull(context);
-        this.state = Preconditions.checkNotNull(state);
-        this.log = context.getLogger();
+        this.context = requireNonNull(context);
+        this.state = requireNonNull(state);
+        log = context.getLogger();
 
         logName = String.format("%s (%s)", context.getId(), state);
     }
 
     public static RaftActorBehavior createBehavior(final RaftActorContext context, final RaftState state) {
-        switch (state) {
-            case Candidate:
-                return new Candidate(context);
-            case Follower:
-                return new Follower(context);
-            case IsolatedLeader:
-                return new IsolatedLeader(context);
-            case Leader:
-                return new Leader(context);
-            case PreLeader:
-                return new PreLeader(context);
-            default:
-                throw new IllegalArgumentException("Unhandled state " + state);
-        }
+        return switch (state) {
+            case Candidate -> new Candidate(context);
+            case Follower -> new Follower(context);
+            case IsolatedLeader -> new IsolatedLeader(context);
+            case Leader -> new Leader(context);
+            case PreLeader -> new PreLeader(context);
+        };
     }
 
     @Override
@@ -208,10 +205,8 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
             // the log with the later term is more up-to-date. If the logs
             // end with the same term, then whichever log is longer is
             // more up-to-date.
-            if (requestVote.getLastLogTerm() > lastTerm()) {
-                candidateLatest = true;
-            } else if (requestVote.getLastLogTerm() == lastTerm()
-                    && requestVote.getLastLogIndex() >= lastIndex()) {
+            if (requestVote.getLastLogTerm() > lastTerm()
+                || requestVote.getLastLogTerm() == lastTerm() && requestVote.getLastLogIndex() >= lastIndex()) {
                 candidateLatest = true;
             }
 
@@ -243,7 +238,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      * @return a random election duration
      */
     protected FiniteDuration electionDuration() {
-        long variance = new Random().nextInt(context.getConfigParams().getElectionTimeVariance());
+        long variance = ThreadLocalRandom.current().nextInt(context.getConfigParams().getElectionTimeVariance());
         return context.getConfigParams().getElectionTimeOutInterval().$plus(
                 new FiniteDuration(variance, TimeUnit.MILLISECONDS));
     }
@@ -266,6 +261,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @param interval the duration after which we should trigger a new election
      */
+    // Non-final for testing
     protected void scheduleElection(final FiniteDuration interval) {
         stopElection();
 
@@ -297,7 +293,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @return the actor
      */
-    protected ActorRef actor() {
+    protected final ActorRef actor() {
         return context.getActor();
     }
 
@@ -319,15 +315,6 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         return context.getReplicatedLog().lastIndex();
     }
 
-    /**
-     * Removes and returns the ClientRequestTracker for the specified log index.
-     * @param logIndex the log index
-     * @return the ClientRequestTracker or null if none available
-     */
-    protected ClientRequestTracker removeClientRequestTracker(final long logIndex) {
-        return null;
-    }
-
     /**
      * Returns the actual index of the entry in replicated log for the given index or -1 if not found.
      *
@@ -392,13 +379,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
                 // Send a local message to the local RaftActor (it's derived class to be
                 // specific to apply the log to it's index)
 
-                final ApplyState applyState;
-                final ClientRequestTracker tracker = removeClientRequestTracker(i);
-                if (tracker != null) {
-                    applyState = new ApplyState(tracker.getClientActor(), tracker.getIdentifier(), replicatedLogEntry);
-                } else {
-                    applyState = new ApplyState(null, null, replicatedLogEntry);
-                }
+                final ApplyState applyState = getApplyStateFor(replicatedLogEntry);
 
                 log.debug("{}: Setting last applied to {}", logName(), i);
 
@@ -420,16 +401,24 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         actor().tell(new ApplyJournalEntries(context.getLastApplied()), actor());
     }
 
+    /**
+     * Create an ApplyState message for a particular log entry so we can determine how to apply this entry.
+     *
+     * @param entry the log entry
+     * @return ApplyState for this entry
+     */
+    abstract ApplyState getApplyStateFor(ReplicatedLogEntry entry);
+
     @Override
     public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
-        if (message instanceof AppendEntries) {
-            return appendEntries(sender, (AppendEntries) message);
-        } else if (message instanceof AppendEntriesReply) {
-            return handleAppendEntriesReply(sender, (AppendEntriesReply) message);
-        } else if (message instanceof RequestVote) {
-            return requestVote(sender, (RequestVote) message);
-        } else if (message instanceof RequestVoteReply) {
-            return handleRequestVoteReply(sender, (RequestVoteReply) message);
+        if (message instanceof AppendEntries appendEntries) {
+            return appendEntries(sender, appendEntries);
+        } else if (message instanceof AppendEntriesReply appendEntriesReply) {
+            return handleAppendEntriesReply(sender, appendEntriesReply);
+        } else if (message instanceof RequestVote requestVote) {
+            return requestVote(sender, requestVote);
+        } else if (message instanceof RequestVoteReply requestVoteReply) {
+            return handleRequestVoteReply(sender, requestVoteReply);
         } else {
             return null;
         }
@@ -450,12 +439,12 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
             return this;
         }
 
-        log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), this.state(),
+        log.info("{} :- Switching from behavior {} to {}, election term: {}", logName(), state(),
                 newBehavior.state(), context.getTermInformation().getCurrentTerm());
         try {
             close();
         } catch (RuntimeException e) {
-            log.error("{}: Failed to close behavior : {}", logName(), this.state(), e);
+            log.error("{}: Failed to close behavior : {}", logName(), state(), e);
         }
         return newBehavior;
     }
@@ -499,7 +488,40 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         }
     }
 
-    protected String getId() {
+    protected final String getId() {
         return context.getId();
     }
+
+    // Check whether we should update the term. In case of half-connected nodes, we want to ignore RequestVote
+    // messages, as the candidate is not able to receive our response.
+    protected boolean shouldUpdateTerm(final RaftRPC rpc) {
+        if (!(rpc instanceof RequestVote requestVote)) {
+            return true;
+        }
+
+        log.debug("{}: Found higher term in RequestVote rpc, verifying whether it's safe to update term.", logName());
+        final Optional<Cluster> maybeCluster = context.getCluster();
+        if (!maybeCluster.isPresent()) {
+            return true;
+        }
+
+        final Cluster cluster = maybeCluster.orElseThrow();
+
+        final Set<Member> unreachable = cluster.state().getUnreachable();
+        log.debug("{}: Cluster state: {}", logName(), unreachable);
+
+        for (Member member : unreachable) {
+            for (String role : member.getRoles()) {
+                if (requestVote.getCandidateId().startsWith(role)) {
+                    log.debug("{}: Unreachable member: {}, matches candidateId in: {}, not updating term", logName(),
+                        member, requestVote);
+                    return false;
+                }
+            }
+        }
+
+        log.debug("{}: Candidate in requestVote:{} with higher term appears reachable, updating term.", logName(),
+            requestVote);
+        return true;
+    }
 }
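
The added shouldUpdateTerm() guard can be read as the stand-alone predicate sketched below (the class and method names are illustrative; akka.cluster.Member is the same type used above): a RequestVote whose candidate id starts with the role of a currently-unreachable cluster member must not bump the local term, because that candidate cannot receive the reply anyway.

    package org.opendaylight.controller.cluster.raft.behaviors;

    import akka.cluster.Member;
    import java.util.Set;

    // Hypothetical restatement of the reachability check, not part of the change.
    final class ReachabilityCheckSketch {
        private ReachabilityCheckSketch() {
            // utility class
        }

        static boolean candidateAppearsReachable(final String candidateId, final Set<Member> unreachable) {
            for (Member member : unreachable) {
                for (String role : member.getRoles()) {
                    if (candidateId.startsWith(role)) {
                        // Candidate maps onto an unreachable member: do not update the term.
                        return false;
                    }
                }
            }
            return true;
        }
    }
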
index afa46892bea33754340f7ef6891f8cdbe646ed61..77f7a06c49dffe75557e6544e2deba03f16f5614 100644 (file)
@@ -5,16 +5,16 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import java.util.ArrayList;
-import java.util.Collection;
+import com.google.common.collect.ImmutableList;
 import org.opendaylight.controller.cluster.raft.PeerInfo;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
@@ -42,22 +42,19 @@ import scala.concurrent.duration.FiniteDuration;
  * <li> If election timeout elapses: start new election
  * </ul>
  */
-public class Candidate extends AbstractRaftActorBehavior {
-
-    private int voteCount;
-
+public final class Candidate extends AbstractRaftActorBehavior {
+    private final ImmutableList<String> votingPeers;
     private final int votesRequired;
 
-    private final Collection<String> votingPeers = new ArrayList<>();
+    private int voteCount;
 
-    public Candidate(RaftActorContext context) {
+    public Candidate(final RaftActorContext context) {
         super(context, RaftState.Candidate);
 
-        for (PeerInfo peer: context.getPeers()) {
-            if (peer.isVoting()) {
-                votingPeers.add(peer.getId());
-            }
-        }
+        votingPeers = context.getPeers().stream()
+            .filter(PeerInfo::isVoting)
+            .map(PeerInfo::getId)
+            .collect(ImmutableList.toImmutableList());
 
         log.debug("{}: Election: Candidate has following voting peers: {}", logName(), votingPeers);
 
@@ -73,17 +70,17 @@ public class Candidate extends AbstractRaftActorBehavior {
     }
 
     @Override
-    public final String getLeaderId() {
+    public String getLeaderId() {
         return null;
     }
 
     @Override
-    public final short getLeaderPayloadVersion() {
+    public short getLeaderPayloadVersion() {
         return -1;
     }
 
     @Override
-    protected RaftActorBehavior handleAppendEntries(ActorRef sender, AppendEntries appendEntries) {
+    protected RaftActorBehavior handleAppendEntries(final ActorRef sender, final AppendEntries appendEntries) {
 
         log.debug("{}: handleAppendEntries: {}", logName(), appendEntries);
 
@@ -99,12 +96,13 @@ public class Candidate extends AbstractRaftActorBehavior {
     }
 
     @Override
-    protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender, AppendEntriesReply appendEntriesReply) {
+    protected RaftActorBehavior handleAppendEntriesReply(final ActorRef sender,
+            final AppendEntriesReply appendEntriesReply) {
         return this;
     }
 
     @Override
-    protected RaftActorBehavior handleRequestVoteReply(ActorRef sender, RequestVoteReply requestVoteReply) {
+    protected RaftActorBehavior handleRequestVoteReply(final ActorRef sender, final RequestVoteReply requestVoteReply) {
         log.debug("{}: handleRequestVoteReply: {}, current voteCount: {}", logName(), requestVoteReply, voteCount);
 
         if (requestVoteReply.isVoteGranted()) {
@@ -129,8 +127,14 @@ public class Candidate extends AbstractRaftActorBehavior {
         return super.electionDuration().$div(context.getConfigParams().getCandidateElectionTimeoutDivisor());
     }
 
+
     @Override
-    public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
+    ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
+        throw new IllegalStateException("A candidate should never attempt to apply " + entry);
+    }
+
+    @Override
+    public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
         if (message instanceof ElectionTimeout) {
             log.debug("{}: Received ElectionTimeout", logName());
 
@@ -149,9 +153,7 @@ public class Candidate extends AbstractRaftActorBehavior {
             return this;
         }
 
-        if (message instanceof RaftRPC) {
-
-            RaftRPC rpc = (RaftRPC) message;
+        if (message instanceof RaftRPC rpc) {
 
             log.debug("{}: RaftRPC message received {}, my term is {}", logName(), rpc,
                         context.getTermInformation().getCurrentTerm());
@@ -178,10 +180,7 @@ public class Candidate extends AbstractRaftActorBehavior {
         return super.handleMessage(sender, message);
     }
 
-
     private void startNewTerm() {
-
-
         // set voteCount back to 1 (that is voting for self)
         voteCount = 1;
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/FI.java
new file mode 100644 (file)
index 0000000..79c605a
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link FollowerIdentifier}.
+ */
+final class FI implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private String value;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FI() {
+        // For Externalizable
+    }
+
+    FI(final String value) {
+        this.value = requireNonNull(value);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(value);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        value = (String) in.readObject();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new FollowerIdentifier(value);
+    }
+}
index 4fa4dbfb57b34f620eecd6b82710dffc754cd88f..9dd630aade8a5e2e0a8265aa69564ceaace63cd4 100644 (file)
@@ -14,22 +14,24 @@ import akka.cluster.Cluster;
 import akka.cluster.ClusterEvent.CurrentClusterState;
 import akka.cluster.Member;
 import akka.cluster.MemberStatus;
-import akka.japi.Procedure;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Stopwatch;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.IOException;
-import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.messaging.MessageAssembler;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
 import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
@@ -51,6 +53,7 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
  * convert to candidate
  * </ul>
  */
+// Non-final for testing
 public class Follower extends AbstractRaftActorBehavior {
     private static final long MAX_ELECTION_TIMEOUT_FACTOR = 18;
 
@@ -67,11 +70,13 @@ public class Follower extends AbstractRaftActorBehavior {
         this(context, null, (short)-1);
     }
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "electionDuration() is not final for Candidate override")
     public Follower(final RaftActorContext context, final String initialLeaderId,
             final short initialLeaderPayloadVersion) {
         super(context, RaftState.Follower);
-        this.leaderId = initialLeaderId;
-        this.leaderPayloadVersion = initialLeaderPayloadVersion;
+        leaderId = initialLeaderId;
+        leaderPayloadVersion = initialLeaderPayloadVersion;
 
         initialSyncStatusTracker = new SyncStatusTracker(context.getActor(), getId(), context.getConfigParams()
             .getSyncIndexThreshold());
@@ -160,12 +165,11 @@ public class Follower extends AbstractRaftActorBehavior {
         leaderId = appendEntries.getLeaderId();
         leaderPayloadVersion = appendEntries.getPayloadVersion();
 
-        if (appendEntries.getLeaderAddress().isPresent()) {
-            final String address = appendEntries.getLeaderAddress().get();
-            log.debug("New leader address: {}", address);
-
-            context.setPeerAddress(leaderId, address);
-            context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, address);
+        final var leaderAddress = appendEntries.leaderAddress();
+        if (leaderAddress != null) {
+            log.debug("New leader address: {}", leaderAddress);
+            context.setPeerAddress(leaderId, leaderAddress);
+            context.getConfigParams().getPeerAddressResolver().setResolved(leaderId, leaderAddress);
         }
 
         // First check if the logs are in sync or not
@@ -303,7 +307,7 @@ public class Follower extends AbstractRaftActorBehavior {
         // applied to the state already, as the persistence callback occurs async, and we want those entries
         // purged from the persisted log as well.
         final AtomicBoolean shouldCaptureSnapshot = new AtomicBoolean(false);
-        final Procedure<ReplicatedLogEntry> appendAndPersistCallback = logEntry -> {
+        final Consumer<ReplicatedLogEntry> appendAndPersistCallback = logEntry -> {
             final List<ReplicatedLogEntry> entries = appendEntries.getEntries();
             final ReplicatedLogEntry lastEntryToAppend = entries.get(entries.size() - 1);
             if (shouldCaptureSnapshot.get() && logEntry == lastEntryToAppend) {
@@ -322,8 +326,8 @@ public class Follower extends AbstractRaftActorBehavior {
             shouldCaptureSnapshot.compareAndSet(false,
                     context.getReplicatedLog().shouldCaptureSnapshot(entry.getIndex()));
 
-            if (entry.getData() instanceof ServerConfigurationPayload) {
-                context.updatePeerIds((ServerConfigurationPayload)entry.getData());
+            if (entry.getData() instanceof ServerConfigurationPayload serverConfiguration) {
+                context.updatePeerIds(serverConfiguration);
             }
         }
 
@@ -408,8 +412,8 @@ public class Follower extends AbstractRaftActorBehavior {
         return false;
     }
 
-    private void sendOutOfSyncAppendEntriesReply(final ActorRef sender, boolean forceInstallSnapshot,
-            short leaderRaftVersion) {
+    private void sendOutOfSyncAppendEntriesReply(final ActorRef sender, final boolean forceInstallSnapshot,
+            final short leaderRaftVersion) {
         // We found that the log was out of sync so just send a negative reply.
         final AppendEntriesReply reply = new AppendEntriesReply(context.getId(), currentTerm(), false, lastIndex(),
                 lastTerm(), context.getPayloadVersion(), forceInstallSnapshot, needsLeaderAddress(),
@@ -435,6 +439,11 @@ public class Follower extends AbstractRaftActorBehavior {
         return this;
     }
 
+    @Override
+    final ApplyState getApplyStateFor(final ReplicatedLogEntry entry) {
+        return new ApplyState(null, null, entry);
+    }
+
     @Override
     public RaftActorBehavior handleMessage(final ActorRef sender, final Object message) {
         if (message instanceof ElectionTimeout || message instanceof TimeoutNow) {
@@ -445,30 +454,29 @@ public class Follower extends AbstractRaftActorBehavior {
             return this;
         }
 
-        if (!(message instanceof RaftRPC)) {
+        if (!(message instanceof RaftRPC rpc)) {
             // The rest of the processing requires the message to be a RaftRPC
             return null;
         }
 
-        final RaftRPC rpc = (RaftRPC) message;
         // If RPC request or response contains term T > currentTerm:
         // set currentTerm = T, convert to follower (§5.1)
         // This applies to all RPC messages and responses
-        if (rpc.getTerm() > context.getTermInformation().getCurrentTerm()) {
+        if (rpc.getTerm() > context.getTermInformation().getCurrentTerm() && shouldUpdateTerm(rpc)) {
             log.info("{}: Term {} in \"{}\" message is greater than follower's term {} - updating term",
                 logName(), rpc.getTerm(), rpc, context.getTermInformation().getCurrentTerm());
 
             context.getTermInformation().updateAndPersist(rpc.getTerm(), null);
         }
 
-        if (rpc instanceof InstallSnapshot) {
-            handleInstallSnapshot(sender, (InstallSnapshot) rpc);
+        if (rpc instanceof InstallSnapshot installSnapshot) {
+            handleInstallSnapshot(sender, installSnapshot);
             restartLastLeaderMessageTimer();
             scheduleElection(electionDuration());
             return this;
         }
 
-        if (!(rpc instanceof RequestVote) || canGrantVote((RequestVote) rpc)) {
+        if (!(rpc instanceof RequestVote requestVote) || canGrantVote(requestVote)) {
             restartLastLeaderMessageTimer();
             scheduleElection(electionDuration());
         }
@@ -500,6 +508,10 @@ public class Follower extends AbstractRaftActorBehavior {
                 if (isLeaderAvailabilityKnown() && lastLeaderMessageInterval < maxElectionTimeout) {
                     log.debug("{}: Received ElectionTimeout but leader appears to be available", logName());
                     scheduleElection(electionDuration());
+                } else if (isThisFollowerIsolated()) {
+                    log.debug("{}: this follower is isolated. Do not switch to Candidate for now.", logName());
+                    setLeaderId(null);
+                    scheduleElection(electionDuration());
                 } else {
                     log.debug("{}: Received ElectionTimeout - switching to Candidate", logName());
                     return internalSwitchBehavior(RaftState.Candidate);
@@ -537,7 +549,7 @@ public class Follower extends AbstractRaftActorBehavior {
 
         Address leaderAddress = leaderActor.anchorPath().address();
 
-        CurrentClusterState state = cluster.get().state();
+        CurrentClusterState state = cluster.orElseThrow().state();
         Set<Member> unreachable = state.getUnreachable();
 
         log.debug("{}: Checking for leader {} in the cluster unreachable set {}", logName(), leaderAddress,
@@ -569,6 +581,36 @@ public class Follower extends AbstractRaftActorBehavior {
         return false;
     }
 
+    private boolean isThisFollowerIsolated() {
+        final Optional<Cluster> maybeCluster = context.getCluster();
+        if (!maybeCluster.isPresent()) {
+            return false;
+        }
+
+        final Cluster cluster = maybeCluster.orElseThrow();
+        final Member selfMember = cluster.selfMember();
+
+        final CurrentClusterState state = cluster.state();
+        final Set<Member> unreachable = state.getUnreachable();
+        final Iterable<Member> members = state.getMembers();
+
+        log.debug("{}: Checking if this node is isolated in the cluster unreachable set {},"
+                        + "all members {} self member: {}", logName(), unreachable, members, selfMember);
+
+        // no unreachable peers means we cannot be isolated
+        if (unreachable.isEmpty()) {
+            return false;
+        }
+
+        final Set<Member> membersToCheck = new HashSet<>();
+        members.forEach(membersToCheck::add);
+
+        membersToCheck.removeAll(unreachable);
+
+        // check if the only member not unreachable is us
+        return membersToCheck.size() == 1 && membersToCheck.iterator().next().equals(selfMember);
+    }
+
     private void handleInstallSnapshot(final ActorRef sender, final InstallSnapshot installSnapshot) {
 
         log.debug("{}: handleInstallSnapshot: {}", logName(), installSnapshot);
@@ -593,14 +635,14 @@ public class Follower extends AbstractRaftActorBehavior {
 
                 Snapshot snapshot = Snapshot.create(
                         context.getSnapshotManager().convertSnapshot(snapshotTracker.getSnapshotBytes()),
-                        new ArrayList<>(),
+                        List.of(),
                         installSnapshot.getLastIncludedIndex(),
                         installSnapshot.getLastIncludedTerm(),
                         installSnapshot.getLastIncludedIndex(),
                         installSnapshot.getLastIncludedTerm(),
                         context.getTermInformation().getCurrentTerm(),
                         context.getTermInformation().getVotedFor(),
-                        installSnapshot.getServerConfig().orNull());
+                        installSnapshot.getServerConfig().orElse(null));
 
                 ApplySnapshot.Callback applySnapshotCallback = new ApplySnapshot.Callback() {
                     @Override
@@ -627,8 +669,7 @@ public class Follower extends AbstractRaftActorBehavior {
         } catch (IOException e) {
             log.debug("{}: Exception in InstallSnapshot of follower", logName(), e);
 
-            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(),
-                    -1, false), actor());
+            sender.tell(new InstallSnapshotReply(currentTerm(), context.getId(), -1, false), actor());
 
             closeSnapshotTracker();
         }
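
The isThisFollowerIsolated() hunk above reduces to a small set computation: with at least one unreachable member, the follower treats itself as isolated when it is the only member left after removing the unreachable set. A minimal, dependency-free sketch of that predicate follows; it models members as plain strings instead of Akka's Member type, and the class and method names are illustrative, not part of the actual patch.

    import java.util.HashSet;
    import java.util.Set;

    // Illustrative model of the isolation predicate: a node considers itself isolated when
    // unreachable members exist and the only reachable member left is the node itself.
    final class IsolationCheck {
        private IsolationCheck() {
            // utility class
        }

        static boolean isIsolated(final Set<String> allMembers, final Set<String> unreachable, final String self) {
            // no unreachable peers means we cannot be isolated
            if (unreachable.isEmpty()) {
                return false;
            }

            final Set<String> reachable = new HashSet<>(allMembers);
            reachable.removeAll(unreachable);

            // isolated when the only member still reachable is this node
            return reachable.size() == 1 && reachable.contains(self);
        }

        public static void main(final String[] args) {
            final Set<String> members = Set.of("member-1", "member-2", "member-3");
            // member-1 sees both peers as unreachable -> isolated
            System.out.println(isIsolated(members, Set.of("member-2", "member-3"), "member-1")); // true
            // only one peer unreachable -> not isolated
            System.out.println(isIsolated(members, Set.of("member-2"), "member-1")); // false
        }
    }
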
index 32c6da4b527431d9e874000e6dd0acaff696a9d5..2586f2091eec649afa80fbece5d3efe2ac999556 100644 (file)
@@ -7,10 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft.behaviors;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import org.opendaylight.yangtools.util.AbstractStringIdentifier;
 
 /**
@@ -18,44 +14,16 @@ import org.opendaylight.yangtools.util.AbstractStringIdentifier;
  *
  * @author Thomas Pantelis
  */
-class FollowerIdentifier extends AbstractStringIdentifier<FollowerIdentifier> {
+final class FollowerIdentifier extends AbstractStringIdentifier<FollowerIdentifier> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    FollowerIdentifier(String followerId) {
+    FollowerIdentifier(final String followerId) {
         super(followerId);
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private FollowerIdentifier identifier;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(FollowerIdentifier identifier) {
-            this.identifier = identifier;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeObject(identifier.getValue());
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            identifier = new FollowerIdentifier((String) in.readObject());
-        }
-
-        private Object readResolve() {
-            return identifier;
-        }
+        return new FI(getValue());
     }
 }
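
The FollowerIdentifier change drops the hand-written Externalizable Proxy in favour of a compact writeReplace()/readResolve() proxy (the FI class, which is not shown in this hunk). The sketch below illustrates that general serialization-proxy pattern under hypothetical names (ExampleIdentifier, ExampleProxy); it is not the actual FI implementation.

    import java.io.Serializable;

    // Hypothetical illustration of the serialization-proxy pattern: the identifier never
    // serializes its own fields; writeReplace() substitutes a small proxy carrying only the
    // string value, and the proxy's readResolve() rebuilds the real object on deserialization.
    final class ExampleIdentifier implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        private final String value;

        ExampleIdentifier(final String value) {
            this.value = value;
        }

        String getValue() {
            return value;
        }

        @java.io.Serial
        private Object writeReplace() {
            return new ExampleProxy(value);
        }

        private static final class ExampleProxy implements Serializable {
            @java.io.Serial
            private static final long serialVersionUID = 1L;

            private final String value;

            ExampleProxy(final String value) {
                this.value = value;
            }

            @java.io.Serial
            private Object readResolve() {
                return new ExampleIdentifier(value);
            }
        }
    }
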
index 21f823d08960b71111750120b3e451be7b7d4137..0e293520523c39c2e8a7e70f0d924373b90a67a5 100644 (file)
@@ -12,8 +12,8 @@ import static java.util.Objects.requireNonNull;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
 import com.google.common.base.Stopwatch;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
@@ -57,16 +57,16 @@ public class Leader extends AbstractLeader {
     private final Stopwatch isolatedLeaderCheck = Stopwatch.createStarted();
     private @Nullable LeadershipTransferContext leadershipTransferContext;
 
-    Leader(RaftActorContext context, @Nullable AbstractLeader initializeFromLeader) {
+    Leader(final RaftActorContext context, @Nullable final AbstractLeader initializeFromLeader) {
         super(context, RaftState.Leader, initializeFromLeader);
     }
 
-    public Leader(RaftActorContext context) {
+    public Leader(final RaftActorContext context) {
         this(context, null);
     }
 
     @Override
-    public RaftActorBehavior handleMessage(ActorRef sender, Object originalMessage) {
+    public RaftActorBehavior handleMessage(final ActorRef sender, final Object originalMessage) {
         requireNonNull(sender, "sender should not be null");
 
         if (ISOLATED_LEADER_CHECK.equals(originalMessage)) {
@@ -98,7 +98,8 @@ public class Leader extends AbstractLeader {
     }
 
     @Override
-    protected RaftActorBehavior handleAppendEntriesReply(ActorRef sender, AppendEntriesReply appendEntriesReply) {
+    protected RaftActorBehavior handleAppendEntriesReply(final ActorRef sender,
+            final AppendEntriesReply appendEntriesReply) {
         RaftActorBehavior returnBehavior = super.handleAppendEntriesReply(sender, appendEntriesReply);
         tryToCompleteLeadershipTransfer(appendEntriesReply.getFollowerId());
         return returnBehavior;
@@ -122,7 +123,7 @@ public class Leader extends AbstractLeader {
      *
      * @param leadershipTransferCohort the cohort participating in the leadership transfer
      */
-    public void transferLeadership(@NonNull RaftActorLeadershipTransferCohort leadershipTransferCohort) {
+    public void transferLeadership(@NonNull final RaftActorLeadershipTransferCohort leadershipTransferCohort) {
         log.debug("{}: Attempting to transfer leadership", logName());
 
         leadershipTransferContext = new LeadershipTransferContext(leadershipTransferCohort);
@@ -131,14 +132,14 @@ public class Leader extends AbstractLeader {
         sendAppendEntries(0, false);
     }
 
-    private void tryToCompleteLeadershipTransfer(String followerId) {
+    private void tryToCompleteLeadershipTransfer(final String followerId) {
         if (leadershipTransferContext == null) {
             return;
         }
 
         final Optional<String> requestedFollowerIdOptional
                 = leadershipTransferContext.transferCohort.getRequestedFollowerId();
-        if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.get().equals(followerId)) {
+        if (requestedFollowerIdOptional.isPresent() && !requestedFollowerIdOptional.orElseThrow().equals(followerId)) {
             // we want to transfer leadership to specific follower
             return;
         }
@@ -184,12 +185,12 @@ public class Leader extends AbstractLeader {
     }
 
     @VisibleForTesting
-    void markFollowerActive(String followerId) {
+    void markFollowerActive(final String followerId) {
         getFollower(followerId).markFollowerActive();
     }
 
     @VisibleForTesting
-    void markFollowerInActive(String followerId) {
+    void markFollowerInActive(final String followerId) {
         getFollower(followerId).markFollowerInActive();
     }
 
@@ -197,11 +198,11 @@ public class Leader extends AbstractLeader {
         RaftActorLeadershipTransferCohort transferCohort;
         Stopwatch timer = Stopwatch.createStarted();
 
-        LeadershipTransferContext(RaftActorLeadershipTransferCohort transferCohort) {
+        LeadershipTransferContext(final RaftActorLeadershipTransferCohort transferCohort) {
             this.transferCohort = transferCohort;
         }
 
-        boolean isExpired(long timeout) {
+        boolean isExpired(final long timeout) {
             if (timer.elapsed(TimeUnit.MILLISECONDS) >= timeout) {
                 transferCohort.abortTransfer();
                 return true;
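
LeadershipTransferContext.isExpired() above is a plain Guava Stopwatch guard: the timer starts when the transfer begins and the transfer is abandoned once the timeout elapses. A minimal sketch, assuming Guava on the classpath and using an illustrative class name:

    import com.google.common.base.Stopwatch;
    import java.util.concurrent.TimeUnit;

    // Minimal sketch of a Stopwatch-based expiry guard in the spirit of
    // LeadershipTransferContext.isExpired(): start the timer when the transfer begins and
    // abandon the transfer once the configured timeout has elapsed.
    final class TransferTimer {
        private final Stopwatch timer = Stopwatch.createStarted();

        boolean isExpired(final long timeoutMillis) {
            return timer.elapsed(TimeUnit.MILLISECONDS) >= timeoutMillis;
        }

        public static void main(final String[] args) throws InterruptedException {
            final TransferTimer transfer = new TransferTimer();
            System.out.println(transfer.isExpired(200)); // false right after starting
            Thread.sleep(250);
            System.out.println(transfer.isExpired(200)); // true once the timeout has elapsed
        }
    }
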
index 946c56bec091e6dba8c490a1c9ede601e8d7e532..a2617dc63960c676ccb7227a7922361f76568323 100644 (file)
@@ -7,12 +7,16 @@
  */
 package org.opendaylight.controller.cluster.raft.behaviors;
 
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Stopwatch;
 import com.google.common.io.ByteSource;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import scala.concurrent.duration.FiniteDuration;
 
 /**
  * Encapsulates the leader state and logic for sending snapshot chunks to a follower.
@@ -26,15 +30,17 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
     // The index that the follower should respond with if it needs the install snapshot to be reset
     static final int INVALID_CHUNK_INDEX = -1;
 
+    static final int INITIAL_OFFSET = -1;
+
     // This would be passed as the hash code of the last chunk when sending the first chunk
     static final int INITIAL_LAST_CHUNK_HASH_CODE = -1;
 
     private final int snapshotChunkSize;
     private final String logName;
     private ByteSource snapshotBytes;
-    private int offset = 0;
+    private long offset = INITIAL_OFFSET;
     // the next snapshot chunk is sent only if the replyReceivedForOffset matches offset
-    private int replyReceivedForOffset = -1;
+    private long replyReceivedForOffset = -1;
     // if replyStatus is false, the previous chunk is attempted
     private boolean replyStatus = false;
     private int chunkIndex = FIRST_CHUNK_INDEX;
@@ -43,6 +49,8 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
     private int nextChunkHashCode = INITIAL_LAST_CHUNK_HASH_CODE;
     private long snapshotSize;
     private InputStream snapshotInputStream;
+    private final Stopwatch chunkTimer = Stopwatch.createUnstarted();
+    private byte[] currentChunk = null;
 
     LeaderInstallSnapshotState(final int snapshotChunkSize, final String logName) {
         this.snapshotChunkSize = snapshotChunkSize;
@@ -63,13 +71,15 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
 
         LOG.debug("{}: Snapshot {} bytes, total chunks to send: {}", logName, snapshotSize, totalChunks);
 
-        replyReceivedForOffset = -1;
+        replyReceivedForOffset = INITIAL_OFFSET;
         chunkIndex = FIRST_CHUNK_INDEX;
     }
 
-    int incrementOffset() {
-        if (replyStatus) {
-            // if prev chunk failed, we would want to sent the same chunk again
+    private long incrementOffset() {
+        // offset of -1 means either the initial value or a reset; either way, the next chunk starts at offset 0
+        if (offset == INITIAL_OFFSET) {
+            offset = 0;
+        } else {
             offset = offset + snapshotChunkSize;
         }
         return offset;
@@ -77,12 +87,24 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
 
     int incrementChunkIndex() {
         if (replyStatus) {
-            // if prev chunk failed, we would want to sent the same chunk again
+            // if prev chunk failed, we would want to send the same chunk again
             chunkIndex =  chunkIndex + 1;
         }
         return chunkIndex;
     }
 
+    void startChunkTimer() {
+        chunkTimer.start();
+    }
+
+    void resetChunkTimer() {
+        chunkTimer.reset();
+    }
+
+    boolean isChunkTimedOut(final FiniteDuration timeout) {
+        return chunkTimer.elapsed(TimeUnit.SECONDS) > timeout.toSeconds();
+    }
+
     int getChunkIndex() {
         return chunkIndex;
     }
@@ -108,33 +130,39 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
             replyStatus = true;
             lastChunkHashCode = nextChunkHashCode;
         } else {
-            // if the chunk sent was failure
-            replyReceivedForOffset = offset;
+            // the chunk failed to send, so revert the offset to the previous value and retry the same chunk
+            offset = replyReceivedForOffset;
             replyStatus = false;
         }
     }
 
     byte[] getNextChunk() throws IOException {
-        int start = incrementOffset();
-        int size = snapshotChunkSize;
-        if (snapshotChunkSize > snapshotSize) {
-            size = (int) snapshotSize;
-        } else if (start + snapshotChunkSize > snapshotSize) {
-            size = (int) (snapshotSize - start);
-        }
+        // increment offset to indicate the next chunk is in flight; canSendNextChunk() won't let us hit this again
+        // until markSendStatus() is called with either success or failure
+        final var start = incrementOffset();
+        if (replyStatus || currentChunk == null) {
+            int size = snapshotChunkSize;
+            if (snapshotChunkSize > snapshotSize) {
+                size = (int) snapshotSize;
+            } else if (start + snapshotChunkSize > snapshotSize) {
+                size = (int) (snapshotSize - start);
+            }
 
-        byte[] nextChunk = new byte[size];
-        int numRead = snapshotInputStream.read(nextChunk);
-        if (numRead != size) {
-            throw new IOException(String.format(
-                    "The # of bytes read from the input stream, %d, does not match the expected # %d", numRead, size));
-        }
+            currentChunk = new byte[size];
+            final var numRead = snapshotInputStream.read(currentChunk);
+            if (numRead != size) {
+                throw new IOException(String.format(
+                        "The # of bytes read from the input stream, %d, does not match the expected # %d",
+                        numRead, size));
+            }
 
-        nextChunkHashCode = Arrays.hashCode(nextChunk);
+            nextChunkHashCode = Arrays.hashCode(currentChunk);
 
-        LOG.debug("{}: Next chunk: total length={}, offset={}, size={}, hashCode={}", logName,
-                snapshotSize, start, size, nextChunkHashCode);
-        return nextChunk;
+            LOG.debug("{}: Next chunk: total length={}, offset={}, size={}, hashCode={}", logName,
+                    snapshotSize, start, size, nextChunkHashCode);
+        }
+
+        return currentChunk;
     }
 
     /**
@@ -142,17 +170,20 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
      */
     void reset() {
         closeStream();
+        chunkTimer.reset();
 
-        offset = 0;
+        offset = INITIAL_OFFSET;
         replyStatus = false;
-        replyReceivedForOffset = offset;
+        replyReceivedForOffset = INITIAL_OFFSET;
         chunkIndex = FIRST_CHUNK_INDEX;
+        currentChunk = null;
         lastChunkHashCode = INITIAL_LAST_CHUNK_HASH_CODE;
+        nextChunkHashCode = INITIAL_LAST_CHUNK_HASH_CODE;
 
         try {
             snapshotInputStream = snapshotBytes.openStream();
         } catch (IOException e) {
-            throw new RuntimeException(e);
+            throw new IllegalStateException(e);
         }
     }
 
@@ -167,7 +198,7 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
             try {
                 snapshotInputStream.close();
             } catch (IOException e) {
-                LOG.warn("{}: Error closing snapshot stream", logName);
+                LOG.warn("{}: Error closing snapshot stream", logName, e);
             }
 
             snapshotInputStream = null;
@@ -177,4 +208,20 @@ public final class LeaderInstallSnapshotState implements AutoCloseable {
     int getLastChunkHashCode() {
         return lastChunkHashCode;
     }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this)
+                .add("snapshotChunkSize", snapshotChunkSize)
+                .add("offset", offset)
+                .add("replyReceivedForOffset", replyReceivedForOffset)
+                .add("replyStatus", replyStatus)
+                .add("chunkIndex", chunkIndex)
+                .add("totalChunks", totalChunks)
+                .add("lastChunkHashCode", lastChunkHashCode)
+                .add("nextChunkHashCode", nextChunkHashCode)
+                .add("snapshotSize", snapshotSize)
+                .add("chunkTimer", chunkTimer)
+                .toString();
+    }
 }
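
The reworked getNextChunk() advances the offset by snapshotChunkSize and trims the final chunk to whatever bytes remain. That arithmetic can be isolated into a small sketch; the helper names and example sizes below are illustrative only.

    // Illustrates the chunk-size arithmetic used when streaming a snapshot: every chunk is
    // chunkSize bytes except the last one, which carries only the remaining bytes.
    final class ChunkMath {
        private ChunkMath() {
            // utility class
        }

        static int totalChunks(final long snapshotSize, final int chunkSize) {
            return (int) (snapshotSize / chunkSize + (snapshotSize % chunkSize > 0 ? 1 : 0));
        }

        static int chunkSizeAt(final long offset, final long snapshotSize, final int chunkSize) {
            if (chunkSize > snapshotSize) {
                return (int) snapshotSize;
            }
            return offset + chunkSize > snapshotSize ? (int) (snapshotSize - offset) : chunkSize;
        }

        public static void main(final String[] args) {
            final long snapshotSize = 2_500;
            final int chunkSize = 1_000;
            System.out.println(totalChunks(snapshotSize, chunkSize));        // 3
            System.out.println(chunkSizeAt(0, snapshotSize, chunkSize));     // 1000
            System.out.println(chunkSizeAt(2_000, snapshotSize, chunkSize)); // 500 (short final chunk)
        }
    }
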
index f8969fcfc14892f4fbf000bbe00af4a17a8f07d3..c3b75161b05a2d314ebb65026fd6d86dfb753062 100644 (file)
@@ -5,15 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.io.ByteSource;
 import java.io.BufferedOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.OptionalInt;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.slf4j.Logger;
@@ -32,10 +32,10 @@ class SnapshotTracker implements AutoCloseable {
     private int lastChunkHashCode = LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE;
     private long count;
 
-    SnapshotTracker(Logger log, int totalChunks, String leaderId, RaftActorContext context) {
+    SnapshotTracker(final Logger log, final int totalChunks, final String leaderId, final RaftActorContext context) {
         this.log = log;
         this.totalChunks = totalChunks;
-        this.leaderId = Preconditions.checkNotNull(leaderId);
+        this.leaderId = requireNonNull(leaderId);
         fileBackedStream = context.getFileBackedOutputStreamFactory().newInstance();
         bufferedStream = new BufferedOutputStream(fileBackedStream);
     }
@@ -48,11 +48,12 @@ class SnapshotTracker implements AutoCloseable {
      * @param lastChunkHashCode the optional hash code for the chunk
      * @return true if this is the last chunk is received
      * @throws InvalidChunkException if the chunk index is invalid or out of order
+     * @throws IOException if there is a problem writing to the stream
      */
-    boolean addChunk(int chunkIndex, byte[] chunk, Optional<Integer> maybeLastChunkHashCode)
-            throws InvalidChunkException, IOException {
+    boolean addChunk(final int chunkIndex, final byte[] chunk, final OptionalInt maybeLastChunkHashCode)
+            throws IOException {
         log.debug("addChunk: chunkIndex={}, lastChunkIndex={}, collectedChunks.size={}, lastChunkHashCode={}",
-                chunkIndex, lastChunkIndex, count, this.lastChunkHashCode);
+                chunkIndex, lastChunkIndex, count, lastChunkHashCode);
 
         if (sealed) {
             throw new InvalidChunkException("Invalid chunk received with chunkIndex " + chunkIndex
@@ -63,10 +64,10 @@ class SnapshotTracker implements AutoCloseable {
             throw new InvalidChunkException("Expected chunkIndex " + (lastChunkIndex + 1) + " got " + chunkIndex);
         }
 
-        if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.get() != this.lastChunkHashCode) {
+        if (maybeLastChunkHashCode.isPresent() && maybeLastChunkHashCode.orElseThrow() != lastChunkHashCode) {
             throw new InvalidChunkException("The hash code of the recorded last chunk does not match "
-                    + "the senders hash code, expected " + this.lastChunkHashCode + " was "
-                    + maybeLastChunkHashCode.get());
+                    + "the sender's hash code, expected " + lastChunkHashCode + " was "
+                    + maybeLastChunkHashCode.orElseThrow());
         }
 
         bufferedStream.write(chunk);
@@ -74,7 +75,7 @@ class SnapshotTracker implements AutoCloseable {
         count += chunk.length;
         sealed = chunkIndex == totalChunks;
         lastChunkIndex = chunkIndex;
-        this.lastChunkHashCode = Arrays.hashCode(chunk);
+        lastChunkHashCode = Arrays.hashCode(chunk);
         return sealed;
     }
 
@@ -99,7 +100,7 @@ class SnapshotTracker implements AutoCloseable {
     public static class InvalidChunkException extends IOException {
         private static final long serialVersionUID = 1L;
 
-        InvalidChunkException(String message) {
+        InvalidChunkException(final String message) {
             super(message);
         }
     }
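
SnapshotTracker.addChunk() enforces two invariants: chunks must arrive strictly in sequence, and the sender's hash of the previous chunk, when present in the OptionalInt, must match the hash the receiver recorded. A simplified, standalone sketch of those checks, with hypothetical names and without the stream handling:

    import java.util.Arrays;
    import java.util.OptionalInt;

    // Simplified sketch of the ordering and hash checks in SnapshotTracker.addChunk().
    final class ChunkValidator {
        private int lastChunkIndex = 0;      // FIRST_CHUNK_INDEX - 1 in the real code
        private int lastChunkHashCode = -1;  // INITIAL_LAST_CHUNK_HASH_CODE

        void addChunk(final int chunkIndex, final byte[] chunk, final OptionalInt senderLastHash) {
            if (lastChunkIndex + 1 != chunkIndex) {
                throw new IllegalStateException("Expected chunkIndex " + (lastChunkIndex + 1) + " got " + chunkIndex);
            }
            if (senderLastHash.isPresent() && senderLastHash.orElseThrow() != lastChunkHashCode) {
                throw new IllegalStateException("Hash mismatch: expected " + lastChunkHashCode
                    + " was " + senderLastHash.orElseThrow());
            }
            lastChunkIndex = chunkIndex;
            lastChunkHashCode = Arrays.hashCode(chunk);
        }

        public static void main(final String[] args) {
            final ChunkValidator validator = new ChunkValidator();
            final byte[] first = {1, 2, 3};
            validator.addChunk(1, first, OptionalInt.of(-1));
            validator.addChunk(2, new byte[] {4, 5}, OptionalInt.of(Arrays.hashCode(first)));
            System.out.println("both chunks accepted");
        }
    }
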
index 20714493e84658d7a8c780cc3a1efd0573962b13..2b700ffc43c1ec80cce36f2ab4293cfaebc09547 100644 (file)
@@ -5,11 +5,12 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -28,7 +29,7 @@ public class SyncStatusTracker {
         final String leaderId;
 
         LeaderInfo(final String leaderId, final long minimumCommitIndex) {
-            this.leaderId = Preconditions.checkNotNull(leaderId);
+            this.leaderId = requireNonNull(leaderId);
             this.minimumCommitIndex = minimumCommitIndex;
         }
     }
@@ -46,14 +47,14 @@ public class SyncStatusTracker {
     private boolean syncStatus;
 
     public SyncStatusTracker(final ActorRef actor, final String id, final long syncThreshold) {
-        this.actor = Preconditions.checkNotNull(actor, "actor should not be null");
-        this.id = Preconditions.checkNotNull(id, "id should not be null");
-        Preconditions.checkArgument(syncThreshold >= 0, "syncThreshold should be greater than or equal to 0");
+        this.actor = requireNonNull(actor, "actor should not be null");
+        this.id = requireNonNull(id, "id should not be null");
+        checkArgument(syncThreshold >= 0, "syncThreshold should be greater than or equal to 0");
         this.syncThreshold = syncThreshold;
     }
 
     public void update(final String leaderId, final long leaderCommit, final long commitIndex) {
-        Preconditions.checkNotNull(leaderId, "leaderId should not be null");
+        requireNonNull(leaderId, "leaderId should not be null");
 
         if (syncTarget == null || !leaderId.equals(syncTarget.leaderId)) {
             LOG.debug("{}: Last sync leader does not match current leader {}, need to catch up to {}", id,
index d51f93f7eed780e4d5991ae37dce33dadbb98fcd..c9bc2ed3b2174451921a93fb33d6d135bbdf0bf4 100644 (file)
@@ -7,15 +7,25 @@
  */
 package org.opendaylight.controller.cluster.raft.client.messages;
 
+import akka.util.Timeout;
+import java.util.Optional;
+
 /**
- * Internal client message to get a snapshot of the current state based on whether or not persistence is
- * enabled. Returns a {@link GetSnapshotReply} instance.
+ * Internal client message to get a snapshot of the current state based on whether or not persistence is enabled.
+ * Returns a {@link GetSnapshotReply} instance.
  *
  * @author Thomas Pantelis
  */
 public final class GetSnapshot {
-    public static final GetSnapshot INSTANCE = new GetSnapshot();
+    public static final GetSnapshot INSTANCE = new GetSnapshot(null);
+
+    private final Timeout timeout;
+
+    public GetSnapshot(final Timeout timeout) {
+        this.timeout = timeout;
+    }
 
-    private GetSnapshot() {
+    public Optional<Timeout> getTimeout() {
+        return Optional.ofNullable(timeout);
     }
 }
index 7ded55b3ce91686d93f26d1dafe9f15c2585a1c3..f6e673bffdaf449ab2f16ad11ab858cfd7c2bcee 100644 (file)
@@ -12,6 +12,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
 
 /**
  * The response to a GetOnDemandRaftState message.
index b670243b42f3167cc124f0b929d2ed9b8079e495..7fba245bf26fedd040e09c0b13dfc0cb425e2bcf 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.raft.client.messages;
 
 import akka.dispatch.ControlMessage;
 import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.base.messages.EmptyExternalizableProxy;
 
 /**
  * Message sent to a raft actor to shutdown gracefully. If it's the leader it will transfer leadership to a
@@ -19,25 +18,18 @@ import org.opendaylight.controller.cluster.raft.base.messages.EmptyExternalizabl
  * @author Thomas Pantelis
  */
 public final class Shutdown implements Serializable, ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+
     public static final Shutdown INSTANCE = new Shutdown();
 
     private Shutdown() {
         // Hidden on purpose
     }
 
-    private Object writeReplace() {
-        return new Proxy();
-    }
-
-    private static class Proxy extends EmptyExternalizableProxy {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            super(INSTANCE);
-        }
+    @java.io.Serial
+    @SuppressWarnings("static-method")
+    private Object readResolve() {
+        return INSTANCE;
     }
 }
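
Shutdown now relies on the plain serializable-singleton idiom: readResolve() maps any deserialized copy back to INSTANCE so identity comparisons keep working after a round trip. A minimal example of the same idiom, using a hypothetical Ping message:

    import java.io.Serializable;

    // Minimal example of the serializable-singleton idiom: whatever instance comes out of
    // deserialization, readResolve() funnels it back to the shared INSTANCE.
    final class Ping implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        static final Ping INSTANCE = new Ping();

        private Ping() {
            // Hidden on purpose
        }

        @java.io.Serial
        @SuppressWarnings("static-method")
        private Object readResolve() {
            return INSTANCE;
        }
    }
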
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AE.java
new file mode 100644 (file)
index 0000000..491ca3f
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Argon serialization proxy for {@link AppendEntries}.
+ */
+final class AE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private AppendEntries appendEntries;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AE() {
+        // For Externalizable
+    }
+
+    AE(final AppendEntries appendEntries) {
+        this.appendEntries = requireNonNull(appendEntries);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeShort(appendEntries.getLeaderRaftVersion());
+        WritableObjects.writeLong(out, appendEntries.getTerm());
+        out.writeObject(appendEntries.getLeaderId());
+
+        WritableObjects.writeLongs(out, appendEntries.getPrevLogTerm(), appendEntries.getPrevLogIndex());
+        WritableObjects.writeLongs(out, appendEntries.getLeaderCommit(), appendEntries.getReplicatedToAllIndex());
+
+        out.writeShort(appendEntries.getPayloadVersion());
+
+        final var entries = appendEntries.getEntries();
+        out.writeInt(entries.size());
+        for (var e : entries) {
+            WritableObjects.writeLongs(out, e.getIndex(), e.getTerm());
+            out.writeObject(e.getData());
+        }
+
+        out.writeObject(appendEntries.leaderAddress());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        short leaderRaftVersion = in.readShort();
+        long term = WritableObjects.readLong(in);
+        String leaderId = (String) in.readObject();
+
+        byte hdr = WritableObjects.readLongHeader(in);
+        long prevLogTerm = WritableObjects.readFirstLong(in, hdr);
+        long prevLogIndex = WritableObjects.readSecondLong(in, hdr);
+
+        hdr = WritableObjects.readLongHeader(in);
+        long leaderCommit = WritableObjects.readFirstLong(in, hdr);
+        long replicatedToAllIndex = WritableObjects.readSecondLong(in, hdr);
+        short payloadVersion = in.readShort();
+
+        int size = in.readInt();
+        var entries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
+        for (int i = 0; i < size; i++) {
+            hdr = WritableObjects.readLongHeader(in);
+            entries.add(new SimpleReplicatedLogEntry(WritableObjects.readFirstLong(in, hdr),
+                WritableObjects.readSecondLong(in, hdr), (Payload) in.readObject()));
+        }
+
+        String leaderAddress = (String)in.readObject();
+
+        appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit,
+                replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion,
+                leaderAddress);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(appendEntries);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AR.java
new file mode 100644 (file)
index 0000000..6aa2ed8
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link AppendEntriesReply}.
+ */
+final class AR implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flag bits
+    private static final int SUCCESS                = 0x10;
+    private static final int FORCE_INSTALL_SNAPSHOT = 0x20;
+    private static final int NEEDS_LEADER_ADDRESS   = 0x40;
+
+    private AppendEntriesReply appendEntriesReply;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AR() {
+        // For Externalizable
+    }
+
+    AR(final AppendEntriesReply appendEntriesReply) {
+        this.appendEntriesReply = requireNonNull(appendEntriesReply);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeShort(appendEntriesReply.getRaftVersion());
+
+        int flags = 0;
+        if (appendEntriesReply.isSuccess()) {
+            flags |= SUCCESS;
+        }
+        if (appendEntriesReply.isForceInstallSnapshot()) {
+            flags |= FORCE_INSTALL_SNAPSHOT;
+        }
+        if (appendEntriesReply.isNeedsLeaderAddress()) {
+            flags |= NEEDS_LEADER_ADDRESS;
+        }
+        WritableObjects.writeLong(out, appendEntriesReply.getTerm(), flags);
+
+        out.writeObject(appendEntriesReply.getFollowerId());
+
+        WritableObjects.writeLongs(out, appendEntriesReply.getLogLastIndex(), appendEntriesReply.getLogLastTerm());
+
+        out.writeShort(appendEntriesReply.getPayloadVersion());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        short raftVersion = in.readShort();
+
+        byte hdr = WritableObjects.readLongHeader(in);
+        final int flags = WritableObjects.longHeaderFlags(hdr);
+
+        long term = WritableObjects.readLongBody(in, hdr);
+        String followerId = (String) in.readObject();
+
+        hdr = WritableObjects.readLongHeader(in);
+        long logLastIndex = WritableObjects.readFirstLong(in, hdr);
+        long logLastTerm = WritableObjects.readSecondLong(in, hdr);
+
+        short payloadVersion = in.readShort();
+
+        appendEntriesReply = new AppendEntriesReply(followerId, term, getFlag(flags, SUCCESS), logLastIndex,
+            logLastTerm, payloadVersion, getFlag(flags, FORCE_INSTALL_SNAPSHOT), getFlag(flags, NEEDS_LEADER_ADDRESS),
+            raftVersion, RaftVersions.CURRENT_VERSION);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(appendEntriesReply);
+    }
+
+    private static boolean getFlag(final int flags, final int bit) {
+        return (flags & bit) != 0;
+    }
+}
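
Both the AE and AR proxies fold boolean flags into spare bits of the long header written via WritableObjects. The bit manipulation itself is ordinary Java; the standalone sketch below uses plain ints rather than the WritableObjects header, purely to stay dependency-free.

    // Standalone illustration of the flag-bit packing used by the AR proxy: each boolean gets a
    // distinct bit, OR-ed into a flags word on write and tested with a mask on read.
    final class FlagBits {
        private static final int SUCCESS                = 0x10;
        private static final int FORCE_INSTALL_SNAPSHOT = 0x20;
        private static final int NEEDS_LEADER_ADDRESS   = 0x40;

        private FlagBits() {
            // utility class
        }

        static int pack(final boolean success, final boolean forceInstallSnapshot, final boolean needsLeaderAddress) {
            int flags = 0;
            if (success) {
                flags |= SUCCESS;
            }
            if (forceInstallSnapshot) {
                flags |= FORCE_INSTALL_SNAPSHOT;
            }
            if (needsLeaderAddress) {
                flags |= NEEDS_LEADER_ADDRESS;
            }
            return flags;
        }

        static boolean getFlag(final int flags, final int bit) {
            return (flags & bit) != 0;
        }

        public static void main(final String[] args) {
            final int flags = pack(true, false, true);
            System.out.println(getFlag(flags, SUCCESS));                // true
            System.out.println(getFlag(flags, FORCE_INSTALL_SNAPSHOT)); // false
            System.out.println(getFlag(flags, NEEDS_LEADER_ADDRESS));   // true
        }
    }
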
index fc5255e2712cea11e9932d18ecd9a898cae0c660..038ad48b8e4aa0cae2778a3e0fce110a55d44a6f 100644 (file)
@@ -5,29 +5,25 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
-public class AbstractRaftRPC implements RaftRPC {
+public abstract class AbstractRaftRPC implements RaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = -6061342433962854822L;
 
     // term
-    private long term;
+    private final long term;
 
-    protected AbstractRaftRPC(long term) {
+    AbstractRaftRPC(final long term) {
         this.term = term;
     }
 
-    // added for testing while serialize-messages=on
-    public AbstractRaftRPC() {
-    }
-
     @Override
-    public long getTerm() {
+    public final long getTerm() {
         return term;
     }
 
-    protected void setTerm(long term) {
-        this.term = term;
-    }
+    // All implementations must use Externalizable Proxy pattern
+    @java.io.Serial
+    abstract Object writeReplace();
 }
index 67c6899231912df7a412e7e9f162736e8b689d99..892ea3356a58259b5a91b6ab3ea7bcfda8ef70b9 100644 (file)
@@ -10,29 +10,27 @@ package org.opendaylight.controller.cluster.raft.messages;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.util.ArrayList;
 import java.util.List;
-import java.util.Optional;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 
 /**
- * Invoked by leader to replicate log entries (§5.3); also used as
- * heartbeat (§5.2).
+ * Invoked by leader to replicate log entries (§5.3); also used as heartbeat (§5.2).
  */
-public class AppendEntries extends AbstractRaftRPC {
+public final class AppendEntries extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     // So that follower can redirect clients
-    private final String leaderId;
+    private final @NonNull String leaderId;
 
     // Index of log entry immediately preceding new ones
     private final long prevLogIndex;
@@ -41,7 +39,7 @@ public class AppendEntries extends AbstractRaftRPC {
     private final long prevLogTerm;
 
     // log entries to store (empty for heart beat - may send more than one for efficiency)
-    private final List<ReplicatedLogEntry> entries;
+    private final @NonNull List<ReplicatedLogEntry> entries;
 
     // leader's commitIndex
     private final long leaderCommit;
@@ -57,9 +55,10 @@ public class AppendEntries extends AbstractRaftRPC {
 
     private final String leaderAddress;
 
-    private AppendEntries(long term, @NonNull String leaderId, long prevLogIndex, long prevLogTerm,
-            @NonNull List<ReplicatedLogEntry> entries, long leaderCommit, long replicatedToAllIndex,
-            short payloadVersion, short recipientRaftVersion, short leaderRaftVersion, @Nullable String leaderAddress) {
+    AppendEntries(final long term, @NonNull final String leaderId, final long prevLogIndex,
+            final long prevLogTerm, @NonNull final List<ReplicatedLogEntry> entries, final long leaderCommit,
+            final long replicatedToAllIndex, final short payloadVersion, final short recipientRaftVersion,
+            final short leaderRaftVersion, @Nullable final String leaderAddress) {
         super(term);
         this.leaderId = requireNonNull(leaderId);
         this.prevLogIndex = prevLogIndex;
@@ -73,17 +72,18 @@ public class AppendEntries extends AbstractRaftRPC {
         this.leaderAddress = leaderAddress;
     }
 
-    public AppendEntries(long term, @NonNull String leaderId, long prevLogIndex, long prevLogTerm,
-            @NonNull List<ReplicatedLogEntry> entries, long leaderCommit, long replicatedToAllIndex,
-            short payloadVersion, short recipientRaftVersion, @Nullable String leaderAddress) {
+    public AppendEntries(final long term, final @NonNull String leaderId, final long prevLogIndex,
+            final long prevLogTerm, final @NonNull List<ReplicatedLogEntry> entries, final long leaderCommit,
+            final long replicatedToAllIndex, final short payloadVersion, final short recipientRaftVersion,
+            final @Nullable String leaderAddress) {
         this(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit, replicatedToAllIndex, payloadVersion,
                 recipientRaftVersion, RaftVersions.CURRENT_VERSION, leaderAddress);
     }
 
     @VisibleForTesting
-    public AppendEntries(long term, @NonNull String leaderId, long prevLogIndex, long prevLogTerm,
-            @NonNull List<ReplicatedLogEntry> entries, long leaderCommit, long replicatedToAllIndex,
-            short payloadVersion) {
+    public AppendEntries(final long term, final @NonNull String leaderId, final long prevLogIndex,
+            final long prevLogTerm, final @NonNull List<ReplicatedLogEntry> entries, final long leaderCommit,
+            final long replicatedToAllIndex, final short payloadVersion) {
         this(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit, replicatedToAllIndex, payloadVersion,
                 RaftVersions.CURRENT_VERSION, null);
     }
@@ -116,8 +116,8 @@ public class AppendEntries extends AbstractRaftRPC {
         return payloadVersion;
     }
 
-    public Optional<String> getLeaderAddress() {
-        return Optional.ofNullable(leaderAddress);
+    public @Nullable String leaderAddress() {
+        return leaderAddress;
     }
 
     public short getLeaderRaftVersion() {
@@ -138,14 +138,16 @@ public class AppendEntries extends AbstractRaftRPC {
                 + ", entries=" + entries + "]";
     }
 
-    private Object writeReplace() {
-        return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new Proxy(this);
+    @Override
+    Object writeReplace() {
+        return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new ProxyV2(this) : new AE(this);
     }
 
     /**
      * Fluorine version that adds the leader address.
      */
     private static class ProxyV2 implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private AppendEntries appendEntries;
@@ -156,12 +158,12 @@ public class AppendEntries extends AbstractRaftRPC {
         public ProxyV2() {
         }
 
-        ProxyV2(AppendEntries appendEntries) {
+        ProxyV2(final AppendEntries appendEntries) {
             this.appendEntries = appendEntries;
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
+        public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeShort(appendEntries.leaderRaftVersion);
             out.writeLong(appendEntries.getTerm());
             out.writeObject(appendEntries.leaderId);
@@ -182,7 +184,7 @@ public class AppendEntries extends AbstractRaftRPC {
         }
 
         @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             short leaderRaftVersion = in.readShort();
             long term = in.readLong();
             String leaderId = (String) in.readObject();
@@ -193,80 +195,19 @@ public class AppendEntries extends AbstractRaftRPC {
             short payloadVersion = in.readShort();
 
             int size = in.readInt();
-            List<ReplicatedLogEntry> entries = new ArrayList<>(size);
+            var entries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
             for (int i = 0; i < size; i++) {
                 entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject()));
             }
 
             String leaderAddress = (String)in.readObject();
 
-            appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit,
+            appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries.build(), leaderCommit,
                     replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, leaderRaftVersion,
                     leaderAddress);
         }
 
-        private Object readResolve() {
-            return appendEntries;
-        }
-    }
-
-    /**
-     * Pre-Fluorine version.
-     */
-    @Deprecated
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private AppendEntries appendEntries;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(AppendEntries appendEntries) {
-            this.appendEntries = appendEntries;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeLong(appendEntries.getTerm());
-            out.writeObject(appendEntries.leaderId);
-            out.writeLong(appendEntries.prevLogTerm);
-            out.writeLong(appendEntries.prevLogIndex);
-            out.writeLong(appendEntries.leaderCommit);
-            out.writeLong(appendEntries.replicatedToAllIndex);
-            out.writeShort(appendEntries.payloadVersion);
-
-            out.writeInt(appendEntries.entries.size());
-            for (ReplicatedLogEntry e: appendEntries.entries) {
-                out.writeLong(e.getIndex());
-                out.writeLong(e.getTerm());
-                out.writeObject(e.getData());
-            }
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            long term = in.readLong();
-            String leaderId = (String) in.readObject();
-            long prevLogTerm = in.readLong();
-            long prevLogIndex = in.readLong();
-            long leaderCommit = in.readLong();
-            long replicatedToAllIndex = in.readLong();
-            short payloadVersion = in.readShort();
-
-            int size = in.readInt();
-            List<ReplicatedLogEntry> entries = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                entries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject()));
-            }
-
-            appendEntries = new AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit,
-                replicatedToAllIndex, payloadVersion, RaftVersions.CURRENT_VERSION, RaftVersions.BORON_VERSION, null);
-        }
-
+        @java.io.Serial
         private Object readResolve() {
             return appendEntries;
         }
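
AppendEntries.writeReplace() (and its AppendEntriesReply counterpart) now picks the wire format by recipient version: older peers get the legacy ProxyV2, newer ones the compact AE proxy. The sketch below shows the same version-gated selection with placeholder version constants and proxy types, not the real RaftVersions values or the actual ProxyV2/AE classes.

    import java.io.Serializable;

    // Sketch of version-gated proxy selection: writeReplace() picks the wire format the
    // recipient understands.
    final class VersionedMessage implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        static final short LEGACY_VERSION = 10;

        private final short recipientVersion;
        private final String payload;

        VersionedMessage(final short recipientVersion, final String payload) {
            this.recipientVersion = recipientVersion;
            this.payload = payload;
        }

        @java.io.Serial
        private Object writeReplace() {
            // older recipients get the legacy format, newer ones the compact one
            return recipientVersion <= LEGACY_VERSION ? new LegacyProxy(payload) : new CompactProxy(payload);
        }

        // In the real code each proxy rebuilds the full message in readResolve(); a placeholder
        // version number is enough for this sketch.
        private record LegacyProxy(String payload) implements Serializable {
            @java.io.Serial
            private Object readResolve() {
                return new VersionedMessage(LEGACY_VERSION, payload);
            }
        }

        private record CompactProxy(String payload) implements Serializable {
            @java.io.Serial
            private Object readResolve() {
                return new VersionedMessage((short) (LEGACY_VERSION + 1), payload);
            }
        }
    }
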
index 902b9a03b757930ccceeb07c4eb621bab2dbeb78..033a19a7b26e758a30430b902085649af4c886ef 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -18,7 +17,8 @@ import org.opendaylight.controller.cluster.raft.RaftVersions;
 /**
  * Reply for the AppendEntries message.
  */
-public class AppendEntriesReply extends AbstractRaftRPC {
+public final class AppendEntriesReply extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = -7487547356392536683L;
 
     // true if follower contained entry matching
@@ -47,23 +47,22 @@ public class AppendEntriesReply extends AbstractRaftRPC {
     private final short recipientRaftVersion;
 
     @VisibleForTesting
-    public AppendEntriesReply(String followerId, long term, boolean success, long logLastIndex, long logLastTerm,
-            short payloadVersion) {
+    public AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex,
+            final long logLastTerm, final short payloadVersion) {
         this(followerId, term, success, logLastIndex, logLastTerm, payloadVersion, false, false,
                 RaftVersions.CURRENT_VERSION);
     }
 
-    public AppendEntriesReply(String followerId, long term, boolean success, long logLastIndex, long logLastTerm,
-            short payloadVersion, boolean forceInstallSnapshot, boolean needsLeaderAddress,
-            short recipientRaftVersion) {
+    public AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex,
+            final long logLastTerm, final short payloadVersion, final boolean forceInstallSnapshot,
+            final boolean needsLeaderAddress, final short recipientRaftVersion) {
         this(followerId, term, success, logLastIndex, logLastTerm, payloadVersion, forceInstallSnapshot,
                 needsLeaderAddress, RaftVersions.CURRENT_VERSION, recipientRaftVersion);
-
     }
 
-    private AppendEntriesReply(String followerId, long term, boolean success, long logLastIndex, long logLastTerm,
-            short payloadVersion, boolean forceInstallSnapshot, boolean needsLeaderAddress, short raftVersion,
-            short recipientRaftVersion) {
+    AppendEntriesReply(final String followerId, final long term, final boolean success, final long logLastIndex,
+            final long logLastTerm, final short payloadVersion, final boolean forceInstallSnapshot,
+            final boolean needsLeaderAddress, final short raftVersion, final short recipientRaftVersion) {
         super(term);
         this.followerId = followerId;
         this.success = success;
@@ -117,14 +116,16 @@ public class AppendEntriesReply extends AbstractRaftRPC {
                 + ", recipientRaftVersion=" + recipientRaftVersion + "]";
     }
 
-    private Object writeReplace() {
-        return recipientRaftVersion >= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new Proxy(this);
+    @Override
+    Object writeReplace() {
+        return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new Proxy2(this) : new AR(this);
     }
 
     /**
      * Fluorine version that adds the needsLeaderAddress flag.
      */
     private static class Proxy2 implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private AppendEntriesReply appendEntriesReply;
@@ -135,12 +136,12 @@ public class AppendEntriesReply extends AbstractRaftRPC {
         public Proxy2() {
         }
 
-        Proxy2(AppendEntriesReply appendEntriesReply) {
+        Proxy2(final AppendEntriesReply appendEntriesReply) {
             this.appendEntriesReply = appendEntriesReply;
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
+        public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeShort(appendEntriesReply.raftVersion);
             out.writeLong(appendEntriesReply.getTerm());
             out.writeObject(appendEntriesReply.followerId);
@@ -153,7 +154,7 @@ public class AppendEntriesReply extends AbstractRaftRPC {
         }
 
         @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             short raftVersion = in.readShort();
             long term = in.readLong();
             String followerId = (String) in.readObject();
@@ -169,57 +170,7 @@ public class AppendEntriesReply extends AbstractRaftRPC {
                     RaftVersions.CURRENT_VERSION);
         }
 
-        private Object readResolve() {
-            return appendEntriesReply;
-        }
-    }
-
-    /**
-     * Pre-Fluorine version.
-     */
-    @Deprecated
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private AppendEntriesReply appendEntriesReply;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(AppendEntriesReply appendEntriesReply) {
-            this.appendEntriesReply = appendEntriesReply;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeShort(appendEntriesReply.raftVersion);
-            out.writeLong(appendEntriesReply.getTerm());
-            out.writeObject(appendEntriesReply.followerId);
-            out.writeBoolean(appendEntriesReply.success);
-            out.writeLong(appendEntriesReply.logLastIndex);
-            out.writeLong(appendEntriesReply.logLastTerm);
-            out.writeShort(appendEntriesReply.payloadVersion);
-            out.writeBoolean(appendEntriesReply.forceInstallSnapshot);
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            short raftVersion = in.readShort();
-            long term = in.readLong();
-            String followerId = (String) in.readObject();
-            boolean success = in.readBoolean();
-            long logLastIndex = in.readLong();
-            long logLastTerm = in.readLong();
-            short payloadVersion = in.readShort();
-            boolean forceInstallSnapshot = in.readBoolean();
-
-            appendEntriesReply = new AppendEntriesReply(followerId, term, success, logLastIndex, logLastTerm,
-                    payloadVersion, forceInstallSnapshot, false, raftVersion, RaftVersions.CURRENT_VERSION);
-        }
-
+        @java.io.Serial
         private Object readResolve() {
             return appendEntriesReply;
         }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IR.java
new file mode 100644 (file)
index 0000000..e9d95d8
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link InstallSnapshotReply}.
+ */
+final class IR implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flags
+    private static final int SUCCESS = 0x10;
+
+    private InstallSnapshotReply installSnapshotReply;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public IR() {
+        // For Externalizable
+    }
+
+    IR(final InstallSnapshotReply installSnapshotReply) {
+        this.installSnapshotReply = requireNonNull(installSnapshotReply);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, installSnapshotReply.getTerm(), installSnapshotReply.isSuccess() ? SUCCESS : 0);
+        out.writeObject(installSnapshotReply.getFollowerId());
+        out.writeInt(installSnapshotReply.getChunkIndex());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final byte hdr = WritableObjects.readLongHeader(in);
+        final int flags = WritableObjects.longHeaderFlags(hdr);
+
+        long term = WritableObjects.readLongBody(in, hdr);
+        String followerId = (String) in.readObject();
+        int chunkIndex = in.readInt();
+
+        installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, (flags & SUCCESS) != 0);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(installSnapshotReply);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IS.java
new file mode 100644 (file)
index 0000000..3247bb2
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Optional;
+import java.util.OptionalInt;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
+import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link InstallSnapshot}.
+ */
+final class IS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flags
+    private static final int LAST_CHUNK_HASHCODE = 0x10;
+    private static final int SERVER_CONFIG       = 0x20;
+
+    private InstallSnapshot installSnapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public IS() {
+        // For Externalizable
+    }
+
+    IS(final InstallSnapshot installSnapshot) {
+        this.installSnapshot = requireNonNull(installSnapshot);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        int flags = 0;
+        final var lastChunkHashCode = installSnapshot.getLastChunkHashCode();
+        if (lastChunkHashCode.isPresent()) {
+            flags |= LAST_CHUNK_HASHCODE;
+        }
+        final var serverConfig = installSnapshot.getServerConfig();
+        if (serverConfig.isPresent()) {
+            flags |= SERVER_CONFIG;
+        }
+
+        WritableObjects.writeLong(out, installSnapshot.getTerm(), flags);
+        out.writeObject(installSnapshot.getLeaderId());
+        WritableObjects.writeLongs(out, installSnapshot.getLastIncludedIndex(), installSnapshot.getLastIncludedTerm());
+        out.writeInt(installSnapshot.getChunkIndex());
+        out.writeInt(installSnapshot.getTotalChunks());
+
+        if (lastChunkHashCode.isPresent()) {
+            out.writeInt(lastChunkHashCode.orElseThrow());
+        }
+        if (serverConfig.isPresent()) {
+            out.writeObject(serverConfig.orElseThrow());
+        }
+
+        out.writeObject(installSnapshot.getData());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        byte hdr = WritableObjects.readLongHeader(in);
+        final int flags = WritableObjects.longHeaderFlags(hdr);
+
+        long term = WritableObjects.readLongBody(in, hdr);
+        String leaderId = (String) in.readObject();
+
+        hdr = WritableObjects.readLongHeader(in);
+        long lastIncludedIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastIncludedTerm = WritableObjects.readSecondLong(in, hdr);
+        int chunkIndex = in.readInt();
+        int totalChunks = in.readInt();
+
+        OptionalInt lastChunkHashCode = getFlag(flags, LAST_CHUNK_HASHCODE) ? OptionalInt.of(in.readInt())
+            : OptionalInt.empty();
+        Optional<ServerConfigurationPayload> serverConfig = getFlag(flags, SERVER_CONFIG)
+                ? Optional.of((ServerConfigurationPayload)in.readObject()) : Optional.empty();
+
+        byte[] data = (byte[])in.readObject();
+
+        installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data,
+                chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(installSnapshot);
+    }
+
+    private static boolean getFlag(final int flags, final int bit) {
+        return (flags & bit) != 0;
+    }
+}
+
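The IS proxy above leans on WritableObjects in two ways: the presence bits for the optional fields (the 0x10/0x20 constants) ride in the flags of the header byte written for the term, and the lastIncludedIndex/lastIncludedTerm pair shares a single header via writeLongs(). A minimal hedged sketch of both round trips, using only the WritableObjects calls already exercised above; the stream setup, class name and flag constant are illustrative, not part of the patch:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.yangtools.concepts.WritableObjects;

final class WritableObjectsSketch {
    // Same bit position as the flags used by the proxies above; the constant name is made up
    private static final int SOME_FLAG = 0x10;

    static void roundTrip() throws IOException {
        final var bytes = new ByteArrayOutputStream();
        try (var out = new DataOutputStream(bytes)) {
            // A long value and a presence flag share one header byte
            WritableObjects.writeLong(out, 42L, SOME_FLAG);
            // Two related longs share one header byte as well
            WritableObjects.writeLongs(out, 1234L, 5L);
        }

        try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            byte hdr = WritableObjects.readLongHeader(in);
            final boolean flagSet = (WritableObjects.longHeaderFlags(hdr) & SOME_FLAG) != 0;
            final long term = WritableObjects.readLongBody(in, hdr);

            hdr = WritableObjects.readLongHeader(in);
            final long first = WritableObjects.readFirstLong(in, hdr);
            final long second = WritableObjects.readSecondLong(in, hdr);
            // flagSet == true, term == 42, first == 1234, second == 5
        }
    }
}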
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IdentifiablePayload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/IdentifiablePayload.java
new file mode 100644 (file)
index 0000000..a6034c5
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.concepts.Identifier;
+
+public abstract class IdentifiablePayload<T extends Identifier> extends Payload implements Identifiable<T> {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+}
index b64e902f3ff9f93e782c4114e6e690bbef971b0a..3cd470f6cc6d3c7186dc4fb20324482b216153d7 100644 (file)
@@ -5,21 +5,24 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
-import com.google.common.base.Optional;
+import com.google.common.annotations.VisibleForTesting;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
+import java.util.Optional;
+import java.util.OptionalInt;
+import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 
 /**
  * Message sent from a leader to install a snapshot chunk on a follower.
  */
-public class InstallSnapshot extends AbstractRaftRPC {
+public final class InstallSnapshot extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final String leaderId;
@@ -28,15 +31,20 @@ public class InstallSnapshot extends AbstractRaftRPC {
     private final byte[] data;
     private final int chunkIndex;
     private final int totalChunks;
-    private final Optional<Integer> lastChunkHashCode;
+    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via writeReplace()")
+    private final OptionalInt lastChunkHashCode;
+    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via writeReplace()")
     private final Optional<ServerConfigurationPayload> serverConfig;
-
-    @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "Stores a reference to an externally mutable byte[] "
-            + "object but this is OK since this class is merely a DTO and does not process byte[] internally. "
-            + "Also it would be inefficient to create a copy as the byte[] could be large.")
-    public InstallSnapshot(long term, String leaderId, long lastIncludedIndex, long lastIncludedTerm, byte[] data,
-            int chunkIndex, int totalChunks, Optional<Integer> lastChunkHashCode,
-            Optional<ServerConfigurationPayload> serverConfig) {
+    private final short recipientRaftVersion;
+
+    @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = """
+        Stores a reference to an externally mutable byte[] object but this is OK since this class is merely a DTO and \
+        does not process byte[] internally. Also it would be inefficient to create a copy as the byte[] could be \
+        large.""")
+    public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex,
+            final long lastIncludedTerm, final byte[] data, final int chunkIndex, final int totalChunks,
+            final OptionalInt lastChunkHashCode, final Optional<ServerConfigurationPayload> serverConfig,
+            final short recipientRaftVersion) {
         super(term);
         this.leaderId = leaderId;
         this.lastIncludedIndex = lastIncludedIndex;
@@ -46,12 +54,15 @@ public class InstallSnapshot extends AbstractRaftRPC {
         this.totalChunks = totalChunks;
         this.lastChunkHashCode = lastChunkHashCode;
         this.serverConfig = serverConfig;
+        this.recipientRaftVersion = recipientRaftVersion;
     }
 
-    public InstallSnapshot(long term, String leaderId, long lastIncludedIndex,
-                           long lastIncludedTerm, byte[] data, int chunkIndex, int totalChunks) {
-        this(term, leaderId, lastIncludedIndex, lastIncludedTerm, data, chunkIndex, totalChunks,
-                Optional.<Integer>absent(), Optional.<ServerConfigurationPayload>absent());
+    @VisibleForTesting
+    public InstallSnapshot(final long term, final String leaderId, final long lastIncludedIndex,
+                           final long lastIncludedTerm, final byte[] data, final int chunkIndex,
+                           final int totalChunks) {
+        this(term, leaderId, lastIncludedIndex, lastIncludedTerm, data, chunkIndex, totalChunks, OptionalInt.empty(),
+            Optional.empty(), RaftVersions.CURRENT_VERSION);
     }
 
     public String getLeaderId() {
@@ -66,9 +77,10 @@ public class InstallSnapshot extends AbstractRaftRPC {
         return lastIncludedTerm;
     }
 
-    @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
-            + "this is OK since this class is merely a DTO and does not process the byte[] internally. "
-            + "Also it would be inefficient to create a return copy as the byte[] could be large.")
+    @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = """
+        Exposes a mutable object stored in a field but this is OK since this class is merely a DTO and does not \
+        process the byte[] internally. Also it would be inefficient to create a return copy as the byte[] could be \
+        large.""")
     public byte[] getData() {
         return data;
     }
@@ -81,7 +93,7 @@ public class InstallSnapshot extends AbstractRaftRPC {
         return totalChunks;
     }
 
-    public Optional<Integer> getLastChunkHashCode() {
+    public OptionalInt getLastChunkHashCode() {
         return lastChunkHashCode;
     }
 
@@ -89,24 +101,21 @@ public class InstallSnapshot extends AbstractRaftRPC {
         return serverConfig;
     }
 
-
-    public <T> Object toSerializable(short version) {
-        return this;
-    }
-
     @Override
     public String toString() {
         return "InstallSnapshot [term=" + getTerm() + ", leaderId=" + leaderId + ", lastIncludedIndex="
                 + lastIncludedIndex + ", lastIncludedTerm=" + lastIncludedTerm + ", datasize=" + data.length
                 + ", Chunk=" + chunkIndex + "/" + totalChunks + ", lastChunkHashCode=" + lastChunkHashCode
-                + ", serverConfig=" + serverConfig.orNull() + "]";
+                + ", serverConfig=" + serverConfig.orElse(null) + "]";
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
+    @Override
+    Object writeReplace() {
+        return recipientRaftVersion <= RaftVersions.FLUORINE_VERSION ? new Proxy(this) : new IS(this);
     }
 
     private static class Proxy implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private InstallSnapshot installSnapshot;
@@ -117,12 +126,12 @@ public class InstallSnapshot extends AbstractRaftRPC {
         public Proxy() {
         }
 
-        Proxy(InstallSnapshot installSnapshot) {
+        Proxy(final InstallSnapshot installSnapshot) {
             this.installSnapshot = installSnapshot;
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
+        public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeLong(installSnapshot.getTerm());
             out.writeObject(installSnapshot.leaderId);
             out.writeLong(installSnapshot.lastIncludedIndex);
@@ -132,19 +141,19 @@ public class InstallSnapshot extends AbstractRaftRPC {
 
             out.writeByte(installSnapshot.lastChunkHashCode.isPresent() ? 1 : 0);
             if (installSnapshot.lastChunkHashCode.isPresent()) {
-                out.writeInt(installSnapshot.lastChunkHashCode.get().intValue());
+                out.writeInt(installSnapshot.lastChunkHashCode.orElseThrow());
             }
 
             out.writeByte(installSnapshot.serverConfig.isPresent() ? 1 : 0);
             if (installSnapshot.serverConfig.isPresent()) {
-                out.writeObject(installSnapshot.serverConfig.get());
+                out.writeObject(installSnapshot.serverConfig.orElseThrow());
             }
 
             out.writeObject(installSnapshot.data);
         }
 
         @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             long term = in.readLong();
             String leaderId = (String) in.readObject();
             long lastIncludedIndex = in.readLong();
@@ -152,24 +161,17 @@ public class InstallSnapshot extends AbstractRaftRPC {
             int chunkIndex = in.readInt();
             int totalChunks = in.readInt();
 
-            Optional<Integer> lastChunkHashCode = Optional.absent();
-            boolean chunkHashCodePresent = in.readByte() == 1;
-            if (chunkHashCodePresent) {
-                lastChunkHashCode = Optional.of(in.readInt());
-            }
-
-            Optional<ServerConfigurationPayload> serverConfig = Optional.absent();
-            boolean serverConfigPresent = in.readByte() == 1;
-            if (serverConfigPresent) {
-                serverConfig = Optional.of((ServerConfigurationPayload)in.readObject());
-            }
+            OptionalInt lastChunkHashCode = in.readByte() == 1 ? OptionalInt.of(in.readInt()) : OptionalInt.empty();
+            Optional<ServerConfigurationPayload> serverConfig = in.readByte() == 1
+                    ? Optional.of((ServerConfigurationPayload)in.readObject()) : Optional.empty();
 
             byte[] data = (byte[])in.readObject();
 
             installSnapshot = new InstallSnapshot(term, leaderId, lastIncludedIndex, lastIncludedTerm, data,
-                    chunkIndex, totalChunks, lastChunkHashCode, serverConfig);
+                    chunkIndex, totalChunks, lastChunkHashCode, serverConfig, RaftVersions.CURRENT_VERSION);
         }
 
+        @java.io.Serial
         private Object readResolve() {
             return installSnapshot;
         }
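With writeReplace() now keyed on recipientRaftVersion, the wire form of an InstallSnapshot is chosen per recipient: FLUORINE_VERSION and older still receive the legacy Proxy encoding, while newer recipients get the compact IS form. A hedged sketch of a round trip through plain Java serialization, relying only on the constructor shown above; the values and helper class are illustrative:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Optional;
import java.util.OptionalInt;
import org.opendaylight.controller.cluster.raft.RaftVersions;
import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;

final class InstallSnapshotRoundTrip {
    static InstallSnapshot roundTrip() throws Exception {
        final var original = new InstallSnapshot(3, "leader-1", 10, 2, new byte[] { 1, 2, 3 },
            1, 4, OptionalInt.of(12345), Optional.empty(), RaftVersions.CURRENT_VERSION);

        final var bytes = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bytes)) {
            // writeReplace() substitutes the IS proxy here, since the recipient is CURRENT_VERSION
            oos.writeObject(original);
        }
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            // IS.readResolve() hands back a reconstructed InstallSnapshot
            return (InstallSnapshot) ois.readObject();
        }
    }
}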
index bbee5c4175e800d71f689db9310b77b03b4cdf9c..ed8b2800816f29a42c46b34adfa1aa5ca3004067 100644 (file)
@@ -5,15 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-public class InstallSnapshotReply extends AbstractRaftRPC {
+public final class InstallSnapshotReply extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 642227896390779503L;
 
     // The followerId - this will be used to figure out which follower is
@@ -22,7 +17,7 @@ public class InstallSnapshotReply extends AbstractRaftRPC {
     private final int chunkIndex;
     private final boolean success;
 
-    public InstallSnapshotReply(long term, String followerId, int chunkIndex, boolean success) {
+    public InstallSnapshotReply(final long term, final String followerId, final int chunkIndex, final boolean success) {
         super(term);
         this.followerId = followerId;
         this.chunkIndex = chunkIndex;
@@ -49,45 +44,8 @@ public class InstallSnapshotReply extends AbstractRaftRPC {
                 + ", success=" + success + "]";
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private InstallSnapshotReply installSnapshotReply;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(InstallSnapshotReply installSnapshotReply) {
-            this.installSnapshotReply = installSnapshotReply;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeLong(installSnapshotReply.getTerm());
-            out.writeObject(installSnapshotReply.followerId);
-            out.writeInt(installSnapshotReply.chunkIndex);
-            out.writeBoolean(installSnapshotReply.success);
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            long term = in.readLong();
-            String followerId = (String) in.readObject();
-            int chunkIndex = in.readInt();
-            boolean success = in.readBoolean();
-
-            installSnapshotReply = new InstallSnapshotReply(term, followerId, chunkIndex, success);
-        }
-
-        private Object readResolve() {
-            return installSnapshotReply;
-        }
+    @Override
+    Object writeReplace() {
+        return new IR(this);
     }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/Payload.java
new file mode 100644 (file)
index 0000000..c75385a
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import java.io.Serializable;
+
+/**
+ * An instance of a {@link Payload} subclass is meant to be used as the payload for {@link AppendEntries}.
+ *
+ * <p>
+ * When an actor derived from RaftActor invokes persistData, it must pass an instance of a Payload subclass.
+ * Similarly, when state needs to be applied to the derived RaftActor, it will be passed a Payload subclass instance.
+ */
+public abstract class Payload implements Serializable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * Return the estimate of in-memory size of this payload.
+     *
+     * @return An estimate of the in-memory size of this payload.
+     */
+    public abstract int size();
+
+    /**
+     * Return the estimate of serialized size of this payload when passed through serialization. The estimate needs to
+     * be reasonably accurate and should err on the side of caution and report a slightly-higher size in face of
+     * uncertainty.
+     *
+     * @return An estimate of serialized size.
+     */
+    public abstract int serializedSize();
+
+    /**
+     * Return the serialization proxy for this object.
+     *
+     * @return Serialization proxy
+     */
+    @java.io.Serial
+    protected abstract Object writeReplace();
+}
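To make the contract above concrete, here is a hedged sketch of a hypothetical Payload subclass; the class, its proxy and the size figures are invented for illustration and are not part of this change:

import java.io.Serializable;
import org.opendaylight.controller.cluster.raft.messages.Payload;

// Hypothetical payload carrying a pre-serialized byte[]; invented for illustration only.
final class ExamplePayload extends Payload {
    @java.io.Serial
    private static final long serialVersionUID = 1L;

    private final byte[] bytes;

    ExamplePayload(final byte[] bytes) {
        this.bytes = bytes.clone();
    }

    @Override
    public int size() {
        // In-memory estimate: just the array contents here
        return bytes.length;
    }

    @Override
    public int serializedSize() {
        // Err on the high side: the array plus a rough allowance for object-stream overhead
        return bytes.length + 64;
    }

    @Override
    protected Object writeReplace() {
        // Real payloads substitute a compact proxy, as the classes in this change do
        return new ExampleProxy(bytes);
    }

    private record ExampleProxy(byte[] bytes) implements Serializable {
        @java.io.Serial
        private Object readResolve() {
            return new ExamplePayload(bytes);
        }
    }
}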
@@ -5,7 +5,7 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
+package org.opendaylight.controller.cluster.raft.messages;
 
 /**
  * This is a tagging interface for a Payload implementation that needs to always be persisted regardless of
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/RV.java
new file mode 100644 (file)
index 0000000..b75f1b7
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link RequestVote}.
+ */
+final class RV implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private RequestVote requestVote;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public RV() {
+        // For Externalizable
+    }
+
+    RV(final RequestVote requestVote) {
+        this.requestVote = requireNonNull(requestVote);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, requestVote.getTerm());
+        out.writeObject(requestVote.getCandidateId());
+        WritableObjects.writeLongs(out, requestVote.getLastLogIndex(), requestVote.getLastLogTerm());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        long term = WritableObjects.readLong(in);
+        String candidateId = (String) in.readObject();
+
+        final byte hdr = WritableObjects.readLongHeader(in);
+        long lastLogIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastLogTerm = WritableObjects.readSecondLong(in, hdr);
+
+        requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return requestVote;
+    }
+}
index 9f4977e83ca4b56f319ba2aad61de90667ee8023..0c74d890d82f2c23783bb825e2dc0e05965bf28a 100644 (file)
@@ -5,10 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.Serializable;
 
 /**
@@ -19,8 +19,8 @@ public class RemoveServer implements Serializable {
 
     private final String serverId;
 
-    public RemoveServer(String serverId) {
-        this.serverId = Preconditions.checkNotNull(serverId);
+    public RemoveServer(final String serverId) {
+        this.serverId = requireNonNull(serverId);
     }
 
     public String getServerId() {
index 5561085539a179939dc1167fc74c8d8c7da58d26..766c5c4338d4cd5aa5cfd00ec03460a6d8ebc64f 100644 (file)
@@ -5,11 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
 import java.io.Serializable;
 
 /**
@@ -22,8 +22,8 @@ public final class RequestLeadership implements Serializable {
     private final ActorRef replyTo;
 
     public RequestLeadership(final String requestedFollowerId, final ActorRef replyTo) {
-        this.requestedFollowerId = Preconditions.checkNotNull(requestedFollowerId);
-        this.replyTo = Preconditions.checkNotNull(replyTo);
+        this.requestedFollowerId = requireNonNull(requestedFollowerId);
+        this.replyTo = requireNonNull(replyTo);
     }
 
     public String getRequestedFollowerId() {
index d5a581aab8cdbe1d78f29da9b31ccf52025d6912..2b33a12950620accf369300b23074710c8b5b3c2 100644 (file)
@@ -5,18 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
 /**
  * Invoked by candidates to gather votes (§5.2).
  */
-public class RequestVote extends AbstractRaftRPC {
+public final class RequestVote extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = -6967509186297108657L;
 
     // candidate requesting vote
@@ -28,7 +23,7 @@ public class RequestVote extends AbstractRaftRPC {
     // term of candidate’s last log entry (§5.4)
     private final long lastLogTerm;
 
-    public RequestVote(long term, String candidateId, long lastLogIndex, long lastLogTerm) {
+    public RequestVote(final long term, final String candidateId, final long lastLogIndex, final long lastLogTerm) {
         super(term);
         this.candidateId = candidateId;
         this.lastLogIndex = lastLogIndex;
@@ -56,45 +51,8 @@ public class RequestVote extends AbstractRaftRPC {
                 + "]";
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private RequestVote requestVote;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(RequestVote requestVote) {
-            this.requestVote = requestVote;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeLong(requestVote.getTerm());
-            out.writeObject(requestVote.candidateId);
-            out.writeLong(requestVote.lastLogIndex);
-            out.writeLong(requestVote.lastLogTerm);
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            long term = in.readLong();
-            String candidateId = (String) in.readObject();
-            long lastLogIndex = in.readLong();
-            long lastLogTerm = in.readLong();
-
-            requestVote = new RequestVote(term, candidateId, lastLogIndex, lastLogTerm);
-        }
-
-        private Object readResolve() {
-            return requestVote;
-        }
+    @Override
+    Object writeReplace() {
+        return new RV(this);
     }
 }
index 9b7a2f34566342ecd86a52aec1e96ef783f4b9ef..01fd9abe2e1266572122e155630e4eca82cfb4d1 100644 (file)
@@ -5,21 +5,16 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
 public final class RequestVoteReply extends AbstractRaftRPC {
+    @java.io.Serial
     private static final long serialVersionUID = 8427899326488775660L;
 
     // true means candidate received vote
     private final boolean voteGranted;
 
-    public RequestVoteReply(long term, boolean voteGranted) {
+    public RequestVoteReply(final long term, final boolean voteGranted) {
         super(term);
         this.voteGranted = voteGranted;
     }
@@ -33,41 +28,8 @@ public final class RequestVoteReply extends AbstractRaftRPC {
         return "RequestVoteReply [term=" + getTerm() + ", voteGranted=" + voteGranted + "]";
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    private static class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private RequestVoteReply requestVoteReply;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(RequestVoteReply requestVoteReply) {
-            this.requestVoteReply = requestVoteReply;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeLong(requestVoteReply.getTerm());
-            out.writeBoolean(requestVoteReply.voteGranted);
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException {
-            long term = in.readLong();
-            boolean voteGranted = in.readBoolean();
-
-            requestVoteReply = new RequestVoteReply(term, voteGranted);
-        }
-
-        private Object readResolve() {
-            return requestVoteReply;
-        }
+    @Override
+    Object writeReplace() {
+        return new VR(this);
     }
 }
index 3375137adac63d876463ac46982d3626f2f21137..a7de8ea1a7b5c2e37944ed3ef06d6b3f58d8ba92 100644 (file)
@@ -5,10 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.messages;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.Serializable;
 
 /**
@@ -20,8 +20,8 @@ public class ServerRemoved implements Serializable {
 
     private final String serverId;
 
-    public ServerRemoved(String serverId) {
-        this.serverId = Preconditions.checkNotNull(serverId);
+    public ServerRemoved(final String serverId) {
+        this.serverId = requireNonNull(serverId);
     }
 
     public String getServerId() {
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/VR.java
new file mode 100644 (file)
index 0000000..d5a489b
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link RequestVoteReply}.
+ */
+final class VR implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    // Flags
+    private static final int VOTE_GRANTED = 0x10;
+
+    private RequestVoteReply requestVoteReply;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public VR() {
+        // For Externalizable
+    }
+
+    VR(final RequestVoteReply requestVoteReply) {
+        this.requestVoteReply = requireNonNull(requestVoteReply);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, requestVoteReply.getTerm(), requestVoteReply.isVoteGranted() ? VOTE_GRANTED : 0);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final byte hdr = WritableObjects.readLongHeader(in);
+        requestVoteReply = new RequestVoteReply(WritableObjects.readLongBody(in, hdr),
+            (WritableObjects.longHeaderFlags(hdr) & VOTE_GRANTED) != 0);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(requestVoteReply);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/AJE.java
new file mode 100644 (file)
index 0000000..4e39e98
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link ApplyJournalEntries}.
+ */
+final class AJE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ApplyJournalEntries applyEntries;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AJE() {
+        // For Externalizable
+    }
+
+    AJE(final ApplyJournalEntries applyEntries) {
+        this.applyEntries = requireNonNull(applyEntries);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, applyEntries.getToIndex());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        applyEntries = new ApplyJournalEntries(WritableObjects.readLong(in));
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(applyEntries);
+    }
+}
index 3c0a8ac700c7de8e4e1fd156eb07ffbf0761d614..30da667c2695c666f04eaae0d3f2a216772fe125 100644 (file)
@@ -8,10 +8,6 @@
 package org.opendaylight.controller.cluster.raft.persisted;
 
 import akka.dispatch.ControlMessage;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
 
 /**
@@ -22,38 +18,8 @@ import java.io.Serializable;
  *
  * @author Thomas Pantelis
  */
-public class ApplyJournalEntries implements Serializable, ControlMessage {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ApplyJournalEntries applyEntries;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ApplyJournalEntries applyEntries) {
-            this.applyEntries = applyEntries;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(applyEntries.toIndex);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            applyEntries = new ApplyJournalEntries(in.readLong());
-        }
-
-        private Object readResolve() {
-            return applyEntries;
-        }
-    }
-
+public final class ApplyJournalEntries implements Serializable, ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long toIndex;
@@ -66,12 +32,13 @@ public class ApplyJournalEntries implements Serializable, ControlMessage {
         return toIndex;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "ApplyJournalEntries [toIndex=" + toIndex + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new AJE(this);
+    }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/DE.java
new file mode 100644 (file)
index 0000000..6bd34c2
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link DeleteEntries}.
+ */
+final class DE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private DeleteEntries deleteEntries;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DE() {
+        // For Externalizable
+    }
+
+    DE(final DeleteEntries deleteEntries) {
+        this.deleteEntries = requireNonNull(deleteEntries);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, deleteEntries.getFromIndex());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        deleteEntries = new DeleteEntries(WritableObjects.readLong(in));
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(deleteEntries);
+    }
+}
index 57f5af3a034e90d31f716eea1da38626832f593f..8b4eb8388af80799ffd77dc439aca9c10a29cac6 100644 (file)
@@ -7,10 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
 
 /**
@@ -18,38 +14,8 @@ import java.io.Serializable;
  *
  * @author Thomas Pantelis
  */
-public class DeleteEntries implements Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private DeleteEntries deleteEntries;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final DeleteEntries deleteEntries) {
-            this.deleteEntries = deleteEntries;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(deleteEntries.fromIndex);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            deleteEntries = new DeleteEntries(in.readLong());
-        }
-
-        private Object readResolve() {
-            return deleteEntries;
-        }
-    }
-
+public final class DeleteEntries implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long fromIndex;
@@ -62,12 +28,13 @@ public class DeleteEntries implements Serializable {
         return fromIndex;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "DeleteEntries [fromIndex=" + fromIndex + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new DE(this);
+    }
 }
index aee90ace41e9907d2823ac842ebc96cea8e77b30..9939e2f2b09b3201159a7cd4ada8fd7a4d990789 100644 (file)
@@ -13,13 +13,16 @@ package org.opendaylight.controller.cluster.raft.persisted;
  * @author Thomas Pantelis
  */
 public final class EmptyState implements Snapshot.State {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public static final EmptyState INSTANCE = new EmptyState();
 
     private EmptyState() {
+        // Hidden on purpose
     }
 
+    @java.io.Serial
     @SuppressWarnings("static-method")
     private Object readResolve() {
         return INSTANCE;
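EmptyState keeps its singleton property across serialization because readResolve() folds every deserialized copy back onto INSTANCE. A short hedged sketch of what that guarantees; the helper class is illustrative:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.opendaylight.controller.cluster.raft.persisted.EmptyState;

final class EmptyStateRoundTrip {
    static boolean resolvesToSingleton() throws Exception {
        final var bytes = new ByteArrayOutputStream();
        try (var oos = new ObjectOutputStream(bytes)) {
            oos.writeObject(EmptyState.INSTANCE);
        }
        try (var ois = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            // readResolve() replaces the freshly deserialized copy with the shared INSTANCE
            return ois.readObject() == EmptyState.INSTANCE;
        }
    }
}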
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LE.java
new file mode 100644 (file)
index 0000000..7e609ab
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link SimpleReplicatedLogEntry}.
+ */
+final class LE implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private long index;
+    private long term;
+    private Payload data;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public LE() {
+        // For Externalizable
+    }
+
+    // For size estimation only: use values that force the full encoded size
+    LE(final Void dummy) {
+        index = Long.MIN_VALUE;
+        term = Long.MIN_VALUE;
+        data = null;
+    }
+
+    LE(final SimpleReplicatedLogEntry logEntry) {
+        index = logEntry.getIndex();
+        term = logEntry.getTerm();
+        data = logEntry.getData();
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLongs(out, index, term);
+        out.writeObject(data);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final byte hdr = WritableObjects.readLongHeader(in);
+        index = WritableObjects.readFirstLong(in, hdr);
+        term = WritableObjects.readSecondLong(in, hdr);
+        data = (Payload) in.readObject();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new SimpleReplicatedLogEntry(index, term, data);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/LegacySerializable.java
new file mode 100644 (file)
index 0000000..0e75d88
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+/**
+ * Marker interface for serializable objects which have been migrated. It extends {@link MigratedSerializable} and
+ * always returns {@code true} from {@link #isMigrated()}. This interface is marked as deprecated, as any of its
+ * users should also be marked as deprecated.
+ */
+@Deprecated
+public interface LegacySerializable extends MigratedSerializable {
+    @Override
+    @Deprecated(forRemoval = true)
+    default boolean isMigrated() {
+        return true;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/NP.java
new file mode 100644 (file)
index 0000000..a041f2f
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import java.io.Serializable;
+
+/**
+ * Serialization proxy for {@link NoopPayload}.
+ */
+// There is no need for Externalizable
+final class NP implements Serializable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    @java.io.Serial
+    private Object readResolve() {
+        return NoopPayload.INSTANCE;
+    }
+}
+
index 46628c6078408445662942029b2f8df004aab2bd..0f076c55d755a3286433d388aad2e33faeef1fdb 100644 (file)
@@ -8,8 +8,9 @@
 package org.opendaylight.controller.cluster.raft.persisted;
 
 import akka.dispatch.ControlMessage;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
 /**
  * Payload used for no-op log entries that are put into the journal by the PreLeader in order to commit
@@ -17,22 +18,17 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payloa
  *
  * @author Thomas Pantelis
  */
-public final class NoopPayload extends Payload implements Serializable, ControlMessage {
-    public static final NoopPayload INSTANCE = new NoopPayload();
-
-    // There is no need for Externalizable
-    private static final class Proxy implements Serializable {
-        private static final long serialVersionUID = 1L;
-
-        private Object readResolve() {
-            return INSTANCE;
-        }
-    }
-
+public final class NoopPayload extends Payload implements ControlMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private static final Proxy PROXY = new Proxy();
+    private static final @NonNull NP PROXY = new NP();
+    // Estimate of how big the proxy is. Note this includes object stream overhead, so it is a bit conservative
+    private static final int PROXY_SIZE = SerializationUtils.serialize(PROXY).length;
+
+    public static final @NonNull NoopPayload INSTANCE = new NoopPayload();
 
     private NoopPayload() {
+        // Hidden on purpose
     }
 
     @Override
@@ -40,7 +36,13 @@ public final class NoopPayload extends Payload implements Serializable, ControlM
         return 0;
     }
 
-    private Object writeReplace() {
+    @Override
+    public int serializedSize() {
+        return PROXY_SIZE;
+    }
+
+    @Override
+    protected Object writeReplace() {
         return PROXY;
     }
 }
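serializedSize() for NoopPayload is answered by measuring the proxy once, at class initialization, with SerializationUtils. The same pattern can be sketched for any fixed-form proxy; the DummyProxy type below is invented for illustration:

import java.io.Serializable;
import org.apache.commons.lang3.SerializationUtils;

final class ProxySizeSketch {
    // Hypothetical stand-in for a fixed-form serialization proxy such as NP
    private record DummyProxy() implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;
    }

    // Measured once; the length includes object-stream overhead, so it is a slightly conservative estimate
    static final int PROXY_SIZE = SerializationUtils.serialize(new DummyProxy()).length;
}

A payload built this way can simply return PROXY_SIZE from serializedSize(), which is what NoopPayload does above.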
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/SS.java
new file mode 100644 (file)
index 0000000..0523d08
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Externalizable proxy for {@link Snapshot}.
+ */
+final class SS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private Snapshot snapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SS() {
+        // For Externalizable
+    }
+
+    SS(final Snapshot snapshot) {
+        this.snapshot = requireNonNull(snapshot);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLongs(out, snapshot.getLastIndex(), snapshot.getLastTerm());
+        WritableObjects.writeLongs(out, snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm());
+        WritableObjects.writeLong(out, snapshot.getElectionTerm());
+        out.writeObject(snapshot.getElectionVotedFor());
+        out.writeObject(snapshot.getServerConfiguration());
+
+        final var unAppliedEntries = snapshot.getUnAppliedEntries();
+        out.writeInt(unAppliedEntries.size());
+        for (var e : unAppliedEntries) {
+            WritableObjects.writeLongs(out, e.getIndex(), e.getTerm());
+            out.writeObject(e.getData());
+        }
+
+        out.writeObject(snapshot.getState());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        byte hdr = WritableObjects.readLongHeader(in);
+        long lastIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastTerm = WritableObjects.readSecondLong(in, hdr);
+
+        hdr = WritableObjects.readLongHeader(in);
+        long lastAppliedIndex = WritableObjects.readFirstLong(in, hdr);
+        long lastAppliedTerm = WritableObjects.readSecondLong(in, hdr);
+        long electionTerm = WritableObjects.readLong(in);
+        String electionVotedFor = (String) in.readObject();
+        ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject();
+
+        int size = in.readInt();
+        var unAppliedEntries = ImmutableList.<ReplicatedLogEntry>builderWithExpectedSize(size);
+        for (int i = 0; i < size; i++) {
+            hdr = WritableObjects.readLongHeader(in);
+            unAppliedEntries.add(new SimpleReplicatedLogEntry(
+                WritableObjects.readFirstLong(in, hdr), WritableObjects.readSecondLong(in, hdr),
+                (Payload) in.readObject()));
+        }
+
+        State state = (State) in.readObject();
+
+        snapshot = Snapshot.create(state, unAppliedEntries.build(), lastIndex, lastTerm, lastAppliedIndex,
+            lastAppliedTerm, electionTerm, electionVotedFor, serverConfig);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(snapshot);
+    }
+}
index 055984229bc2920a5cf1be0d609e2c8d12e0e53d..dbb64f1d82f505c9ea493fe6598ce7f17dbefe77 100644 (file)
@@ -15,12 +15,10 @@ import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.List;
 import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,8 +27,9 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-public final class ServerConfigurationPayload extends Payload implements PersistentPayload, Serializable {
+public final class ServerConfigurationPayload extends Payload implements PersistentPayload {
     private static final class Proxy implements Externalizable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private List<ServerInfo> serverConfig;
@@ -43,35 +42,39 @@ public final class ServerConfigurationPayload extends Payload implements Persist
         }
 
         Proxy(final ServerConfigurationPayload payload) {
-            this.serverConfig = payload.getServerConfig();
+            serverConfig = payload.getServerConfig();
         }
 
         @Override
         public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeInt(serverConfig.size());
-            for (ServerInfo i : serverConfig) {
-                out.writeObject(i.getId());
-                out.writeBoolean(i.isVoting());
+            for (var serverInfo : serverConfig) {
+                out.writeObject(serverInfo.peerId());
+                out.writeBoolean(serverInfo.isVoting());
             }
         }
 
         @Override
         public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             final int size = in.readInt();
-            serverConfig = new ArrayList<>(size);
+
+            final var builder = ImmutableList.<ServerInfo>builderWithExpectedSize(size);
             for (int i = 0; i < size; ++i) {
                 final String id = (String) in.readObject();
                 final boolean voting = in.readBoolean();
-                serverConfig.add(new ServerInfo(id, voting));
+                builder.add(new ServerInfo(id, voting));
             }
+            serverConfig = builder.build();
         }
 
+        @java.io.Serial
         private Object readResolve() {
             return new ServerConfigurationPayload(serverConfig);
         }
     }
 
     private static final Logger LOG = LoggerFactory.getLogger(ServerConfigurationPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@@ -90,6 +93,11 @@ public final class ServerConfigurationPayload extends Payload implements Persist
 
     @Override
     public int size() {
+        return serializedSize();
+    }
+
+    @Override
+    public int serializedSize() {
         if (serializedSize < 0) {
             try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
                 try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
@@ -112,21 +120,9 @@ public final class ServerConfigurationPayload extends Payload implements Persist
     }
 
     @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (obj == null) {
-            return false;
-        }
-
-        if (getClass() != obj.getClass()) {
-            return false;
-        }
-
-        ServerConfigurationPayload other = (ServerConfigurationPayload) obj;
-        return serverConfig.equals(other.serverConfig);
+    public boolean equals(final Object obj) {
+        return this == obj || obj instanceof ServerConfigurationPayload other
+            && serverConfig.equals(other.serverConfig);
     }
 
     @Override
@@ -134,7 +130,8 @@ public final class ServerConfigurationPayload extends Payload implements Persist
         return "ServerConfigurationPayload [serverConfig=" + serverConfig + "]";
     }
 
-    private Object writeReplace() {
+    @Override
+    protected Object writeReplace() {
         return new Proxy(this);
     }
 }
index 6e1ca82f77d80571f74e6d4b49229ed02445ea98..de70e17d3609d381451c9d3903a785620a3dfe5e 100644 (file)
@@ -17,47 +17,8 @@ import org.eclipse.jdt.annotation.NonNull;
  *
  * @author Thomas Pantelis
  */
-public final class ServerInfo {
-    private final String id;
-    private final boolean isVoting;
-
-    public ServerInfo(@NonNull String id, boolean isVoting) {
-        this.id = requireNonNull(id);
-        this.isVoting = isVoting;
-    }
-
-    public @NonNull String getId() {
-        return id;
-    }
-
-    public boolean isVoting() {
-        return isVoting;
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + Boolean.hashCode(isVoting);
-        result = prime * result + id.hashCode();
-        return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof ServerInfo)) {
-            return false;
-        }
-
-        final ServerInfo other = (ServerInfo) obj;
-        return isVoting == other.isVoting && id.equals(other.id);
-    }
-
-    @Override
-    public String toString() {
-        return "ServerInfo [id=" + id + ", isVoting=" + isVoting + "]";
+public record ServerInfo(@NonNull String peerId, boolean isVoting) {
+    public ServerInfo {
+        requireNonNull(peerId);
     }
 }
\ No newline at end of file
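Since ServerInfo is now a record, callers switch from getId() to the generated peerId() accessor (isVoting() keeps its name), and equals()/hashCode()/toString() are generated; the Proxy in ServerConfigurationPayload above already reflects this. A brief hedged usage sketch with illustrative values:

import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;

final class ServerInfoUsage {
    static String describe() {
        final var info = new ServerInfo("member-1", true);

        // Record components replace the former getters
        final String id = info.peerId();
        final boolean voting = info.isVoting();

        // Structural equality and toString() are generated by the record
        assert info.equals(new ServerInfo("member-1", true));
        return id + " voting=" + voting;
    }
}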
index 14ce5420d26081c06eed9a442e3dd39287c1c730..610d53a9e72efcb7bded805e79d7d6a4a5e1b63f 100644 (file)
@@ -5,17 +5,14 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import com.google.common.base.Preconditions;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
+import static java.util.Objects.requireNonNull;
+
 import java.io.Serializable;
+import org.apache.commons.lang3.SerializationUtils;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 
 /**
  * A {@link ReplicatedLogEntry} implementation.
@@ -23,45 +20,10 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payloa
  * @author Thomas Pantelis
  */
 public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ReplicatedLogEntry replicatedLogEntry;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ReplicatedLogEntry replicatedLogEntry) {
-            this.replicatedLogEntry = replicatedLogEntry;
-        }
-
-        static int estimatedSerializedSize(ReplicatedLogEntry replicatedLogEntry) {
-            return 8 /* index */ + 8 /* term */ + replicatedLogEntry.getData().size()
-                    + 400 /* estimated extra padding for class info */;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(replicatedLogEntry.getIndex());
-            out.writeLong(replicatedLogEntry.getTerm());
-            out.writeObject(replicatedLogEntry.getData());
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            replicatedLogEntry = new SimpleReplicatedLogEntry(in.readLong(), in.readLong(), (Payload) in.readObject());
-        }
-
-        private Object readResolve() {
-            return replicatedLogEntry;
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    // Estimate of how big the proxy is. Note this includes object stream overhead, so it is a bit conservative.
+    private static final int PROXY_SIZE = SerializationUtils.serialize(new LE((Void) null)).length;
 
     private final long index;
     private final long term;
@@ -75,10 +37,10 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria
      * @param term the term
      * @param payload the payload
      */
-    public SimpleReplicatedLogEntry(long index, long term, Payload payload) {
+    public SimpleReplicatedLogEntry(final long index, final long term, final Payload payload) {
         this.index = index;
         this.term = term;
-        this.payload = Preconditions.checkNotNull(payload);
+        this.payload = requireNonNull(payload);
     }
 
     @Override
@@ -98,7 +60,12 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria
 
     @Override
     public int size() {
-        return getData().size();
+        return payload.size();
+    }
+
+    @Override
+    public int serializedSize() {
+        return PROXY_SIZE + payload.serializedSize();
     }
 
     @Override
@@ -107,18 +74,10 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria
     }
 
     @Override
-    public void setPersistencePending(boolean pending) {
+    public void setPersistencePending(final boolean pending) {
         persistencePending = pending;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    public int estimatedSerializedSize() {
-        return Proxy.estimatedSerializedSize(this);
-    }
-
     @Override
     public int hashCode() {
         final int prime = 31;
@@ -130,21 +89,18 @@ public final class SimpleReplicatedLogEntry implements ReplicatedLogEntry, Seria
     }
 
     @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (obj == null || getClass() != obj.getClass()) {
-            return false;
-        }
-
-        SimpleReplicatedLogEntry other = (SimpleReplicatedLogEntry) obj;
-        return index == other.index && term == other.term && payload.equals(other.payload);
+    public boolean equals(final Object obj) {
+        return this == obj || obj instanceof SimpleReplicatedLogEntry other && index == other.index
+            && term == other.term && payload.equals(other.payload);
     }
 
     @Override
     public String toString() {
         return "SimpleReplicatedLogEntry [index=" + index + ", term=" + term + ", payload=" + payload + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new LE(this);
+    }
 }
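
The new PROXY_SIZE constant measures the serialization overhead empirically, by serializing an empty proxy once at class-initialization time, instead of guessing a padding value. A rough sketch of that measuring trick, assuming commons-lang3 on the classpath and using a stand-in probe class (LE itself is not shown in this hunk):

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;

    // Stand-in probe: measures the fixed Java-serialization overhead of an empty object.
    final class OverheadProbe implements Serializable {
        private static final long serialVersionUID = 1L;

        // Computed once at class-initialization time: stream header plus class metadata,
        // with no payload attached.
        static final int PROXY_SIZE = SerializationUtils.serialize(new OverheadProbe()).length;

        public static void main(final String[] args) {
            int payloadSize = 42; // stand-in for payload.serializedSize()
            // Per-entry estimate = fixed proxy overhead + the payload's own serialized size.
            System.out.println("estimated entry size: " + (PROXY_SIZE + payloadSize));
        }
    }
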
index ca6e6dff30156b3e571281c1c85f89ac619a390c..250551a780d0a5842e409f2b59315f8d63b27191 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.ExtendedActorSystem;
@@ -45,11 +44,12 @@ public class SimpleReplicatedLogEntrySerializer extends JSerializer {
     }
 
     @Override
-    public byte[] toBinary(Object obj) {
-        checkArgument(obj instanceof SimpleReplicatedLogEntry, "Unsupported object type %s", obj.getClass());
+    public byte[] toBinary(final Object obj) {
+        if (!(obj instanceof SimpleReplicatedLogEntry replicatedLogEntry)) {
+            throw new IllegalArgumentException("Unsupported object type " + obj.getClass());
+        }
 
-        SimpleReplicatedLogEntry replicatedLogEntry = (SimpleReplicatedLogEntry)obj;
-        final int estimatedSerializedSize = replicatedLogEntry.estimatedSerializedSize();
+        final int estimatedSerializedSize = replicatedLogEntry.serializedSize();
 
         final ByteArrayOutputStream bos = new ByteArrayOutputStream(estimatedSerializedSize);
         SerializationUtils.serialize(replicatedLogEntry, bos);
@@ -62,7 +62,7 @@ public class SimpleReplicatedLogEntrySerializer extends JSerializer {
     }
 
     @Override
-    public Object fromBinaryJava(byte[] bytes, Class<?> manifest) {
+    public Object fromBinaryJava(final byte[] bytes, final Class<?> manifest) {
         try (ClassLoaderObjectInputStream is = new ClassLoaderObjectInputStream(system.dynamicAccess().classLoader(),
                 new ByteArrayInputStream(bytes))) {
             return is.readObject();
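
toBinary() above now fails fast with an explicit IllegalArgumentException and pre-sizes the output buffer from serializedSize(). A simplified sketch of that shape with stand-in types (not the real Akka serializer):

    import java.io.ByteArrayOutputStream;
    import org.apache.commons.lang3.SerializationUtils;

    // Simplified sketch: reject unsupported types up front, then serialize into a pre-sized buffer.
    final class PreSizedSerializer {
        private PreSizedSerializer() {
            // Utility class
        }

        static byte[] toBinary(final Object obj) {
            if (!(obj instanceof String str)) {
                // Fail fast, mirroring the explicit IllegalArgumentException above.
                throw new IllegalArgumentException("Unsupported object type " + obj.getClass());
            }

            // Pre-sizing avoids internal array copies when the size estimate is close.
            ByteArrayOutputStream bos = new ByteArrayOutputStream(str.length() + 64);
            SerializationUtils.serialize(str, bos);
            return bos.toByteArray();
        }

        public static void main(final String[] args) {
            System.out.println(toBinary("hello").length + " bytes");
        }
    }
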
index 42a4a8de810bd5a2b4d04d872b2e00620ff9bd07..81d2331bb4b6c9af4b700c9be2b1a2a37f8328da 100644 (file)
@@ -7,96 +7,34 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.List;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 
 /**
  * Represents a snapshot of the raft data.
  *
  * @author Thomas Pantelis
  */
-// Not final for mocking
-public class Snapshot implements Serializable {
-
+public final class Snapshot implements Serializable {
     /**
      * Implementations of this interface are used as the state payload for a snapshot.
      *
      * @author Thomas Pantelis
      */
     public interface State extends Serializable {
-    }
-
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private Snapshot snapshot;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final Snapshot snapshot) {
-            this.snapshot = snapshot;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(snapshot.lastIndex);
-            out.writeLong(snapshot.lastTerm);
-            out.writeLong(snapshot.lastAppliedIndex);
-            out.writeLong(snapshot.lastAppliedTerm);
-            out.writeLong(snapshot.electionTerm);
-            out.writeObject(snapshot.electionVotedFor);
-            out.writeObject(snapshot.serverConfig);
-
-            out.writeInt(snapshot.unAppliedEntries.size());
-            for (ReplicatedLogEntry e: snapshot.unAppliedEntries) {
-                out.writeLong(e.getIndex());
-                out.writeLong(e.getTerm());
-                out.writeObject(e.getData());
-            }
-
-            out.writeObject(snapshot.state);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            long lastIndex = in.readLong();
-            long lastTerm = in.readLong();
-            long lastAppliedIndex = in.readLong();
-            long lastAppliedTerm = in.readLong();
-            long electionTerm = in.readLong();
-            String electionVotedFor = (String) in.readObject();
-            ServerConfigurationPayload serverConfig = (ServerConfigurationPayload) in.readObject();
-
-            int size = in.readInt();
-            List<ReplicatedLogEntry> unAppliedEntries = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                unAppliedEntries.add(new SimpleReplicatedLogEntry(in.readLong(), in.readLong(),
-                        (Payload) in.readObject()));
-            }
-
-            State state = (State) in.readObject();
-
-            snapshot = Snapshot.create(state, unAppliedEntries, lastIndex, lastTerm, lastAppliedIndex, lastAppliedTerm,
-                    electionTerm, electionVotedFor, serverConfig);
-        }
-
-        private Object readResolve() {
-            return snapshot;
+        /**
+         * Indicate whether the snapshot requires migration, i.e. a new snapshot should be created after recovery.
+         * Default implementation returns false, i.e. do not re-snapshot.
+         *
+         * @return True if complete recovery based upon this snapshot should trigger a new snapshot.
+         */
+        default boolean needsMigration() {
+            return false;
         }
     }
 
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final State state;
@@ -109,7 +47,7 @@ public class Snapshot implements Serializable {
     private final String electionVotedFor;
     private final ServerConfigurationPayload serverConfig;
 
-    Snapshot(final State state, final List<ReplicatedLogEntry> unAppliedEntries, final long lastIndex,
+    private Snapshot(final State state, final List<ReplicatedLogEntry> unAppliedEntries, final long lastIndex,
             final long lastTerm, final long lastAppliedIndex, final long lastAppliedTerm, final long electionTerm,
             final String electionVotedFor, final ServerConfigurationPayload serverConfig) {
         this.state = state;
@@ -151,7 +89,7 @@ public class Snapshot implements Serializable {
     }
 
     public long getLastIndex() {
-        return this.lastIndex;
+        return lastIndex;
     }
 
     public long getElectionTerm() {
@@ -166,10 +104,6 @@ public class Snapshot implements Serializable {
         return serverConfig;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "Snapshot [lastIndex=" + lastIndex + ", lastTerm=" + lastTerm + ", lastAppliedIndex=" + lastAppliedIndex
@@ -177,4 +111,9 @@ public class Snapshot implements Serializable {
                 + ", state=" + state + ", electionTerm=" + electionTerm + ", electionVotedFor="
                 + electionVotedFor + ", ServerConfigPayload="  + serverConfig + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new SS(this);
+    }
 }
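
The new State.needsMigration() default lets a legacy snapshot format request a fresh snapshot once recovery completes, while existing implementations inherit the do-nothing behaviour. A minimal sketch of an implementation opting in (SnapshotState and MigratedByteState are illustrative names, not real classes):

    import java.io.Serializable;

    // Illustrative mirror of the State interface's new default method; not the real types.
    interface SnapshotState extends Serializable {
        default boolean needsMigration() {
            // Default: recovery from this snapshot does not force a new snapshot.
            return false;
        }
    }

    final class MigratedByteState implements SnapshotState {
        private static final long serialVersionUID = 1L;

        @Override
        public boolean needsMigration() {
            // A legacy-format state opts in, asking for a fresh snapshot once recovery completes.
            return true;
        }

        public static void main(final String[] args) {
            SnapshotState legacy = new MigratedByteState();
            System.out.println("re-snapshot after recovery: " + legacy.needsMigration());
        }
    }
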
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/persisted/UT.java
new file mode 100644 (file)
index 0000000..0fc6f6d
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * Serialization proxy for {@link UpdateElectionTerm}.
+ */
+final class UT implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private UpdateElectionTerm updateElectionTerm;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public UT() {
+        // For Externalizable
+    }
+
+    UT(final UpdateElectionTerm updateElectionTerm) {
+        this.updateElectionTerm = requireNonNull(updateElectionTerm);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        WritableObjects.writeLong(out, updateElectionTerm.getCurrentTerm());
+        out.writeObject(updateElectionTerm.getVotedFor());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        updateElectionTerm = new UpdateElectionTerm(WritableObjects.readLong(in), (String) in.readObject());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(updateElectionTerm);
+    }
+}
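
UT above is a serialization proxy: writeReplace() on the enclosing class substitutes the proxy on the way out, and the proxy's readResolve() restores the real object on the way in. A compressed, self-contained sketch of that round trip with made-up Term/TermProxy names, writing a plain long instead of the WritableObjects encoding:

    import static java.util.Objects.requireNonNull;

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;

    final class Term implements Serializable {
        private static final long serialVersionUID = 1L;

        final long current;

        Term(final long current) {
            this.current = current;
        }

        private Object writeReplace() {
            // The serialized form is always the proxy, never Term itself.
            return new TermProxy(this);
        }

        public static void main(final String[] args) {
            Term restored = SerializationUtils.deserialize(SerializationUtils.serialize(new Term(5)));
            System.out.println(restored.current); // 5, reconstructed via the proxy
        }
    }

    final class TermProxy implements Externalizable {
        private static final long serialVersionUID = 1L;

        private Term term;

        public TermProxy() {
            // Public no-arg constructor required by Externalizable
        }

        TermProxy(final Term term) {
            this.term = requireNonNull(term);
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeLong(term.current);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            term = new Term(in.readLong());
        }

        private Object readResolve() {
            // Deserialization hands back the real object, not the proxy.
            return term;
        }
    }

The proxy keeps the wire format independent of the enclosing class's fields, which is why the conversions in this commit can drop the old nested Proxy classes without breaking compatibility concerns handled elsewhere.
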
index 939d893a2ec505538f918a1f30ad5c927c56666a..3ef7acbea369c35f4fa365ac5736aa0b3f34df7f 100644 (file)
@@ -7,48 +7,13 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
 
 /**
  * Message class to persist election term information.
  */
-public class UpdateElectionTerm implements Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private UpdateElectionTerm updateElectionTerm;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final UpdateElectionTerm updateElectionTerm) {
-            this.updateElectionTerm = updateElectionTerm;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeLong(updateElectionTerm.currentTerm);
-            out.writeObject(updateElectionTerm.votedFor);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            updateElectionTerm = new UpdateElectionTerm(in.readLong(), (String) in.readObject());
-        }
-
-        private Object readResolve() {
-            return updateElectionTerm;
-        }
-    }
-
+public final class UpdateElectionTerm implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private final long currentTerm;
@@ -67,13 +32,14 @@ public class UpdateElectionTerm implements Serializable {
         return votedFor;
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
     @Override
     public String toString() {
         return "UpdateElectionTerm [currentTerm=" + currentTerm + ", votedFor=" + votedFor + "]";
     }
+
+    @java.io.Serial
+    private Object writeReplace() {
+        return new UT(this);
+    }
 }
 
index 82017fa99f97eb48e874c7544efbeb066222978e..f59598876de66a975c06766577f39ad3371aa62c 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
-import static akka.pattern.Patterns.ask;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
@@ -17,16 +16,15 @@ import akka.actor.PoisonPill;
 import akka.actor.Terminated;
 import akka.dispatch.Dispatchers;
 import akka.dispatch.Mailboxes;
+import akka.pattern.Patterns;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.OutputStream;
 import java.time.Duration;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -44,10 +42,10 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
@@ -91,6 +89,34 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         }
     }
 
+    /**
+     * Message intended for testing to allow triggering persistData via the mailbox.
+     */
+    public static final class TestPersist {
+
+        private final ActorRef actorRef;
+        private final Identifier identifier;
+        private final Payload payload;
+
+        TestPersist(final ActorRef actorRef, final Identifier identifier, final Payload payload) {
+            this.actorRef = actorRef;
+            this.identifier = identifier;
+            this.payload = payload;
+        }
+
+        public ActorRef getActorRef() {
+            return actorRef;
+        }
+
+        public Identifier getIdentifier() {
+            return identifier;
+        }
+
+        public Payload getPayload() {
+            return payload;
+        }
+    }
+
     public static class TestRaftActor extends MockRaftActor {
 
         private final ActorRef collectorActor;
@@ -98,7 +124,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
 
         TestRaftActor(final Builder builder) {
             super(builder);
-            this.collectorActor = builder.collectorActor;
+            collectorActor = builder.collectorActor;
         }
 
         public void startDropMessages(final Class<?> msgClass) {
@@ -120,20 +146,23 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         @SuppressWarnings({ "rawtypes", "unchecked", "checkstyle:IllegalCatch" })
         @Override
         public void handleCommand(final Object message) {
-            if (message instanceof MockPayload) {
-                MockPayload payload = (MockPayload) message;
+            if (message instanceof MockPayload payload) {
                 super.persistData(collectorActor, new MockIdentifier(payload.toString()), payload, false);
                 return;
             }
 
-            if (message instanceof ServerConfigurationPayload) {
-                super.persistData(collectorActor, new MockIdentifier("serverConfig"), (Payload) message, false);
+            if (message instanceof ServerConfigurationPayload payload) {
+                super.persistData(collectorActor, new MockIdentifier("serverConfig"), payload, false);
+                return;
+            }
+
+            if (message instanceof SetPeerAddress setPeerAddress) {
+                setPeerAddress(setPeerAddress.getPeerId(), setPeerAddress.getPeerAddress());
                 return;
             }
 
-            if (message instanceof SetPeerAddress) {
-                setPeerAddress(((SetPeerAddress) message).getPeerId(),
-                        ((SetPeerAddress) message).getPeerAddress());
+            if (message instanceof TestPersist testPersist) {
+                persistData(testPersist.getActorRef(), testPersist.getIdentifier(), testPersist.getPayload(), false);
                 return;
             }
 
@@ -156,9 +185,9 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         @Override
         @SuppressWarnings("checkstyle:IllegalCatch")
         public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
-            MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState()));
+            MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState()));
             if (installSnapshotStream.isPresent()) {
-                SerializationUtils.serialize(snapshotState, installSnapshotStream.get());
+                SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow());
             }
 
             actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef);
@@ -180,13 +209,14 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
             }
 
             public Builder collectorActor(final ActorRef newCollectorActor) {
-                this.collectorActor = newCollectorActor;
+                collectorActor = newCollectorActor;
                 return this;
             }
         }
     }
 
-    protected static final int SNAPSHOT_CHUNK_SIZE = 100;
+    // FIXME: this is an arbitrary limit. Document the interactions and/or improve them for better maintainability
+    protected static final int MAXIMUM_MESSAGE_SLICE_SIZE = 700;
 
     protected final Logger testLog = LoggerFactory.getLogger(getClass());
 
@@ -208,16 +238,16 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
     protected String follower2Id = factory.generateActorId("follower");
     protected TestActorRef<TestRaftActor> follower2Actor;
     protected ActorRef follower2CollectorActor;
-    protected  RaftActorBehavior follower2;
+    protected RaftActorBehavior follower2;
     protected RaftActorContext follower2Context;
 
-    protected ImmutableMap<String, String> peerAddresses;
+    protected Map<String, String> peerAddresses;
 
     protected long initialTerm = 5;
     protected long currentTerm;
 
     protected int snapshotBatchCount = 4;
-    protected int snapshotChunkSize = SNAPSHOT_CHUNK_SIZE;
+    protected int maximumMessageSliceSize = MAXIMUM_MESSAGE_SLICE_SIZE;
 
     protected List<MockPayload> expSnapshotState = new ArrayList<>();
 
@@ -235,7 +265,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         configParams.setSnapshotBatchCount(snapshotBatchCount);
         configParams.setSnapshotDataThresholdPercentage(70);
         configParams.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
-        configParams.setSnapshotChunkSize(snapshotChunkSize);
+        configParams.setMaximumMessageSliceSize(maximumMessageSliceSize);
         return configParams;
     }
 
@@ -253,7 +283,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
     protected TestActorRef<TestRaftActor> newTestRaftActor(final String id, final Map<String, String> newPeerAddresses,
             final ConfigParams configParams) {
         return newTestRaftActor(id, TestRaftActor.newBuilder().peerAddresses(newPeerAddresses != null
-                ? newPeerAddresses : Collections.<String, String>emptyMap()).config(configParams));
+                ? newPeerAddresses : Map.of()).config(configParams));
     }
 
     protected TestActorRef<TestRaftActor> newTestRaftActor(final String id, final TestRaftActor.Builder builder) {
@@ -389,7 +419,7 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
             try {
-                OnDemandRaftState raftState = (OnDemandRaftState)Await.result(ask(raftActor,
+                OnDemandRaftState raftState = (OnDemandRaftState)Await.result(Patterns.ask(raftActor,
                         GetOnDemandRaftState.INSTANCE, timeout), timeout.duration());
                 verifier.accept(raftState);
                 return;
index 8ff68310243ac8129fb719ed9150cad1c5c69123..65ac83d0d00c17d6c8a7e47136a2e95772a754bb 100644 (file)
@@ -10,14 +10,14 @@ package org.opendaylight.controller.cluster.raft;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-import akka.japi.Procedure;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import org.junit.Assert;
+import java.util.function.Consumer;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
@@ -53,8 +53,8 @@ public class AbstractReplicatedLogImplTest {
         assertEquals("lastTerm", -1, replicatedLogImpl.lastTerm());
         assertEquals("isPresent", false, replicatedLogImpl.isPresent(0));
         assertEquals("isInSnapshot", false, replicatedLogImpl.isInSnapshot(0));
-        Assert.assertNull("get(0)", replicatedLogImpl.get(0));
-        Assert.assertNull("last", replicatedLogImpl.last());
+        assertNull("get(0)", replicatedLogImpl.get(0));
+        assertNull("last", replicatedLogImpl.last());
 
         List<ReplicatedLogEntry> list = replicatedLogImpl.getFrom(0, 1, ReplicatedLog.NO_MAX_SIZE);
         assertEquals("getFrom size", 0, list.size());
@@ -139,39 +139,46 @@ public class AbstractReplicatedLogImplTest {
     @Test
     public void testGetFromWithMax() {
         List<ReplicatedLogEntry> from = replicatedLogImpl.getFrom(0, 1, ReplicatedLog.NO_MAX_SIZE);
-        Assert.assertEquals(1, from.size());
-        Assert.assertEquals("A", from.get(0).getData().toString());
+        assertEquals(1, from.size());
+        assertEquals("A", from.get(0).getData().toString());
 
         from = replicatedLogImpl.getFrom(0, 20, ReplicatedLog.NO_MAX_SIZE);
-        Assert.assertEquals(4, from.size());
-        Assert.assertEquals("A", from.get(0).getData().toString());
-        Assert.assertEquals("D", from.get(3).getData().toString());
+        assertEquals(4, from.size());
+        assertEquals("A", from.get(0).getData().toString());
+        assertEquals("B", from.get(1).getData().toString());
+        assertEquals("C", from.get(2).getData().toString());
+        assertEquals("D", from.get(3).getData().toString());
+
+        // Pre-calculate sizing information for use with capping
+        final int sizeB = from.get(1).serializedSize();
+        final int sizeC = from.get(2).serializedSize();
+        final int sizeD = from.get(3).serializedSize();
 
         from = replicatedLogImpl.getFrom(1, 2, ReplicatedLog.NO_MAX_SIZE);
-        Assert.assertEquals(2, from.size());
-        Assert.assertEquals("B", from.get(0).getData().toString());
-        Assert.assertEquals("C", from.get(1).getData().toString());
-
-        from = replicatedLogImpl.getFrom(1, 3, 2);
-        Assert.assertEquals(2, from.size());
-        Assert.assertEquals("B", from.get(0).getData().toString());
-        Assert.assertEquals("C", from.get(1).getData().toString());
-
-        from = replicatedLogImpl.getFrom(1, 3, 3);
-        Assert.assertEquals(3, from.size());
-        Assert.assertEquals("B", from.get(0).getData().toString());
-        Assert.assertEquals("C", from.get(1).getData().toString());
-        Assert.assertEquals("D", from.get(2).getData().toString());
-
-        from = replicatedLogImpl.getFrom(1, 2, 3);
-        Assert.assertEquals(2, from.size());
-        Assert.assertEquals("B", from.get(0).getData().toString());
-        Assert.assertEquals("C", from.get(1).getData().toString());
+        assertEquals(2, from.size());
+        assertEquals("B", from.get(0).getData().toString());
+        assertEquals("C", from.get(1).getData().toString());
+
+        from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC);
+        assertEquals(2, from.size());
+        assertEquals("B", from.get(0).getData().toString());
+        assertEquals("C", from.get(1).getData().toString());
+
+        from = replicatedLogImpl.getFrom(1, 3, sizeB + sizeC + sizeD);
+        assertEquals(3, from.size());
+        assertEquals("B", from.get(0).getData().toString());
+        assertEquals("C", from.get(1).getData().toString());
+        assertEquals("D", from.get(2).getData().toString());
+
+        from = replicatedLogImpl.getFrom(1, 2, sizeB + sizeC + sizeD);
+        assertEquals(2, from.size());
+        assertEquals("B", from.get(0).getData().toString());
+        assertEquals("C", from.get(1).getData().toString());
 
         replicatedLogImpl.append(new SimpleReplicatedLogEntry(4, 2, new MockPayload("12345")));
         from = replicatedLogImpl.getFrom(4, 2, 2);
-        Assert.assertEquals(1, from.size());
-        Assert.assertEquals("12345", from.get(0).getData().toString());
+        assertEquals(1, from.size());
+        assertEquals("12345", from.get(0).getData().toString());
     }
 
     @Test
@@ -224,10 +231,10 @@ public class AbstractReplicatedLogImplTest {
         assertEquals("lastIndex", 3, replicatedLogImpl.lastIndex());
         assertEquals("lastTerm", 2, replicatedLogImpl.lastTerm());
 
-        Assert.assertNull("get(0)", replicatedLogImpl.get(0));
-        Assert.assertNull("get(1)", replicatedLogImpl.get(1));
-        Assert.assertNotNull("get(2)", replicatedLogImpl.get(2));
-        Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
+        assertNull("get(0)", replicatedLogImpl.get(0));
+        assertNull("get(1)", replicatedLogImpl.get(1));
+        assertNotNull("get(2)", replicatedLogImpl.get(2));
+        assertNotNull("get(3)", replicatedLogImpl.get(3));
     }
 
     @Test
@@ -245,8 +252,8 @@ public class AbstractReplicatedLogImplTest {
         assertEquals("dataSize", 4, replicatedLogImpl.dataSize());
         assertEquals("getSnapshotIndex", -1, replicatedLogImpl.getSnapshotIndex());
         assertEquals("getSnapshotTerm", -1, replicatedLogImpl.getSnapshotTerm());
-        Assert.assertNotNull("get(0)", replicatedLogImpl.get(0));
-        Assert.assertNotNull("get(3)", replicatedLogImpl.get(3));
+        assertNotNull("get(0)", replicatedLogImpl.get(0));
+        assertNotNull("get(3)", replicatedLogImpl.get(3));
     }
 
     @Test
@@ -318,24 +325,28 @@ public class AbstractReplicatedLogImplTest {
 
     }
 
-    class MockAbstractReplicatedLogImpl extends AbstractReplicatedLogImpl {
+    static class MockAbstractReplicatedLogImpl extends AbstractReplicatedLogImpl {
         @Override
         public boolean removeFromAndPersist(final long index) {
             return true;
         }
 
         @Override
-        public boolean appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback,
-                boolean doAsync) {
+        public boolean appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
+                final Consumer<ReplicatedLogEntry> callback, final boolean doAsync) {
+            if (callback != null) {
+                callback.accept(replicatedLogEntry);
+            }
             return true;
         }
 
         @Override
-        public void captureSnapshotIfReady(ReplicatedLogEntry replicatedLogEntry) {
+        public void captureSnapshotIfReady(final ReplicatedLogEntry replicatedLogEntry) {
+            // No-op
         }
 
         @Override
-        public boolean shouldCaptureSnapshot(long logIndex) {
+        public boolean shouldCaptureSnapshot(final long logIndex) {
             return false;
         }
     }
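
The reworked assertions above cap getFrom() by the entries' serializedSize() instead of a hard-coded byte count, so the test no longer depends on a particular serialization format. A hedged sketch of the capping behaviour the assertions imply, simplified over strings and not the actual AbstractReplicatedLogImpl code:

    import java.util.ArrayList;
    import java.util.List;

    final class SizeCappedFetch {
        private SizeCappedFetch() {
            // Utility class
        }

        // Returns up to maxEntries entries starting at 'from', stopping once the accumulated
        // size would exceed maxBytes, while always returning at least one entry.
        static List<String> getFrom(final List<String> log, final int from, final int maxEntries,
                final long maxBytes) {
            List<String> result = new ArrayList<>();
            long totalBytes = 0;
            for (int i = from; i < log.size() && result.size() < maxEntries; i++) {
                String entry = log.get(i);
                totalBytes += entry.length(); // stand-in for entry.serializedSize()
                if (totalBytes > maxBytes && !result.isEmpty()) {
                    break;
                }
                result.add(entry);
            }
            return result;
        }

        public static void main(final String[] args) {
            List<String> log = List.of("A", "B", "C", "D");
            System.out.println(getFrom(log, 1, 3, 2)); // [B, C]: the third entry would exceed the cap
        }
    }
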
index 275dadf0534c665423a05adcfa51477656f418c0..ef54be1759dfd23dda09d0e10af56ccf3dc64c1f 100644 (file)
@@ -11,11 +11,11 @@ import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.verify;
 
 import akka.japi.Procedure;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
 import org.slf4j.Logger;
@@ -26,19 +26,15 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ElectionTermImplTest {
     private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
 
     @Mock
     private DataPersistenceProvider mockPersistence;
 
-    @Before
-    public void setup() {
-        MockitoAnnotations.initMocks(this);
-    }
-
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Test
+    @SuppressWarnings({ "rawtypes", "unchecked" })
     public void testUpdateAndPersist() throws Exception {
         ElectionTermImpl impl = new ElectionTermImpl(mockPersistence, "test", LOG);
 
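
The test above swaps manual MockitoAnnotations.initMocks() for the MockitoJUnitRunner.StrictStubs runner, which initializes @Mock fields and additionally fails tests that leave stubbings unused. A minimal sketch of the pattern (SomeService is a made-up collaborator):

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.when;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.mockito.Mock;
    import org.mockito.junit.MockitoJUnitRunner;

    @RunWith(MockitoJUnitRunner.StrictStubs.class)
    public class StrictStubsExampleTest {
        // Made-up collaborator, purely for illustration.
        public interface SomeService {
            int answer();
        }

        @Mock
        private SomeService service;

        @Test
        public void testAnswer() {
            // StrictStubs initializes @Mock fields and fails the test if this stub goes unused.
            when(service.answer()).thenReturn(42);
            assertEquals(42, service.answer());
        }
    }
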
index 8e80e30d8bee736e6aad87cd3992c2065085d932..9788b8fe8d427670c12b8a9619872a4c3b6a10ca 100644 (file)
@@ -56,7 +56,7 @@ public class FollowerLogInformationTest {
     // we cannot rely comfortably that the sleep will indeed sleep for the desired time
     // hence getting the actual elapsed time and do a match.
     // if the sleep has spilled over, then return the test gracefully
-    private static long sleepWithElaspsedTimeReturned(long millis) {
+    private static long sleepWithElaspsedTimeReturned(final long millis) {
         Stopwatch stopwatch = Stopwatch.createStarted();
         Uninterruptibles.sleepUninterruptibly(millis, TimeUnit.MILLISECONDS);
         stopwatch.stop();
@@ -70,16 +70,17 @@ public class FollowerLogInformationTest {
         FollowerLogInformation followerLogInformation =
                 new FollowerLogInformation(new PeerInfo("follower1", null, VotingState.VOTING), 10, context);
 
-        assertTrue(followerLogInformation.okToReplicate());
-        assertFalse(followerLogInformation.okToReplicate());
+        followerLogInformation.setSentCommitIndex(0);
+        assertTrue(followerLogInformation.okToReplicate(0));
+        assertFalse(followerLogInformation.okToReplicate(0));
 
         // wait for 150 milliseconds and it should work again
         Uninterruptibles.sleepUninterruptibly(150, TimeUnit.MILLISECONDS);
-        assertTrue(followerLogInformation.okToReplicate());
+        assertTrue(followerLogInformation.okToReplicate(0));
 
         //increment next index and try immediately and it should work again
         followerLogInformation.incrNextIndex();
-        assertTrue(followerLogInformation.okToReplicate());
+        assertTrue(followerLogInformation.okToReplicate(0));
     }
 
     @Test
@@ -89,13 +90,13 @@ public class FollowerLogInformationTest {
         context.setCommitIndex(0);
         FollowerLogInformation followerLogInformation = new FollowerLogInformation(peerInfo, context);
 
-        assertFalse(followerLogInformation.okToReplicate());
+        assertFalse(followerLogInformation.okToReplicate(0));
 
         followerLogInformation.markFollowerActive();
         assertFalse(followerLogInformation.isFollowerActive());
 
         peerInfo.setVotingState(VotingState.VOTING);
-        assertTrue(followerLogInformation.okToReplicate());
+        assertTrue(followerLogInformation.okToReplicate(0));
 
         followerLogInformation.markFollowerActive();
         assertTrue(followerLogInformation.isFollowerActive());
@@ -108,7 +109,7 @@ public class FollowerLogInformationTest {
         context.setCommitIndex(0);
         FollowerLogInformation followerLogInformation = new FollowerLogInformation(peerInfo, context);
 
-        assertTrue(followerLogInformation.okToReplicate());
+        assertTrue(followerLogInformation.okToReplicate(0));
 
         followerLogInformation.markFollowerActive();
         assertTrue(followerLogInformation.isFollowerActive());
index e99215ddbaa8cee74d457b2ef1f55e098653a8f4..a565932a02a5da9fae22b55a27d2987350ca4ec1 100644 (file)
@@ -18,11 +18,10 @@ import akka.actor.Status;
 import akka.pattern.Patterns;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableMap;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
@@ -143,9 +142,9 @@ public class LeadershipTransferIntegrationTest extends AbstractRaftActorIntegrat
     private void createRaftActors() {
         testLog.info("createRaftActors starting");
 
-        final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.emptyList(), -1, -1, -1, -1,
+        final Snapshot snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(), -1, -1, -1, -1,
                 1, null, new org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload(
-                        Arrays.asList(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true),
+                        List.of(new ServerInfo(leaderId, true), new ServerInfo(follower1Id, true),
                                 new ServerInfo(follower2Id, true), new ServerInfo(follower3Id, false))));
 
         InMemorySnapshotStore.addSnapshot(leaderId, snapshot);
@@ -156,28 +155,28 @@ public class LeadershipTransferIntegrationTest extends AbstractRaftActorIntegrat
         follower1NotifierActor = factory.createActor(MessageCollectorActor.props(),
                 factory.generateActorId(follower1Id + "-notifier"));
         follower1Actor = newTestRaftActor(follower1Id, TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id),
+                Map.of(leaderId, testActorPath(leaderId), follower2Id, testActorPath(follower2Id),
                         follower3Id, testActorPath(follower3Id)))
                 .config(newFollowerConfigParams()).roleChangeNotifier(follower1NotifierActor));
 
         follower2NotifierActor = factory.createActor(MessageCollectorActor.props(),
                 factory.generateActorId(follower2Id + "-notifier"));
         follower2Actor = newTestRaftActor(follower2Id,TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
+                Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
                         follower3Id, testActorPath(follower3Id)))
                 .config(newFollowerConfigParams()).roleChangeNotifier(follower2NotifierActor));
 
         follower3NotifierActor = factory.createActor(MessageCollectorActor.props(),
                 factory.generateActorId(follower3Id + "-notifier"));
         follower3Actor = newTestRaftActor(follower3Id,TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
+                Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString(),
                         follower2Id, follower2Actor.path().toString()))
                 .config(newFollowerConfigParams()).roleChangeNotifier(follower3NotifierActor));
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString())
-                .put(follower3Id, follower3Actor.path().toString()).build();
+        peerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString(),
+                follower3Id, follower3Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderConfigParams.setElectionTimeoutFactor(3);
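
The peer-address setup above replaces Guava's ImmutableMap with Map.of(), which also yields an unmodifiable map but rejects nulls and has no builder form. A tiny sketch of the difference, with placeholder addresses:

    import java.util.Map;

    final class MapOfExample {
        private MapOfExample() {
            // Utility class
        }

        public static void main(final String[] args) {
            // Map.of() is unmodifiable like ImmutableMap.of(), but rejects null keys/values
            // outright and offers no builder; fine here because all addresses are known up front.
            Map<String, String> peers = Map.of(
                    "follower1", "akka://test/user/follower1",
                    "follower2", "akka://test/user/follower2");
            System.out.println(peers.size()); // 2
        }
    }
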
index 5a1ac54c7cab6f8a477bce09d670e80463269c84..276ffb27f4f8a5faf0e206c2cdedbc807fa12fb2 100644 (file)
@@ -12,12 +12,12 @@ import static org.junit.Assert.assertEquals;
 import akka.actor.ActorRef;
 import akka.dispatch.Dispatchers;
 import akka.testkit.TestActorRef;
-import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.io.ByteSource;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.OutputStream;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 import org.junit.After;
@@ -72,16 +72,16 @@ public class MigratedMessagesTest extends AbstractActorTest {
 
         RaftActorSnapshotCohort snapshotCohort = new RaftActorSnapshotCohort() {
             @Override
-            public void createSnapshot(ActorRef actorRef, java.util.Optional<OutputStream> installSnapshotStream) {
+            public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
                 actorRef.tell(new CaptureSnapshotReply(ByteState.empty(), installSnapshotStream), actorRef);
             }
 
             @Override
-            public void applySnapshot(Snapshot.State snapshotState) {
+            public void applySnapshot(final Snapshot.State snapshotState) {
             }
 
             @Override
-            public State deserializeSnapshot(ByteSource snapshotBytes) {
+            public State deserializeSnapshot(final ByteSource snapshotBytes) {
                 throw new UnsupportedOperationException();
             }
         };
@@ -102,8 +102,8 @@ public class MigratedMessagesTest extends AbstractActorTest {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private TestActorRef<MockRaftActor> doTestSnapshotAfterStartupWithMigratedMessage(String id, boolean persistent,
-            Consumer<Snapshot> snapshotVerifier, final State snapshotState) {
+    private TestActorRef<MockRaftActor> doTestSnapshotAfterStartupWithMigratedMessage(final String id,
+            final boolean persistent, final Consumer<Snapshot> snapshotVerifier, final State snapshotState) {
         InMemorySnapshotStore.addSnapshotSavedLatch(id);
         InMemoryJournal.addDeleteMessagesCompleteLatch(id);
         DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
@@ -111,16 +111,16 @@ public class MigratedMessagesTest extends AbstractActorTest {
 
         RaftActorSnapshotCohort snapshotCohort = new RaftActorSnapshotCohort() {
             @Override
-            public void createSnapshot(ActorRef actorRef, java.util.Optional<OutputStream> installSnapshotStream) {
+            public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
                 actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef);
             }
 
             @Override
-            public void applySnapshot(State newState) {
+            public void applySnapshot(final State newState) {
             }
 
             @Override
-            public State deserializeSnapshot(ByteSource snapshotBytes) {
+            public State deserializeSnapshot(final ByteSource snapshotBytes) {
                 throw new UnsupportedOperationException();
             }
         };
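
The snapshot-cohort callbacks above move from Guava's Optional to java.util.Optional, with orElseThrow() standing in for get(). A short sketch of the mapping between the two APIs:

    import java.util.Optional;

    final class OptionalMigration {
        private OptionalMigration() {
            // Utility class
        }

        public static void main(final String[] args) {
            // Guava -> JDK: fromNullable -> ofNullable, absent() -> empty(), get() -> orElseThrow().
            Optional<String> maybe = Optional.ofNullable(System.getenv("SOME_UNSET_VARIABLE"));
            System.out.println(maybe.isPresent() ? maybe.orElseThrow() : "empty");
        }
    }
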
index 9c58e6b7ca10400614a7eec6a3a2585e97522153..83aebc37c316729261a321d3329738e22547c9f9 100644 (file)
@@ -13,8 +13,6 @@ import static org.mockito.Mockito.mock;
 
 import akka.actor.ActorRef;
 import akka.actor.Props;
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
 import com.google.common.io.ByteSource;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.IOException;
@@ -23,13 +21,16 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 import org.apache.commons.lang3.SerializationUtils;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.concepts.Identifier;
 
 public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort, RaftActorSnapshotCohort {
@@ -50,16 +51,16 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
 
     protected MockRaftActor(final AbstractBuilder<?, ?> builder) {
         super(builder.id, builder.peerAddresses != null ? builder.peerAddresses :
-            Collections.<String, String>emptyMap(), Optional.fromNullable(builder.config), PAYLOAD_VERSION);
-        state = new ArrayList<>();
-        this.actorDelegate = mock(RaftActor.class);
-        this.recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
+            Collections.emptyMap(), Optional.ofNullable(builder.config), PAYLOAD_VERSION);
+        state = Collections.synchronizedList(new ArrayList<>());
+        actorDelegate = mock(RaftActor.class);
+        recoveryCohortDelegate = mock(RaftActorRecoveryCohort.class);
 
-        this.snapshotCohortDelegate = builder.snapshotCohort != null ? builder.snapshotCohort :
+        snapshotCohortDelegate = builder.snapshotCohort != null ? builder.snapshotCohort :
             mock(RaftActorSnapshotCohort.class);
 
         if (builder.dataPersistenceProvider == null) {
-            setPersistence(builder.persistent.isPresent() ? builder.persistent.get() : true);
+            setPersistence(builder.persistent.isPresent() ? builder.persistent.orElseThrow() : true);
         } else {
             setPersistence(builder.dataPersistenceProvider);
         }
@@ -174,14 +175,14 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     private void applySnapshotState(final Snapshot.State newState) {
-        if (newState instanceof MockSnapshotState) {
+        if (newState instanceof MockSnapshotState mockState) {
             state.clear();
-            state.addAll(((MockSnapshotState)newState).getState());
+            state.addAll(mockState.getState());
         }
     }
 
     @Override
-    public void createSnapshot(final ActorRef actorRef, final java.util.Optional<OutputStream> installSnapshotStream) {
+    public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
         LOG.info("{}: createSnapshot called", persistenceId());
         snapshotCohortDelegate.createSnapshot(actorRef, installSnapshotStream);
     }
@@ -209,11 +210,11 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
 
     @Override
     protected Optional<ActorRef> getRoleChangeNotifier() {
-        return Optional.fromNullable(roleChangeNotifier);
+        return Optional.ofNullable(roleChangeNotifier);
     }
 
     @Override public String persistenceId() {
-        return this.getId();
+        return getId();
     }
 
     protected void newBehavior(final RaftActorBehavior newBehavior) {
@@ -243,15 +244,15 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
     }
 
     public static List<Object> fromState(final Snapshot.State from) {
-        if (from instanceof MockSnapshotState) {
-            return ((MockSnapshotState)from).getState();
+        if (from instanceof MockSnapshotState mockState) {
+            return mockState.getState();
         }
 
         throw new IllegalStateException("Unexpected snapshot State: " + from);
     }
 
     public ReplicatedLog getReplicatedLog() {
-        return this.getRaftActorContext().getReplicatedLog();
+        return getRaftActorContext().getReplicatedLog();
     }
 
     @Override
@@ -281,7 +282,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
         private ActorRef roleChangeNotifier;
         private RaftActorSnapshotMessageSupport snapshotMessageSupport;
         private Snapshot restoreFromSnapshot;
-        private Optional<Boolean> persistent = Optional.absent();
+        private Optional<Boolean> persistent = Optional.empty();
         private final Class<A> actorClass;
         private Function<Runnable, Void> pauseLeaderFunction;
         private RaftActorSnapshotCohort snapshotCohort;
@@ -296,52 +297,52 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
         }
 
         public T id(final String newId) {
-            this.id = newId;
+            id = newId;
             return self();
         }
 
         public T peerAddresses(final Map<String, String> newPeerAddresses) {
-            this.peerAddresses = newPeerAddresses;
+            peerAddresses = newPeerAddresses;
             return self();
         }
 
         public T config(final ConfigParams newConfig) {
-            this.config = newConfig;
+            config = newConfig;
             return self();
         }
 
         public T dataPersistenceProvider(final DataPersistenceProvider newDataPersistenceProvider) {
-            this.dataPersistenceProvider = newDataPersistenceProvider;
+            dataPersistenceProvider = newDataPersistenceProvider;
             return self();
         }
 
         public T roleChangeNotifier(final ActorRef newRoleChangeNotifier) {
-            this.roleChangeNotifier = newRoleChangeNotifier;
+            roleChangeNotifier = newRoleChangeNotifier;
             return self();
         }
 
         public T snapshotMessageSupport(final RaftActorSnapshotMessageSupport newSnapshotMessageSupport) {
-            this.snapshotMessageSupport = newSnapshotMessageSupport;
+            snapshotMessageSupport = newSnapshotMessageSupport;
             return self();
         }
 
         public T restoreFromSnapshot(final Snapshot newRestoreFromSnapshot) {
-            this.restoreFromSnapshot = newRestoreFromSnapshot;
+            restoreFromSnapshot = newRestoreFromSnapshot;
             return self();
         }
 
         public T persistent(final Optional<Boolean> newPersistent) {
-            this.persistent = newPersistent;
+            persistent = newPersistent;
             return self();
         }
 
         public T pauseLeaderFunction(final Function<Runnable, Void> newPauseLeaderFunction) {
-            this.pauseLeaderFunction = newPauseLeaderFunction;
+            pauseLeaderFunction = newPauseLeaderFunction;
             return self();
         }
 
         public T snapshotCohort(final RaftActorSnapshotCohort newSnapshotCohort) {
-            this.snapshotCohort = newSnapshotCohort;
+            snapshotCohort = newSnapshotCohort;
             return self();
         }
 
@@ -371,10 +372,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
 
         @Override
         public int hashCode() {
-            final int prime = 31;
-            int result = 1;
-            result = prime * result + (state == null ? 0 : state.hashCode());
-            return result;
+            return Objects.hash(state);
         }
 
         @Override
@@ -389,11 +387,7 @@ public class MockRaftActor extends RaftActor implements RaftActorRecoveryCohort,
                 return false;
             }
             MockSnapshotState other = (MockSnapshotState) obj;
-            if (state == null) {
-                if (other.state != null) {
-                    return false;
-                }
-            } else if (!state.equals(other.state)) {
+            if (!Objects.equals(state, other.state)) {
                 return false;
             }
             return true;
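
MockSnapshotState's hashCode()/equals() above drop the hand-rolled null handling in favour of java.util.Objects helpers. A compact sketch of why that is equivalent:

    import java.util.Objects;

    final class NullSafeCompare {
        private NullSafeCompare() {
            // Utility class
        }

        public static void main(final String[] args) {
            String left = null;
            String right = "x";
            // Objects.equals() tolerates nulls on either side, replacing the nested null checks.
            System.out.println(Objects.equals(left, right)); // false, no NullPointerException
            // Objects.hash() uses the same 31-based accumulation as the old prime-and-result loop,
            // treating null elements as 0.
            System.out.println(Objects.hash(left));
        }
    }
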
index 2e05a7e5708b49381082353518b81cd299521f4f..6d4ec22e3d6be213f9a5f09d9b62042b9649d527 100644 (file)
@@ -8,27 +8,30 @@
 
 package org.opendaylight.controller.cluster.raft;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
-import akka.japi.Procedure;
 import com.google.common.io.ByteSource;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.Serializable;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.NonPersistentDataProvider;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.ByteState;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
 import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,8 +59,8 @@ public class MockRaftActorContext extends RaftActorContextImpl {
 
             @Override
             public void update(final long newTerm, final String newVotedFor) {
-                this.currentTerm = newTerm;
-                this.votedFor = newVotedFor;
+                currentTerm = newTerm;
+                votedFor = newVotedFor;
 
                 // TODO : Write to some persistent state
             }
@@ -74,13 +77,15 @@ public class MockRaftActorContext extends RaftActorContextImpl {
 
     public MockRaftActorContext() {
         super(null, null, "test", newElectionTerm(), -1, -1, new HashMap<>(),
-                new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG);
+                new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG,
+                MoreExecutors.directExecutor());
         setReplicatedLog(new MockReplicatedLogBuilder().build());
     }
 
     public MockRaftActorContext(final String id, final ActorSystem system, final ActorRef actor) {
         super(actor, null, id, newElectionTerm(), -1, -1, new HashMap<>(),
-            new DefaultConfigParamsImpl(), createProvider(), applyState -> actor.tell(applyState, actor), LOG);
+            new DefaultConfigParamsImpl(), createProvider(), applyState -> actor.tell(applyState, actor), LOG,
+            MoreExecutors.directExecutor());
 
         this.system = system;
 
@@ -107,7 +112,7 @@ public class MockRaftActorContext extends RaftActorContextImpl {
     }
 
     @Override public ActorSystem getActorSystem() {
-        return this.system;
+        return system;
     }
 
     @Override public ActorSelection getPeerActorSelection(final String peerId) {
@@ -187,38 +192,33 @@ public class MockRaftActorContext extends RaftActorContextImpl {
         @Override
         @SuppressWarnings("checkstyle:IllegalCatch")
         public boolean appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
-                final Procedure<ReplicatedLogEntry> callback, final boolean doAsync) {
+                final Consumer<ReplicatedLogEntry> callback, final boolean doAsync) {
             append(replicatedLogEntry);
 
             if (callback != null) {
-                try {
-                    callback.apply(replicatedLogEntry);
-                } catch (RuntimeException e) {
-                    throw e;
-                } catch (Exception e) {
-                    throw new RuntimeException(e);
-                }
+                callback.accept(replicatedLogEntry);
             }
 
             return true;
         }
     }
 
-    public static class MockPayload extends Payload implements Serializable {
+    public static final class MockPayload extends Payload {
         private static final long serialVersionUID = 3121380393130864247L;
-        private String value = "";
-        private int size;
+
+        private final String data;
+        private final int size;
 
         public MockPayload() {
+            this("");
         }
 
         public MockPayload(final String data) {
-            this.value = data;
-            size = value.length();
+            this(data, data.length());
         }
 
         public MockPayload(final String data, final int size) {
-            this(data);
+            this.data = requireNonNull(data);
             this.size = size;
         }
 
@@ -227,39 +227,46 @@ public class MockRaftActorContext extends RaftActorContextImpl {
             return size;
         }
 
+        @Override
+        public int serializedSize() {
+            return size;
+        }
+
         @Override
         public String toString() {
-            return value;
+            return data;
         }
 
         @Override
         public int hashCode() {
-            final int prime = 31;
-            int result = 1;
-            result = prime * result + (value == null ? 0 : value.hashCode());
-            return result;
+            return data.hashCode();
         }
 
         @Override
         public boolean equals(final Object obj) {
-            if (this == obj) {
-                return true;
-            }
-            if (obj == null) {
-                return false;
-            }
-            if (getClass() != obj.getClass()) {
-                return false;
-            }
-            MockPayload other = (MockPayload) obj;
-            if (value == null) {
-                if (other.value != null) {
-                    return false;
-                }
-            } else if (!value.equals(other.value)) {
-                return false;
-            }
-            return true;
+            return this == obj || obj instanceof MockPayload other && Objects.equals(data, other.data)
+                && size == other.size;
+        }
+
+        @Override
+        protected Object writeReplace() {
+            return new MockPayloadProxy(data, size);
+        }
+    }
+
+    private static final class MockPayloadProxy implements Serializable {
+        private static final long serialVersionUID = 1L;
+
+        private final String value;
+        private final int size;
+
+        MockPayloadProxy(String value, int size) {
+            this.value = value;
+            this.size = size;
+        }
+
+        Object readResolve() {
+            return new MockPayload(value, size);
         }
     }
 
@@ -268,19 +275,19 @@ public class MockRaftActorContext extends RaftActorContextImpl {
 
         public  MockReplicatedLogBuilder createEntries(final int start, final int end, final int term) {
             for (int i = start; i < end; i++) {
-                this.mockLog.append(new SimpleReplicatedLogEntry(i, term,
+                mockLog.append(new SimpleReplicatedLogEntry(i, term,
                         new MockRaftActorContext.MockPayload(Integer.toString(i))));
             }
             return this;
         }
 
         public  MockReplicatedLogBuilder addEntry(final int index, final int term, final MockPayload payload) {
-            this.mockLog.append(new SimpleReplicatedLogEntry(index, term, payload));
+            mockLog.append(new SimpleReplicatedLogEntry(index, term, payload));
             return this;
         }
 
         public ReplicatedLog build() {
-            return this.mockLog;
+            return mockLog;
         }
     }
 
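The MockPayload rework above adopts the writeReplace/readResolve serialization-proxy idiom. A minimal standalone sketch of that idiom follows; the Money and Proxy names and fields are illustrative only and do not appear in the codebase.

import java.io.Serial;
import java.io.Serializable;

// Illustrative sketch of the serialization-proxy idiom used by MockPayload above.
final class Money implements Serializable {
    @Serial
    private static final long serialVersionUID = 1L;

    private final String currency;
    private final long cents;

    Money(final String currency, final long cents) {
        this.currency = currency;
        this.cents = cents;
    }

    // Serialize a small proxy instead of this instance
    @Serial
    private Object writeReplace() {
        return new Proxy(currency, cents);
    }

    private static final class Proxy implements Serializable {
        @Serial
        private static final long serialVersionUID = 1L;

        private final String currency;
        private final long cents;

        Proxy(final String currency, final long cents) {
            this.currency = currency;
            this.cents = cents;
        }

        // Rebuild the real object when the proxy is deserialized
        @Serial
        private Object readResolve() {
            return new Money(currency, cents);
        }
    }
}
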
index 660d7914382a4e774b43ff98621ab971953342f6..f875f891b148a64a1e322425d8f682acfc367f3b 100644 (file)
@@ -10,10 +10,10 @@ package org.opendaylight.controller.cluster.raft;
 import static org.junit.Assert.assertEquals;
 
 import akka.actor.ActorRef;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
@@ -291,7 +291,7 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
         //
         // We also add another voting follower actor into the mix even though it shouldn't affect the
         // outcome.
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false),
                 new ServerInfo(follower2Id, true), new ServerInfo("downPeer", false)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, currentTerm,
@@ -305,13 +305,13 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
         DefaultConfigParamsImpl follower2ConfigParams = newFollowerConfigParams();
         follower2ConfigParams.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
         follower2Actor = newTestRaftActor(follower2Id, TestRaftActor.newBuilder().peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString()))
+                Map.of(leaderId, testActorPath(leaderId), follower1Id, follower1Actor.path().toString()))
                     .config(follower2ConfigParams).persistent(Optional.of(false)));
         TestRaftActor follower2Instance = follower2Actor.underlyingActor();
         follower2Instance.waitForRecoveryComplete();
         follower2CollectorActor = follower2Instance.collectorActor();
 
-        peerAddresses = ImmutableMap.of(follower1Id, follower1Actor.path().toString(),
+        peerAddresses = Map.of(follower1Id, follower1Actor.path().toString(),
                 follower2Id, follower2Actor.path().toString());
 
         createNewLeaderActor();
@@ -399,7 +399,7 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
 
         // Set up a persisted ServerConfigurationPayload with the leader voting and the follower non-voting.
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(leaderId, true), new ServerInfo(follower1Id, false)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, persistedTerm,
                 persistedServerConfig);
@@ -411,11 +411,10 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
 
         DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
         follower1Actor = newTestRaftActor(follower1Id, follower1Builder.peerAddresses(
-                ImmutableMap.of(leaderId, testActorPath(leaderId))).config(followerConfigParams)
+                Map.of(leaderId, testActorPath(leaderId))).config(followerConfigParams)
                     .persistent(Optional.of(false)));
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString()).build();
+        peerAddresses = Map.of(follower1Id, follower1Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, TestRaftActor.newBuilder().peerAddresses(peerAddresses)
@@ -436,16 +435,16 @@ public class NonVotingFollowerIntegrationTest extends AbstractRaftActorIntegrati
 
         currentTerm = persistedTerm + 1;
         assertEquals("Leader term", currentTerm, leaderContext.getTermInformation().getCurrentTerm());
-        assertEquals("Leader server config", Sets.newHashSet(persistedServerConfig.getServerConfig()),
-                Sets.newHashSet(leaderContext.getPeerServerInfo(true).getServerConfig()));
+        assertEquals("Leader server config", Set.copyOf(persistedServerConfig.getServerConfig()),
+                Set.copyOf(leaderContext.getPeerServerInfo(true).getServerConfig()));
         assertEquals("Leader isVotingMember", true, leaderContext.isVotingMember());
 
         // Verify follower's context after startup
 
         MessageCollectorActor.expectFirstMatching(follower1CollectorActor, AppendEntries.class);
         assertEquals("Follower term", currentTerm, follower1Context.getTermInformation().getCurrentTerm());
-        assertEquals("Follower server config", Sets.newHashSet(persistedServerConfig.getServerConfig()),
-                Sets.newHashSet(follower1Context.getPeerServerInfo(true).getServerConfig()));
+        assertEquals("Follower server config", Set.copyOf(persistedServerConfig.getServerConfig()),
+                Set.copyOf(follower1Context.getPeerServerInfo(true).getServerConfig()));
         assertEquals("FollowerisVotingMember", false, follower1Context.isVotingMember());
     }
 }
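The hunks above are part of a wider Guava-to-JDK migration, with Arrays.asList, ImmutableMap.of and Sets.newHashSet giving way to List.of, Map.of and Set.copyOf. A small illustrative comparison, not taken from the codebase:

import java.util.List;
import java.util.Map;
import java.util.Set;

// Illustrative only: the JDK factory methods used by the hunks above. They
// return unmodifiable collections and reject null elements, unlike Guava's
// Sets.newHashSet()/Maps.newHashMap(), which return mutable ones.
final class CollectionFactoriesExample {
    static void demo() {
        List<String> servers = List.of("leader", "follower1");   // was Arrays.asList(...)
        Map<String, String> peers = Map.of("peer1", "address1"); // was ImmutableMap.of(...)
        Set<String> ids = Set.copyOf(servers);                   // was Sets.newHashSet(...)
        System.out.println(servers + " " + peers + " " + ids);
    }
}
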
index 54661b67fc7a12e24fbf6bf2144bcb39f6d066c8..fabfc6c280468792da400318a8fe42b8c72c00d5 100644 (file)
@@ -19,10 +19,9 @@ import static org.mockito.Mockito.verify;
 
 import akka.actor.Props;
 import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import java.util.Arrays;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import org.junit.After;
 import org.junit.Test;
@@ -60,7 +59,7 @@ public class RaftActorContextImplTest extends AbstractActorTest {
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
         RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
                 "test", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
-                peerMap, configParams, createProvider(), applyState -> { }, LOG);
+                peerMap, configParams, createProvider(), applyState -> { }, LOG, MoreExecutors.directExecutor());
 
         assertEquals("getPeerAddress", "peerAddress1", context.getPeerAddress("peer1"));
         assertEquals("getPeerAddress", null, context.getPeerAddress("peer2"));
@@ -84,8 +83,8 @@ public class RaftActorContextImplTest extends AbstractActorTest {
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
         RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
                 "test", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
-                Maps.newHashMap(ImmutableMap.<String, String>of("peer1", "peerAddress1")), configParams,
-                createProvider(), applyState -> { }, LOG);
+                Map.of("peer1", "peerAddress1"), configParams,
+                createProvider(), applyState -> { }, LOG, MoreExecutors.directExecutor());
 
         context.setPeerAddress("peer1", "peerAddress1_1");
         assertEquals("getPeerAddress", "peerAddress1_1", context.getPeerAddress("peer1"));
@@ -98,23 +97,24 @@ public class RaftActorContextImplTest extends AbstractActorTest {
     public void testUpdatePeerIds() {
         RaftActorContextImpl context = new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
                 "self", new ElectionTermImpl(createProvider(), "test", LOG), -1, -1,
-                Maps.newHashMap(ImmutableMap.<String, String>of("peer1", "peerAddress1")),
-                new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG);
+                Map.of("peer1", "peerAddress1"),
+                new DefaultConfigParamsImpl(), createProvider(), applyState -> { }, LOG,
+                MoreExecutors.directExecutor());
 
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", false),
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", false),
                 new ServerInfo("peer2", true), new ServerInfo("peer3", false))));
         verifyPeerInfo(context, "peer1", null);
         verifyPeerInfo(context, "peer2", true);
         verifyPeerInfo(context, "peer3", false);
         assertEquals("isVotingMember", false, context.isVotingMember());
 
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("self", true),
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("self", true),
                 new ServerInfo("peer2", true), new ServerInfo("peer3", true))));
         verifyPeerInfo(context, "peer2", true);
         verifyPeerInfo(context, "peer3", true);
         assertEquals("isVotingMember", true, context.isVotingMember());
 
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo("peer2", true),
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo("peer2", true),
                 new ServerInfo("peer3", true))));
         verifyPeerInfo(context, "peer2", true);
         verifyPeerInfo(context, "peer3", true);
@@ -129,7 +129,7 @@ public class RaftActorContextImplTest extends AbstractActorTest {
         PeerInfo peerInfo = context.getPeerInfo(peerId);
         if (voting != null) {
             assertNotNull("Expected peer " + peerId, peerInfo);
-            assertEquals("getVotingState for " + peerId, voting.booleanValue()
+            assertEquals("getVotingState for " + peerId, voting
                     ? VotingState.VOTING : VotingState.NON_VOTING, peerInfo.getVotingState());
         } else {
             assertNull("Unexpected peer " + peerId, peerInfo);
index 708f371bcf1439ef250bc0febaa68f1e80e16edf..22369d78870b64b2d1a3ebce71bdac2b795a5aaf 100644 (file)
@@ -15,19 +15,21 @@ import static org.mockito.Mockito.verify;
 import akka.japi.Procedure;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.PersistentPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
+import org.opendaylight.controller.cluster.raft.messages.PersistentPayload;
 
 /**
  * Unit tests for RaftActorDelegatingPersistentDataProvider.
  *
  * @author Thomas Pantelis
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class RaftActorDelegatingPersistentDataProviderTest {
     private static final Payload PERSISTENT_PAYLOAD = new TestPersistentPayload();
 
@@ -55,7 +57,6 @@ public class RaftActorDelegatingPersistentDataProviderTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
         doReturn(PERSISTENT_PAYLOAD).when(mockPersistentLogEntry).getData();
         doReturn(NON_PERSISTENT_PAYLOAD).when(mockNonPersistentLogEntry).getData();
         provider = new RaftActorDelegatingPersistentDataProvider(mockDelegateProvider, mockPersistentProvider);
@@ -97,12 +98,28 @@ public class RaftActorDelegatingPersistentDataProviderTest {
     }
 
     static class TestNonPersistentPayload extends Payload {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
         @Override
         public int size() {
             return 0;
         }
+
+        @Override
+        public int serializedSize() {
+            return 0;
+        }
+
+        @Override
+        protected Object writeReplace() {
+            // Not needed
+            throw new UnsupportedOperationException();
+        }
     }
 
     static class TestPersistentPayload extends TestNonPersistentPayload implements PersistentPayload {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
     }
 }
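The switch from MockitoAnnotations.initMocks(this) to @RunWith(MockitoJUnitRunner.StrictStubs.class) above is the usual Mockito migration. A minimal sketch using an arbitrary List mock, not taken from the codebase:

import static org.mockito.Mockito.verify;

import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;

// Illustrative only: the runner initializes @Mock fields, so no
// MockitoAnnotations.initMocks(this) call is needed in @Before, and strict
// stubbing fails the test on unused or mismatched stubs.
@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class RunnerMigrationExampleTest {
    @Mock
    private List<String> mockList;

    @Test
    public void mockIsInitializedByRunner() {
        mockList.add("value");
        verify(mockList).add("value");
    }
}
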
index 44b0d2be6b5ca86a7135a297f4ef56428d7c081b..c48f87a7ccc1e565c97274920e7c247d78a29d59 100644 (file)
@@ -13,7 +13,7 @@ import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
 
 import akka.dispatch.Dispatchers;
-import com.google.common.base.Function;
+import java.util.function.Function;
 import org.junit.After;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.RaftActorLeadershipTransferCohort.OnComplete;
@@ -38,7 +38,7 @@ public class RaftActorLeadershipTransferCohortTest extends AbstractActorTest {
         factory.close();
     }
 
-    private void setup(String testName) {
+    private void setup(final String testName) {
         String persistenceId = factory.generateActorId(testName + "-leader-");
         config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
         mockRaftActor = factory.<MockRaftActor>createTestActor(MockRaftActor.builder().id(persistenceId).config(config)
@@ -95,7 +95,7 @@ public class RaftActorLeadershipTransferCohortTest extends AbstractActorTest {
 
         Leader leader = new Leader(mockRaftActor.getRaftActorContext()) {
             @Override
-            public void transferLeadership(RaftActorLeadershipTransferCohort leadershipTransferCohort) {
+            public void transferLeadership(final RaftActorLeadershipTransferCohort leadershipTransferCohort) {
                 leadershipTransferCohort.transferComplete();
             }
         };
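This file trades com.google.common.base.Function for java.util.function.Function. A tiny illustrative sketch of the JDK interface, not taken from the codebase:

import java.util.function.Function;

// Illustrative only: java.util.function.Function is a drop-in replacement
// for com.google.common.base.Function at most call sites and adds
// composition via andThen()/compose().
final class FunctionMigrationExample {
    static void demo() {
        Function<String, Integer> length = String::length;
        Function<Integer, String> describe = len -> "length=" + len;
        System.out.println(length.andThen(describe).apply("leader")); // prints length=6
    }
}
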
index 494ca11d6837616169bb0f778b6c7a2926e6cffa..cceea83740116a00968cf6d864213edf0e945acc 100644 (file)
@@ -12,29 +12,48 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
 
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
 import akka.persistence.RecoveryCompleted;
 import akka.persistence.SnapshotMetadata;
 import akka.persistence.SnapshotOffer;
-import com.google.common.collect.Sets;
-import java.util.Arrays;
-import java.util.Collections;
+import akka.testkit.javadsl.TestKit;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.ArgumentMatchers;
 import org.mockito.InOrder;
 import org.mockito.Mock;
 import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.raft.MockRaftActor.MockSnapshotState;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.DeleteEntries;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
@@ -42,7 +61,7 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,37 +70,37 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class RaftActorRecoverySupportTest {
-
     private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
 
     @Mock
     private DataPersistenceProvider mockPersistence;
 
-
     @Mock
     private RaftActorRecoveryCohort mockCohort;
 
-    @Mock
-    private RaftActorSnapshotCohort mockSnapshotCohort;
-
     @Mock
     PersistentDataProvider mockPersistentProvider;
 
+    ActorRef mockActorRef;
+
+    ActorSystem mockActorSystem;
+
     private RaftActorRecoverySupport support;
 
     private RaftActorContext context;
     private final DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
     private final String localId = "leader";
 
-
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
-
-        context = new RaftActorContextImpl(null, null, localId, new ElectionTermImpl(mockPersistentProvider, "test",
-                LOG), -1, -1, Collections.<String,String>emptyMap(), configParams,
-                mockPersistence, applyState -> { }, LOG);
+        mockActorSystem = ActorSystem.create();
+        mockActorRef = mockActorSystem.actorOf(Props.create(DoNothingActor.class));
+        context = new RaftActorContextImpl(mockActorRef, null, localId,
+                new ElectionTermImpl(mockPersistentProvider, "test", LOG), -1, -1,
+                Map.of(), configParams, mockPersistence, applyState -> { },
+                LOG, MoreExecutors.directExecutor());
 
         support = new RaftActorRecoverySupport(context, mockCohort);
 
@@ -90,6 +109,11 @@ public class RaftActorRecoverySupportTest {
         context.setReplicatedLog(ReplicatedLogImpl.newInstance(context));
     }
 
+    @After
+    public void tearDown() {
+        TestKit.shutdownActorSystem(mockActorSystem);
+    }
+
     private void sendMessageToSupport(final Object message) {
         sendMessageToSupport(message, false);
     }
@@ -158,6 +182,40 @@ public class RaftActorRecoverySupportTest {
         inOrder.verifyNoMoreInteractions();
     }
 
+    @Test
+    public void testIncrementalRecovery() {
+        int recoverySnapshotInterval = 3;
+        int numberOfEntries = 5;
+        configParams.setRecoverySnapshotIntervalSeconds(recoverySnapshotInterval);
+        Consumer<Optional<OutputStream>> mockSnapshotConsumer = mock(Consumer.class);
+        context.getSnapshotManager().setCreateSnapshotConsumer(mockSnapshotConsumer);
+
+        ScheduledExecutorService applyEntriesExecutor = Executors.newSingleThreadScheduledExecutor();
+        ReplicatedLog replicatedLog = context.getReplicatedLog();
+
+        for (int i = 0; i <= numberOfEntries; i++) {
+            replicatedLog.append(new SimpleReplicatedLogEntry(i, 1,
+                new MockRaftActorContext.MockPayload(String.valueOf(i))));
+        }
+
+        AtomicInteger entryCount = new AtomicInteger();
+        ScheduledFuture<?> applyEntriesFuture = applyEntriesExecutor.scheduleAtFixedRate(() -> {
+            int run = entryCount.getAndIncrement();
+            LOG.info("Sending entry number {}", run);
+            sendMessageToSupport(new ApplyJournalEntries(run));
+        }, 0, 1, TimeUnit.SECONDS);
+
+        ScheduledFuture<Boolean> canceller = applyEntriesExecutor.schedule(() -> applyEntriesFuture.cancel(false),
+            numberOfEntries, TimeUnit.SECONDS);
+        try {
+            canceller.get();
+            verify(mockSnapshotConsumer, times(1)).accept(any());
+            applyEntriesExecutor.shutdown();
+        } catch (InterruptedException | ExecutionException e) {
+            Assert.fail();
+        }
+    }
+
     @Test
     public void testOnSnapshotOffer() {
 
@@ -177,13 +235,13 @@ public class RaftActorRecoverySupportTest {
         long electionTerm = 2;
         String electionVotedFor = "member-2";
 
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(new MockPayload("1")));
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(new MockPayload("1")));
         Snapshot snapshot = Snapshot.create(snapshotState,
-                Arrays.asList(unAppliedEntry1, unAppliedEntry2), lastIndexDuringSnapshotCapture, 1,
+                List.of(unAppliedEntry1, unAppliedEntry2), lastIndexDuringSnapshotCapture, 1,
                 lastAppliedDuringSnapshotCapture, 1, electionTerm, electionVotedFor, null);
 
         SnapshotMetadata metadata = new SnapshotMetadata("test", 6, 12345);
-        SnapshotOffer snapshotOffer = new SnapshotOffer(metadata , snapshot);
+        SnapshotOffer snapshotOffer = new SnapshotOffer(metadata, snapshot);
 
         sendMessageToSupport(snapshotOffer);
 
@@ -258,12 +316,11 @@ public class RaftActorRecoverySupportTest {
 
     @Test
     public void testDataRecoveredWithPersistenceDisabled() {
-        doNothing().when(mockCohort).applyRecoverySnapshot(any());
         doReturn(false).when(mockPersistence).isRecoveryApplicable();
         doReturn(10L).when(mockPersistentProvider).getLastSequenceNumber();
 
-        Snapshot snapshot = Snapshot.create(new MockSnapshotState(Arrays.asList(new MockPayload("1"))),
-                Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1, -1, null, null);
+        Snapshot snapshot = Snapshot.create(new MockSnapshotState(List.of(new MockPayload("1"))),
+                List.of(), 3, 1, 3, 1, -1, null, null);
         SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
 
         sendMessageToSupport(snapshotOffer);
@@ -297,14 +354,12 @@ public class RaftActorRecoverySupportTest {
     }
 
     static UpdateElectionTerm updateElectionTerm(final long term, final String votedFor) {
-        return ArgumentMatchers.argThat(
-            other -> term == other.getCurrentTerm() && votedFor.equals(other.getVotedFor()));
+        return ArgumentMatchers.argThat(other ->
+                term == other.getCurrentTerm() && votedFor.equals(other.getVotedFor()));
     }
 
     @Test
     public void testNoDataRecoveredWithPersistenceDisabled() {
-        doReturn(false).when(mockPersistence).isRecoveryApplicable();
-
         sendMessageToSupport(new UpdateElectionTerm(5, "member2"));
 
         assertEquals("Current term", 5, context.getTermInformation().getCurrentTerm());
@@ -326,7 +381,7 @@ public class RaftActorRecoverySupportTest {
         context.addToPeers(follower2, null, VotingState.VOTING);
 
         //add new Server
-        ServerConfigurationPayload obj = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload obj = new ServerConfigurationPayload(List.of(
                 new ServerInfo(localId, true),
                 new ServerInfo(follower1, true),
                 new ServerInfo(follower2, false),
@@ -336,8 +391,7 @@ public class RaftActorRecoverySupportTest {
 
         //verify new peers
         assertTrue("Dynamic server configuration", context.isDynamicServerConfigurationInUse());
-        assertEquals("New peer Ids", Sets.newHashSet(follower1, follower2, follower3),
-                Sets.newHashSet(context.getPeerIds()));
+        assertEquals("New peer Ids", Set.of(follower1, follower2, follower3), Set.copyOf(context.getPeerIds()));
         assertEquals("follower1 isVoting", true, context.getPeerInfo(follower1).isVoting());
         assertEquals("follower2 isVoting", false, context.getPeerInfo(follower2).isVoting());
         assertEquals("follower3 isVoting", true, context.getPeerInfo(follower3).isVoting());
@@ -348,7 +402,7 @@ public class RaftActorRecoverySupportTest {
         verify(mockCohort, never()).appendRecoveredLogEntry(any(Payload.class));
 
         //remove existing follower1
-        obj = new ServerConfigurationPayload(Arrays.asList(
+        obj = new ServerConfigurationPayload(List.of(
                 new ServerInfo(localId, true),
                 new ServerInfo("follower2", true),
                 new ServerInfo("follower3", true)));
@@ -357,7 +411,7 @@ public class RaftActorRecoverySupportTest {
 
         //verify new peers
         assertTrue("Dynamic server configuration", context.isDynamicServerConfigurationInUse());
-        assertEquals("New peer Ids", Sets.newHashSet(follower2, follower3), Sets.newHashSet(context.getPeerIds()));
+        assertEquals("New peer Ids", Set.of(follower2, follower3), Set.copyOf(context.getPeerIds()));
     }
 
     @Test
@@ -365,30 +419,30 @@ public class RaftActorRecoverySupportTest {
         doReturn(false).when(mockPersistence).isRecoveryApplicable();
 
         String follower = "follower";
-        ServerConfigurationPayload obj = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload obj = new ServerConfigurationPayload(List.of(
                 new ServerInfo(localId, true), new ServerInfo(follower, true)));
 
         sendMessageToSupport(new SimpleReplicatedLogEntry(0, 1, obj));
 
         //verify new peers
-        assertEquals("New peer Ids", Sets.newHashSet(follower), Sets.newHashSet(context.getPeerIds()));
+        assertEquals("New peer Ids", Set.of(follower), Set.copyOf(context.getPeerIds()));
     }
 
     @Test
     public void testOnSnapshotOfferWithServerConfiguration() {
         long electionTerm = 2;
         String electionVotedFor = "member-2";
-        ServerConfigurationPayload serverPayload = new ServerConfigurationPayload(Arrays.asList(
-                                                        new ServerInfo(localId, true),
-                                                        new ServerInfo("follower1", true),
-                                                        new ServerInfo("follower2", true)));
+        ServerConfigurationPayload serverPayload = new ServerConfigurationPayload(List.of(
+                new ServerInfo(localId, true),
+                new ServerInfo("follower1", true),
+                new ServerInfo("follower2", true)));
 
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(new MockPayload("1")));
-        Snapshot snapshot = Snapshot.create(snapshotState, Collections.<ReplicatedLogEntry>emptyList(),
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(new MockPayload("1")));
+        Snapshot snapshot = Snapshot.create(snapshotState, List.of(),
                 -1, -1, -1, -1, electionTerm, electionVotedFor, serverPayload);
 
         SnapshotMetadata metadata = new SnapshotMetadata("test", 6, 12345);
-        SnapshotOffer snapshotOffer = new SnapshotOffer(metadata , snapshot);
+        SnapshotOffer snapshotOffer = new SnapshotOffer(metadata, snapshot);
 
         sendMessageToSupport(snapshotOffer);
 
@@ -396,7 +450,6 @@ public class RaftActorRecoverySupportTest {
         assertEquals("Election term", electionTerm, context.getTermInformation().getCurrentTerm());
         assertEquals("Election votedFor", electionVotedFor, context.getTermInformation().getVotedFor());
         assertTrue("Dynamic server configuration", context.isDynamicServerConfigurationInUse());
-        assertEquals("Peer List", Sets.newHashSet("follower1", "follower2"),
-            Sets.newHashSet(context.getPeerIds()));
+        assertEquals("Peer List", Set.of("follower1", "follower2"), Set.copyOf(context.getPeerIds()));
     }
-}
\ No newline at end of file
+}
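The new testIncrementalRecovery above drives recovery with a periodic task plus a delayed canceller on a ScheduledExecutorService. A standalone sketch of that schedule-then-cancel pattern, with arbitrary tick output and delays:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: schedule a periodic task, then schedule a second task
// that cancels it; get() blocks until the cancellation has actually run.
final class PeriodicTaskExample {
    public static void main(final String[] args) throws Exception {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        AtomicInteger ticks = new AtomicInteger();

        ScheduledFuture<?> periodic = executor.scheduleAtFixedRate(
            () -> System.out.println("tick " + ticks.incrementAndGet()),
            0, 1, TimeUnit.SECONDS);

        executor.schedule(() -> periodic.cancel(false), 5, TimeUnit.SECONDS).get();
        executor.shutdown();
    }
}
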
index 8207fd76a89aea7a6dda483ddaef33b26891f3e2..884b16c11e8aceed093dfa2f063e7c5499c9a854 100644 (file)
@@ -21,19 +21,15 @@ import akka.actor.Props;
 import akka.dispatch.Dispatchers;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.base.Optional;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
 import com.google.common.io.ByteSource;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.io.OutputStream;
 import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
@@ -155,7 +151,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         followerActorContext.setCurrentBehavior(follower);
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
                         followerActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -176,7 +172,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
 
@@ -202,10 +198,9 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Verify new server config was applied in both followers
 
-        assertEquals("Follower peers", Sets.newHashSet(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds());
+        assertEquals("Follower peers", Set.of(LEADER_ID, NEW_SERVER_ID), followerActorContext.getPeerIds());
 
-        assertEquals("New follower peers", Sets.newHashSet(LEADER_ID, FOLLOWER_ID),
-                newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID, FOLLOWER_ID), newFollowerActorContext.getPeerIds());
 
         assertEquals("Follower commit index", 3, followerActorContext.getCommitIndex());
         assertEquals("Follower last applied index", 3, followerActorContext.getLastApplied());
@@ -237,8 +232,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 0, 2, 1).build());
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -256,7 +251,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
 
@@ -276,7 +271,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Verify new server config was applied in the new follower
 
-        assertEquals("New follower peers", Sets.newHashSet(LEADER_ID), newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds());
 
         LOG.info("testAddServerWithNoExistingFollower ending");
     }
@@ -289,8 +284,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -302,7 +297,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
 
@@ -323,7 +318,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Verify new server config was applied in the new follower
 
-        assertEquals("New follower peers", Sets.newHashSet(LEADER_ID), newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID), newFollowerActorContext.getPeerIds());
 
         assertNoneMatching(newFollowerCollectorActor, InstallSnapshot.class, 500);
 
@@ -339,7 +334,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", java.util.Optional.of(LEADER_ID), addServerReply.getLeaderHint());
+        assertEquals("getLeaderHint", Optional.of(LEADER_ID), addServerReply.getLeaderHint());
 
         expectFirstMatching(leaderCollectorActor, ApplyState.class);
         assertEquals("Leader journal last index", 1, leaderActorContext.getReplicatedLog().lastIndex());
@@ -359,8 +354,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -408,8 +403,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // Verify ServerConfigurationPayload entry in the new follower
 
         expectMatching(newFollowerCollectorActor, ApplyState.class, 2);
-        assertEquals("New follower peers", Sets.newHashSet(LEADER_ID, NEW_SERVER_ID2),
-               newFollowerActorContext.getPeerIds());
+        assertEquals("New follower peers", Set.of(LEADER_ID, NEW_SERVER_ID2), newFollowerActorContext.getPeerIds());
 
         LOG.info("testAddServerWithOperationInProgress ending");
     }
@@ -422,8 +416,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -445,7 +439,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         expectFirstMatching(newFollowerCollectorActor, ApplySnapshot.class);
 
@@ -469,8 +463,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -501,8 +495,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -550,8 +544,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -595,8 +589,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -629,7 +623,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> noLeaderActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+                MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
                         followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
@@ -651,8 +645,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.<String, String>of(),
-                        initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
+                MockLeaderRaftActor.props(Map.of(), initialActorContext)
+                    .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
         MockLeaderRaftActor leaderRaftActor = leaderActor.underlyingActor();
@@ -686,7 +680,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // The first AddServer should succeed with OK even though consensus wasn't reached
         AddServerReply addServerReply = testKit.expectMsgClass(Duration.ofSeconds(5), AddServerReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, addServerReply.getStatus());
-        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().get());
+        assertEquals("getLeaderHint", LEADER_ID, addServerReply.getLeaderHint().orElseThrow());
 
         // Verify ServerConfigurationPayload entry in leader's log
         verifyServerConfigurationPayloadEntry(leaderActorContext.getReplicatedLog(), votingServer(LEADER_ID),
@@ -711,7 +705,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -735,14 +729,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID));
 
         TestActorRef<MockRaftActor> followerRaftActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID,
+                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID,
                         leaderActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(FOLLOWER_ID));
         followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete();
 
-        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.<ReplicatedLogEntry>emptyList(),
-                -1, -1, (short)0), leaderActor);
+        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor);
 
         followerRaftActor.tell(new AddServer(NEW_SERVER_ID, newFollowerRaftActor.path().toString(), true),
                 testKit.getRef());
@@ -758,7 +751,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
         configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
         TestActorRef<MockRaftActor> noLeaderActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+                MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
                         followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
@@ -767,7 +760,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 noLeaderActor.underlyingActor());
 
         ReplicatedLogEntry serverConfigEntry = new SimpleReplicatedLogEntry(1, 1,
-                new ServerConfigurationPayload(Collections.<ServerInfo>emptyList()));
+                new ServerConfigurationPayload(List.of()));
         boolean handled = support.handleMessage(new ApplyState(null, null, serverConfigEntry), ActorRef.noSender());
         assertEquals("Message handled", true, handled);
 
@@ -787,7 +780,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         configParams.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> leaderActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(LEADER_ID).peerAddresses(ImmutableMap.of(FOLLOWER_ID,
+                MockRaftActor.builder().id(LEADER_ID).peerAddresses(Map.of(FOLLOWER_ID,
                         followerActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
@@ -807,7 +800,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActor.path().toString()),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActor.path().toString()),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -829,14 +822,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 MessageCollectorActor.props(), actorFactory.generateActorId(LEADER_ID));
 
         TestActorRef<MockRaftActor> followerRaftActor = actorFactory.createTestActor(
-                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(ImmutableMap.of(LEADER_ID,
+                MockRaftActor.builder().id(FOLLOWER_ID).peerAddresses(Map.of(LEADER_ID,
                         leaderActor.path().toString())).config(configParams).persistent(Optional.of(false))
                         .props().withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(FOLLOWER_ID));
         followerRaftActor.underlyingActor().waitForInitializeBehaviorComplete();
 
-        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, Collections.<ReplicatedLogEntry>emptyList(),
-                -1, -1, (short)0), leaderActor);
+        followerRaftActor.tell(new AppendEntries(1, LEADER_ID, 0, 1, List.of(), -1, -1, (short)0), leaderActor);
 
         followerRaftActor.tell(new RemoveServer(FOLLOWER_ID), testKit.getRef());
         expectFirstMatching(leaderActor, RemoveServer.class);
@@ -860,7 +852,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         final String downNodeId = "downNode";
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(MockLeaderRaftActor.props(
-                ImmutableMap.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""),
+                Map.of(FOLLOWER_ID, follower1ActorPath, FOLLOWER_ID2, follower2ActorPath, downNodeId, ""),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -869,14 +861,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef follower1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower1Actor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID2, follower2ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
                         follower1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
 
         ActorRef follower2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower2Actor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID, follower1ActorPath, downNodeId, ""), configParams, NO_PERSISTENCE,
                         follower2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
 
@@ -920,7 +912,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         RaftActorContext initialActorContext = new MockRaftActorContext();
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, followerActorPath),
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, followerActorPath),
                         initialActorContext).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -929,7 +921,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         final ActorRef followerCollector =
                 actorFactory.createActor(MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString()),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString()),
                         configParams, NO_PERSISTENCE, followerCollector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()),
                 followerActorId);
@@ -953,7 +945,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         LOG.info("testRemoveServerLeaderWithNoFollowers starting");
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(Collections.<String, String>emptyMap(),
+                MockLeaderRaftActor.props(Map.of(),
                         new MockRaftActorContext()).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 actorFactory.generateActorId(LEADER_ID));
 
@@ -978,7 +970,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId);
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath,
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath,
                         FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext())
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
         ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor());
@@ -986,20 +978,20 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef follower1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower1RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
 
         ActorRef follower2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower2RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
 
         // Send first ChangeServersVotingStatus message
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)),
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(FOLLOWER_ID, false, FOLLOWER_ID2, false)),
                 testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
@@ -1025,7 +1017,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // Send second ChangeServersVotingStatus message
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(FOLLOWER_ID, true)), testKit.getRef());
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(FOLLOWER_ID, true)), testKit.getRef());
         reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
 
@@ -1057,7 +1049,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         final String follower2ActorPath = actorFactory.createTestActorPath(follower2ActorId);
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(FOLLOWER_ID, follower1ActorPath,
+                MockLeaderRaftActor.props(Map.of(FOLLOWER_ID, follower1ActorPath,
                         FOLLOWER_ID2, follower2ActorPath), new MockRaftActorContext())
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
         ActorRef leaderCollector = newLeaderCollectorActor(leaderActor.underlyingActor());
@@ -1065,20 +1057,20 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef follower1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower1RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID2, follower2ActorPath), configParams, NO_PERSISTENCE, follower1Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower1ActorId);
 
         ActorRef follower2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         final TestActorRef<CollectingMockRaftActor> follower2RaftActor = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(FOLLOWER_ID2, ImmutableMap.of(LEADER_ID, leaderActor.path().toString(),
+                CollectingMockRaftActor.props(FOLLOWER_ID2, Map.of(LEADER_ID, leaderActor.path().toString(),
                         FOLLOWER_ID, follower1ActorPath), configParams, NO_PERSISTENCE, follower2Collector)
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), follower2ActorId);
 
         // Send ChangeServersVotingStatus message
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, false)), testKit.getRef());
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
 
@@ -1107,10 +1099,10 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         LOG.info("testChangeLeaderToNonVotingInSingleNode starting");
 
         TestActorRef<MockLeaderRaftActor> leaderActor = actorFactory.createTestActor(
-                MockLeaderRaftActor.props(ImmutableMap.of(), new MockRaftActorContext())
+                MockLeaderRaftActor.props(Map.of(), new MockRaftActorContext())
                         .withDispatcher(Dispatchers.DefaultDispatcherId()), actorFactory.generateActorId(LEADER_ID));
 
-        leaderActor.tell(new ChangeServersVotingStatus(ImmutableMap.of(LEADER_ID, false)), testKit.getRef());
+        leaderActor.tell(new ChangeServersVotingStatus(Map.of(LEADER_ID, false)), testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.INVALID_REQUEST, reply.getStatus());
 
@@ -1132,7 +1124,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // via the server config. The server config will also contain 2 voting peers that are down (ie no
         // actors created).
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, false),
                 new ServerInfo("downNode1", true), new ServerInfo("downNode2", true)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
@@ -1147,14 +1139,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1181,7 +1173,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         // First send the message such that node1 has no peer address for node2 - should fail.
 
-        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true,
+        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true,
                 node2ID, true, "downNode1", false, "downNode2", false));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
@@ -1192,7 +1184,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         long term = node1RaftActor.getRaftActorContext().getTermInformation().getCurrentTerm();
         node1RaftActorRef.tell(new AppendEntries(term, "downNode1", -1L, -1L,
-                Collections.<ReplicatedLogEntry>emptyList(), 0, -1, (short)1), ActorRef.noSender());
+                List.of(), 0, -1, (short)1), ActorRef.noSender());
 
         // Wait for the ElectionTimeout to clear the leaderId. The leaderId must be null so on the next
         // ChangeServersVotingStatus message, it will try to elect a leader.
@@ -1239,7 +1231,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 ? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID)
                         ? actorFactory.createTestActorPath(node2ID) : null;
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, true)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
 
@@ -1255,7 +1247,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams1,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams1,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
@@ -1265,7 +1257,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams2,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams2,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1277,13 +1269,13 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         node2RaftActor.setDropMessageOfType(RequestVote.class);
 
-        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true));
+        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.NO_LEADER, reply.getStatus());
 
-        assertEquals("Server config", Sets.newHashSet(nonVotingServer(node1ID), votingServer(node2ID)),
-                Sets.newHashSet(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig()));
+        assertEquals("Server config", Set.of(nonVotingServer(node1ID), votingServer(node2ID)),
+            Set.copyOf(node1RaftActor.getRaftActorContext().getPeerServerInfo(true).getServerConfig()));
         assertEquals("getRaftState", RaftState.Follower, node1RaftActor.getRaftState());
 
         LOG.info("testChangeToVotingWithNoLeaderAndElectionTimeout ending");
@@ -1305,7 +1297,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         configParams.setElectionTimeoutFactor(3);
         configParams.setPeerAddressResolver(peerAddressResolver);
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, false)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
 
@@ -1320,14 +1312,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         final CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1337,7 +1329,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         // forward the request to node2.
 
         ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(
-                ImmutableMap.of(node1ID, true, node2ID, true));
+                Map.of(node1ID, true, node2ID, true));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
         ServerChangeReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ServerChangeReply.class);
         assertEquals("getStatus", ServerChangeStatus.OK, reply.getStatus());
@@ -1371,7 +1363,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
                 ? actorFactory.createTestActorPath(node1ID) : peerId.equals(node2ID)
                         ? actorFactory.createTestActorPath(node2ID) : null);
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(node1ID, false), new ServerInfo(node2ID, true)));
         SimpleReplicatedLogEntry persistedServerConfigEntry = new SimpleReplicatedLogEntry(0, 1, persistedServerConfig);
 
@@ -1383,14 +1375,14 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ActorRef node1Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node1RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node1ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node1ID, Map.of(), configParams,
                         PERSISTENT, node1Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node1ID);
         final CollectingMockRaftActor node1RaftActor = node1RaftActorRef.underlyingActor();
 
         ActorRef node2Collector = actorFactory.createActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId("collector"));
         TestActorRef<CollectingMockRaftActor> node2RaftActorRef = actorFactory.createTestActor(
-                CollectingMockRaftActor.props(node2ID, ImmutableMap.<String, String>of(), configParams,
+                CollectingMockRaftActor.props(node2ID, Map.of(), configParams,
                         PERSISTENT, node2Collector).withDispatcher(Dispatchers.DefaultDispatcherId()), node2ID);
         CollectingMockRaftActor node2RaftActor = node2RaftActorRef.underlyingActor();
 
@@ -1401,7 +1393,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         node2RaftActor.setDropMessageOfType(RequestVote.class);
 
-        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(ImmutableMap.of(node1ID, true,
+        ChangeServersVotingStatus changeServers = new ChangeServersVotingStatus(Map.of(node1ID, true,
                 node2ID, true));
         node1RaftActorRef.tell(changeServers, testKit.getRef());
 
@@ -1426,7 +1418,7 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         LOG.info("testChangeToVotingWithNoLeaderAndOtherLeaderElected ending");
     }
 
-    private static void verifyRaftState(RaftState expState, RaftActor... raftActors) {
+    private static void verifyRaftState(final RaftState expState, final RaftActor... raftActors) {
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
             for (RaftActor raftActor : raftActors) {
@@ -1439,33 +1431,34 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         fail("None of the RaftActors have state " + expState);
     }
 
-    private static ServerInfo votingServer(String id) {
+    private static ServerInfo votingServer(final String id) {
         return new ServerInfo(id, true);
     }
 
-    private static ServerInfo nonVotingServer(String id) {
+    private static ServerInfo nonVotingServer(final String id) {
         return new ServerInfo(id, false);
     }
 
-    private ActorRef newLeaderCollectorActor(MockLeaderRaftActor leaderRaftActor) {
+    private ActorRef newLeaderCollectorActor(final MockLeaderRaftActor leaderRaftActor) {
         return newCollectorActor(leaderRaftActor, LEADER_ID);
     }
 
-    private ActorRef newCollectorActor(AbstractMockRaftActor raftActor, String id) {
+    private ActorRef newCollectorActor(final AbstractMockRaftActor raftActor, final String id) {
         ActorRef collectorActor = actorFactory.createTestActor(
                 MessageCollectorActor.props(), actorFactory.generateActorId(id + "Collector"));
         raftActor.setCollectorActor(collectorActor);
         return collectorActor;
     }
 
-    private static void verifyServerConfigurationPayloadEntry(ReplicatedLog log, ServerInfo... expected) {
+    private static void verifyServerConfigurationPayloadEntry(final ReplicatedLog log, final ServerInfo... expected) {
         ReplicatedLogEntry logEntry = log.get(log.lastIndex());
         assertEquals("Last log entry payload class", ServerConfigurationPayload.class, logEntry.getData().getClass());
         ServerConfigurationPayload payload = (ServerConfigurationPayload)logEntry.getData();
-        assertEquals("Server config", Sets.newHashSet(expected), Sets.newHashSet(payload.getServerConfig()));
+        assertEquals("Server config", Set.of(expected), Set.copyOf(payload.getServerConfig()));
     }
 
-    private static RaftActorContextImpl newFollowerContext(String id, TestActorRef<? extends AbstractActor> actor) {
+    private static RaftActorContextImpl newFollowerContext(final String id,
+            final TestActorRef<? extends AbstractActor> actor) {
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
         configParams.setHeartBeatInterval(new FiniteDuration(100, TimeUnit.MILLISECONDS));
         configParams.setElectionTimeoutFactor(100000);
@@ -1473,31 +1466,31 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
         ElectionTermImpl termInfo = new ElectionTermImpl(noPersistence, id, LOG);
         termInfo.update(1, LEADER_ID);
         return new RaftActorContextImpl(actor, actor.underlyingActor().getContext(),
-                id, termInfo, -1, -1, ImmutableMap.of(LEADER_ID, ""), configParams,
-                noPersistence, applyState -> actor.tell(applyState, actor), LOG);
+                id, termInfo, -1, -1, Map.of(LEADER_ID, ""), configParams,
+                noPersistence, applyState -> actor.tell(applyState, actor), LOG, MoreExecutors.directExecutor());
     }
 
     abstract static class AbstractMockRaftActor extends MockRaftActor {
         private volatile ActorRef collectorActor;
         private volatile Class<?> dropMessageOfType;
 
-        AbstractMockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config,
-                boolean persistent, ActorRef collectorActor) {
-            super(builder().id(id).peerAddresses(peerAddresses).config(config.get())
+        AbstractMockRaftActor(final String id, final Map<String, String> peerAddresses,
+                final Optional<ConfigParams> config, final boolean persistent, final ActorRef collectorActor) {
+            super(builder().id(id).peerAddresses(peerAddresses).config(config.orElseThrow())
                     .persistent(Optional.of(persistent)));
             this.collectorActor = collectorActor;
         }
 
-        void setDropMessageOfType(Class<?> dropMessageOfType) {
+        void setDropMessageOfType(final Class<?> dropMessageOfType) {
             this.dropMessageOfType = dropMessageOfType;
         }
 
-        void setCollectorActor(ActorRef collectorActor) {
+        void setCollectorActor(final ActorRef collectorActor) {
             this.collectorActor = collectorActor;
         }
 
         @Override
-        public void handleCommand(Object message) {
+        public void handleCommand(final Object message) {
             if (dropMessageOfType == null || !dropMessageOfType.equals(message.getClass())) {
                 super.handleCommand(message);
             }
@@ -1510,30 +1503,31 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
     public static class CollectingMockRaftActor extends AbstractMockRaftActor {
 
-        CollectingMockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config,
-                boolean persistent, ActorRef collectorActor) {
+        CollectingMockRaftActor(final String id, final Map<String, String> peerAddresses,
+                final Optional<ConfigParams> config, final boolean persistent, final ActorRef collectorActor) {
             super(id, peerAddresses, config, persistent, collectorActor);
             snapshotCohortDelegate = new RaftActorSnapshotCohort() {
                 @Override
-                public void createSnapshot(ActorRef actorRef, java.util.Optional<OutputStream> installSnapshotStream) {
+                public void createSnapshot(final ActorRef actorRef,
+                        final Optional<OutputStream> installSnapshotStream) {
                     actorRef.tell(new CaptureSnapshotReply(ByteState.empty(), installSnapshotStream), actorRef);
                 }
 
                 @Override
                 public void applySnapshot(
-                        org.opendaylight.controller.cluster.raft.persisted.Snapshot.State snapshotState) {
+                        final org.opendaylight.controller.cluster.raft.persisted.Snapshot.State snapshotState) {
                 }
 
                 @Override
                 public org.opendaylight.controller.cluster.raft.persisted.Snapshot.State deserializeSnapshot(
-                        ByteSource snapshotBytes) {
+                        final ByteSource snapshotBytes) {
                     throw new UnsupportedOperationException();
                 }
             };
         }
 
         public static Props props(final String id, final Map<String, String> peerAddresses,
-                ConfigParams config, boolean persistent, ActorRef collectorActor) {
+                final ConfigParams config, final boolean persistent, final ActorRef collectorActor) {
 
             return Props.create(CollectingMockRaftActor.class, id, peerAddresses, Optional.of(config),
                     persistent, collectorActor);
@@ -1542,8 +1536,8 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
     }
 
     public static class MockLeaderRaftActor extends AbstractMockRaftActor {
-        public MockLeaderRaftActor(Map<String, String> peerAddresses, ConfigParams config,
-                RaftActorContext fromContext) {
+        public MockLeaderRaftActor(final Map<String, String> peerAddresses, final ConfigParams config,
+                final RaftActorContext fromContext) {
             super(LEADER_ID, peerAddresses, Optional.of(config), NO_PERSISTENCE, null);
             setPersistence(false);
 
@@ -1568,16 +1562,16 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
 
         @Override
         @SuppressWarnings("checkstyle:IllegalCatch")
-        public void createSnapshot(ActorRef actorRef, java.util.Optional<OutputStream> installSnapshotStream) {
-            MockSnapshotState snapshotState = new MockSnapshotState(new ArrayList<>(getState()));
+        public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
+            MockSnapshotState snapshotState = new MockSnapshotState(List.copyOf(getState()));
             if (installSnapshotStream.isPresent()) {
-                SerializationUtils.serialize(snapshotState, installSnapshotStream.get());
+                SerializationUtils.serialize(snapshotState, installSnapshotStream.orElseThrow());
             }
 
             actorRef.tell(new CaptureSnapshotReply(snapshotState, installSnapshotStream), actorRef);
         }
 
-        static Props props(Map<String, String> peerAddresses, RaftActorContext fromContext) {
+        static Props props(final Map<String, String> peerAddresses, final RaftActorContext fromContext) {
             DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl();
             configParams.setHeartBeatInterval(new FiniteDuration(100, TimeUnit.MILLISECONDS));
             configParams.setElectionTimeoutFactor(10);
@@ -1586,13 +1580,12 @@ public class RaftActorServerConfigurationSupportTest extends AbstractActorTest {
     }
 
     public static class MockNewFollowerRaftActor extends AbstractMockRaftActor {
-        public MockNewFollowerRaftActor(ConfigParams config, ActorRef collectorActor) {
-            super(NEW_SERVER_ID, Maps.<String, String>newHashMap(), Optional.of(config), NO_PERSISTENCE,
-                    collectorActor);
+        public MockNewFollowerRaftActor(final ConfigParams config, final ActorRef collectorActor) {
+            super(NEW_SERVER_ID, Map.of(), Optional.of(config), NO_PERSISTENCE, collectorActor);
             setPersistence(false);
         }
 
-        static Props props(ConfigParams config, ActorRef collectorActor) {
+        static Props props(final ConfigParams config, final ActorRef collectorActor) {
             return Props.create(MockNewFollowerRaftActor.class, config, collectorActor);
         }
     }
index 11b5000ad5816220a32dfb6be554702ddd761957..87717ccfbecd3a5040b2880566a136ffea181387 100644 (file)
@@ -10,7 +10,6 @@ package org.opendaylight.controller.cluster.raft;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 
@@ -18,13 +17,16 @@ import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotFailure;
 import akka.persistence.SaveSnapshotSuccess;
 import akka.persistence.SnapshotMetadata;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.io.OutputStream;
-import java.util.Collections;
+import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
@@ -39,8 +41,8 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class RaftActorSnapshotMessageSupportTest {
-
     private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
 
     @Mock
@@ -65,11 +67,9 @@ public class RaftActorSnapshotMessageSupportTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
-
         context = new RaftActorContextImpl(mockRaftActorRef, null, "test",
-                new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Collections.<String,String>emptyMap(),
-                configParams, mockPersistence, applyState -> { }, LOG) {
+                new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Map.of(),
+                configParams, mockPersistence, applyState -> { }, LOG, MoreExecutors.directExecutor()) {
             @Override
             public SnapshotManager getSnapshotManager() {
                 return mockSnapshotManager;
@@ -78,16 +78,14 @@ public class RaftActorSnapshotMessageSupportTest {
 
         support = new RaftActorSnapshotMessageSupport(context, mockCohort);
 
-        doReturn(true).when(mockPersistence).isRecoveryApplicable();
-
         context.setReplicatedLog(ReplicatedLogImpl.newInstance(context));
     }
 
-    private void sendMessageToSupport(Object message) {
+    private void sendMessageToSupport(final Object message) {
         sendMessageToSupport(message, true);
     }
 
-    private void sendMessageToSupport(Object message, boolean expHandled) {
+    private void sendMessageToSupport(final Object message, final boolean expHandled) {
         boolean handled = support.handleSnapshotMessage(message, mockRaftActorRef);
         assertEquals("complete", expHandled, handled);
     }
@@ -99,7 +97,7 @@ public class RaftActorSnapshotMessageSupportTest {
         long lastIndexDuringSnapshotCapture = 2;
         byte[] snapshotBytes = {1,2,3,4,5};
 
-        Snapshot snapshot = Snapshot.create(ByteState.of(snapshotBytes), Collections.<ReplicatedLogEntry>emptyList(),
+        Snapshot snapshot = Snapshot.create(ByteState.of(snapshotBytes), List.of(),
                 lastIndexDuringSnapshotCapture, 1, lastAppliedDuringSnapshotCapture, 1, -1, null, null);
 
         ApplySnapshot applySnapshot = new ApplySnapshot(snapshot);
index 2c975ecf08e1d5aecdd13685f0be90ad4b3eed7e..fde56a9a21e1145895282d0a97fe8dd736e4807e 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static org.awaitility.Awaitility.await;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNotSame;
@@ -22,6 +23,7 @@ import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
@@ -36,18 +38,15 @@ import akka.persistence.SnapshotOffer;
 import akka.protobuf.ByteString;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.ByteArrayOutputStream;
 import java.io.ObjectOutputStream;
 import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import org.junit.After;
@@ -59,6 +58,8 @@ import org.opendaylight.controller.cluster.NonPersistentDataProvider;
 import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
 import org.opendaylight.controller.cluster.notifications.RoleChanged;
+import org.opendaylight.controller.cluster.raft.AbstractRaftActorIntegrationTest.TestPersist;
+import org.opendaylight.controller.cluster.raft.AbstractRaftActorIntegrationTest.TestRaftActor;
 import org.opendaylight.controller.cluster.raft.MockRaftActor.MockSnapshotState;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
@@ -136,22 +137,20 @@ public class RaftActorTest extends AbstractActorTest {
         // log entry.
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
-        ImmutableMap<String, String> peerAddresses = ImmutableMap.<String, String>builder()
-                .put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
         ActorRef followerActor = factory.createActor(MockRaftActor.props(persistenceId,
                 peerAddresses, config), persistenceId);
 
         kit.watch(followerActor);
 
-        List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
-        ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E"));
-        snapshotUnappliedEntries.add(entry1);
+        List<ReplicatedLogEntry> snapshotUnappliedEntries = List.of(
+            new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
 
         int lastAppliedDuringSnapshotCapture = 3;
         int lastIndexDuringSnapshotCapture = 4;
 
         // 4 messages as part of snapshot, which are applied to state
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(
                 new MockRaftActorContext.MockPayload("A"),
                 new MockRaftActorContext.MockPayload("B"),
                 new MockRaftActorContext.MockPayload("C"),
@@ -162,13 +161,9 @@ public class RaftActorTest extends AbstractActorTest {
         InMemorySnapshotStore.addSnapshot(persistenceId, snapshot);
 
         // add more entries after snapshot is taken
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
         ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(5, 1, new MockRaftActorContext.MockPayload("F", 2));
         ReplicatedLogEntry entry3 = new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("G", 3));
         ReplicatedLogEntry entry4 = new SimpleReplicatedLogEntry(7, 1, new MockRaftActorContext.MockPayload("H", 4));
-        entries.add(entry2);
-        entries.add(entry3);
-        entries.add(entry4);
 
         final int lastAppliedToState = 5;
         final int lastIndex = 7;
@@ -194,7 +189,7 @@ public class RaftActorTest extends AbstractActorTest {
         mockRaftActor.waitForRecoveryComplete();
 
         RaftActorContext context = mockRaftActor.getRaftActorContext();
-        assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
+        assertEquals("Journal log size", snapshotUnappliedEntries.size() + 3,
                 context.getReplicatedLog().size());
         assertEquals("Journal data size", 10, context.getReplicatedLog().dataSize());
         assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
@@ -218,8 +213,7 @@ public class RaftActorTest extends AbstractActorTest {
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(),
-                config, createProvider()), persistenceId);
+                Map.of("member1", "address"), config, createProvider()), persistenceId);
 
         MockRaftActor mockRaftActor = ref.underlyingActor();
 
@@ -241,8 +235,7 @@ public class RaftActorTest extends AbstractActorTest {
         InMemoryJournal.addWriteMessagesCompleteLatch(persistenceId, 1);
 
         TestActorRef<MockRaftActor> ref = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(),
-                config, createProvider())
+                Map.of("member1", "address"), config, createProvider())
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
 
         InMemoryJournal.waitForWriteMessagesComplete(persistenceId);
@@ -253,8 +246,7 @@ public class RaftActorTest extends AbstractActorTest {
         factory.killActor(ref, kit);
 
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
-        ref = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(), config,
+        ref = factory.createTestActor(MockRaftActor.props(persistenceId, Map.of("member1", "address"), config,
                 createProvider()).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 factory.generateActorId("follower-"));
 
@@ -279,7 +271,7 @@ public class RaftActorTest extends AbstractActorTest {
         config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                Collections.<String, String>emptyMap(), config), persistenceId);
+                Map.of(), config), persistenceId);
 
         MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
 
@@ -290,7 +282,7 @@ public class RaftActorTest extends AbstractActorTest {
         mockRaftActor.setRaftActorRecoverySupport(mockSupport);
 
         Snapshot snapshot = Snapshot.create(ByteState.of(new byte[]{1}),
-                Collections.<ReplicatedLogEntry>emptyList(), 3, 1, 3, 1, -1, null, null);
+                List.of(), 3, 1, 3, 1, -1, null, null);
         SnapshotOffer snapshotOffer = new SnapshotOffer(new SnapshotMetadata("test", 6, 12345), snapshot);
         mockRaftActor.handleRecover(snapshotOffer);
 
@@ -331,29 +323,29 @@ public class RaftActorTest extends AbstractActorTest {
         // Wait for akka's recovery to complete so it doesn't interfere.
         mockRaftActor.waitForRecoveryComplete();
 
-        ApplySnapshot applySnapshot = new ApplySnapshot(mock(Snapshot.class));
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class));
+        ApplySnapshot applySnapshot = new ApplySnapshot(
+            Snapshot.create(null, null, 0, 0, 0, 0, 0, persistenceId, null));
+        when(mockSupport.handleSnapshotMessage(same(applySnapshot), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(applySnapshot);
 
-        CaptureSnapshotReply captureSnapshotReply = new CaptureSnapshotReply(ByteState.empty(),
-                java.util.Optional.empty());
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class));
+        CaptureSnapshotReply captureSnapshotReply = new CaptureSnapshotReply(ByteState.empty(), Optional.empty());
+        when(mockSupport.handleSnapshotMessage(same(captureSnapshotReply), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(captureSnapshotReply);
 
         SaveSnapshotSuccess saveSnapshotSuccess = new SaveSnapshotSuccess(new SnapshotMetadata("", 0L, 0L));
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(saveSnapshotSuccess), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(saveSnapshotSuccess);
 
         SaveSnapshotFailure saveSnapshotFailure = new SaveSnapshotFailure(new SnapshotMetadata("", 0L, 0L),
                 new Throwable());
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(saveSnapshotFailure), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(saveSnapshotFailure);
 
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT),
-                any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT),
+            any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(RaftActorSnapshotMessageSupport.COMMIT_SNAPSHOT);
 
-        doReturn(true).when(mockSupport).handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class));
+        when(mockSupport.handleSnapshotMessage(same(GetSnapshot.INSTANCE), any(ActorRef.class))).thenReturn(true);
         mockRaftActor.handleCommand(GetSnapshot.INSTANCE);
 
         verify(mockSupport).handleSnapshotMessage(same(applySnapshot), any(ActorRef.class));
@@ -377,7 +369,7 @@ public class RaftActorTest extends AbstractActorTest {
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                Collections.<String, String>emptyMap(), config, dataPersistenceProvider), persistenceId);
+                Map.of(), config, dataPersistenceProvider), persistenceId);
 
         MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
 
@@ -385,7 +377,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         mockRaftActor.waitUntilLeader();
 
-        mockRaftActor.onReceiveCommand(new ApplyJournalEntries(10));
+        mockRaftActor.handleCommand(new ApplyJournalEntries(10));
 
         verify(dataPersistenceProvider).persistAsync(any(ApplyJournalEntries.class), any(Procedure.class));
     }
@@ -401,7 +393,7 @@ public class RaftActorTest extends AbstractActorTest {
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                Collections.<String, String>emptyMap(), config, dataPersistenceProvider), persistenceId);
+                Map.of(), config, dataPersistenceProvider), persistenceId);
 
         MockRaftActor mockRaftActor = mockActorRef.underlyingActor();
 
@@ -514,7 +506,7 @@ public class RaftActorTest extends AbstractActorTest {
         String persistenceId = factory.generateActorId("notifier-");
 
         factory.createActor(MockRaftActor.builder().id(persistenceId)
-                .peerAddresses(ImmutableMap.of("leader", "fake/path"))
+                .peerAddresses(Map.of("leader", "fake/path"))
                 .config(config).roleChangeNotifier(notifierActor).props());
 
         List<RoleChanged> matches =  null;
@@ -556,8 +548,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
-        Map<String, String> peerAddresses = new HashMap<>();
-        peerAddresses.put(follower1Id, followerActor1.path().toString());
+        Map<String, String> peerAddresses = Map.of(follower1Id, followerActor1.path().toString());
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -590,27 +581,27 @@ public class RaftActorTest extends AbstractActorTest {
 
         assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
         //fake snapshot on index 5
-        leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 5, 1, (short)0));
+        leaderActor.handleCommand(new AppendEntriesReply(follower1Id, 1, true, 5, 1, (short)0));
 
         assertEquals(8, leaderActor.getReplicatedLog().size());
 
         //fake snapshot on index 6
         assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
-        leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 6, 1, (short)0));
+        leaderActor.handleCommand(new AppendEntriesReply(follower1Id, 1, true, 6, 1, (short)0));
         assertEquals(8, leaderActor.getReplicatedLog().size());
 
         assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
 
         assertEquals(8, leaderActor.getReplicatedLog().size());
 
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(
                 new MockRaftActorContext.MockPayload("foo-0"),
                 new MockRaftActorContext.MockPayload("foo-1"),
                 new MockRaftActorContext.MockPayload("foo-2"),
                 new MockRaftActorContext.MockPayload("foo-3"),
                 new MockRaftActorContext.MockPayload("foo-4")));
 
-        leaderActor.getRaftActorContext().getSnapshotManager().persist(snapshotState, java.util.Optional.empty(),
+        leaderActor.getRaftActorContext().getSnapshotManager().persist(snapshotState, Optional.empty(),
                 Runtime.getRuntime().totalMemory());
 
         assertTrue(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
@@ -627,7 +618,7 @@ public class RaftActorTest extends AbstractActorTest {
                 new SimpleReplicatedLogEntry(8, 1, new MockRaftActorContext.MockPayload("foo-8")));
 
         //fake snapshot on index 7, since lastApplied = 7 , we would keep the last applied
-        leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 7, 1, (short)0));
+        leaderActor.handleCommand(new AppendEntriesReply(follower1Id, 1, true, 7, 1, (short)0));
         assertEquals(2, leaderActor.getReplicatedLog().size());
         assertEquals(8, leaderActor.getReplicatedLog().lastIndex());
     }
@@ -645,8 +636,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
-        Map<String, String> peerAddresses = new HashMap<>();
-        peerAddresses.put(leaderId, leaderActor1.path().toString());
+        Map<String, String> peerAddresses = Map.of(leaderId, leaderActor1.path().toString());
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -679,30 +669,30 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals(6, followerActor.getReplicatedLog().size());
 
         //fake snapshot on index 6
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                (ReplicatedLogEntry) new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6")));
-        followerActor.onReceiveCommand(new AppendEntries(1, leaderId, 5, 1, entries, 5, 5, (short)0));
+        List<ReplicatedLogEntry> entries = List.of(
+                new SimpleReplicatedLogEntry(6, 1, new MockRaftActorContext.MockPayload("foo-6")));
+        followerActor.handleCommand(new AppendEntries(1, leaderId, 5, 1, entries, 5, 5, (short)0));
         assertEquals(7, followerActor.getReplicatedLog().size());
 
         //fake snapshot on index 7
         assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
 
-        entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(7, 1,
+        entries = List.of(new SimpleReplicatedLogEntry(7, 1,
                 new MockRaftActorContext.MockPayload("foo-7")));
-        followerActor.onReceiveCommand(new AppendEntries(1, leaderId, 6, 1, entries, 6, 6, (short) 0));
+        followerActor.handleCommand(new AppendEntries(1, leaderId, 6, 1, entries, 6, 6, (short) 0));
         assertEquals(8, followerActor.getReplicatedLog().size());
 
         assertEquals(RaftState.Follower, followerActor.getCurrentBehavior().state());
 
 
-        ByteString snapshotBytes = fromObject(Arrays.asList(
+        ByteString snapshotBytes = fromObject(List.of(
                 new MockRaftActorContext.MockPayload("foo-0"),
                 new MockRaftActorContext.MockPayload("foo-1"),
                 new MockRaftActorContext.MockPayload("foo-2"),
                 new MockRaftActorContext.MockPayload("foo-3"),
                 new MockRaftActorContext.MockPayload("foo-4")));
-        followerActor.onReceiveCommand(new CaptureSnapshotReply(ByteState.of(snapshotBytes.toByteArray()),
-                java.util.Optional.empty()));
+        followerActor.handleCommand(new CaptureSnapshotReply(ByteState.of(snapshotBytes.toByteArray()),
+                Optional.empty()));
         assertTrue(followerActor.getRaftActorContext().getSnapshotManager().isCapturing());
 
         // The commit is needed to complete the snapshot creation process
@@ -712,10 +702,9 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
         assertEquals(7, followerActor.getReplicatedLog().lastIndex());
 
-        entries = Arrays.asList((ReplicatedLogEntry) new SimpleReplicatedLogEntry(8, 1,
-                new MockRaftActorContext.MockPayload("foo-7")));
+        entries = List.of(new SimpleReplicatedLogEntry(8, 1, new MockRaftActorContext.MockPayload("foo-7")));
         // send an additional entry 8 with leaderCommit = 7
-        followerActor.onReceiveCommand(new AppendEntries(1, leaderId, 7, 1, entries, 7, 7, (short) 0));
+        followerActor.handleCommand(new AppendEntries(1, leaderId, 7, 1, entries, 7, 7, (short) 0));
 
         // 7 and 8, as lastapplied is 7
         assertEquals(2, followerActor.getReplicatedLog().size());
@@ -736,9 +725,9 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
-        Map<String, String> peerAddresses = new HashMap<>();
-        peerAddresses.put(follower1Id, followerActor1.path().toString());
-        peerAddresses.put(follower2Id, followerActor2.path().toString());
+        Map<String, String> peerAddresses = Map.of(
+            follower1Id, followerActor1.path().toString(),
+            follower2Id, followerActor2.path().toString());
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -765,17 +754,17 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals(5, leaderActor.getReplicatedLog().size());
         assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
 
-        leaderActor.onReceiveCommand(new AppendEntriesReply(follower1Id, 1, true, 9, 1, (short) 0));
+        leaderActor.handleCommand(new AppendEntriesReply(follower1Id, 1, true, 9, 1, (short) 0));
         assertEquals(5, leaderActor.getReplicatedLog().size());
         assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
 
         // set the 2nd follower nextIndex to 1 which has been snapshotted
-        leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 0, 1, (short)0));
+        leaderActor.handleCommand(new AppendEntriesReply(follower2Id, 1, true, 0, 1, (short)0));
         assertEquals(5, leaderActor.getReplicatedLog().size());
         assertEquals(RaftState.Leader, leaderActor.getCurrentBehavior().state());
 
         // simulate a real snapshot
-        leaderActor.onReceiveCommand(SendHeartBeat.INSTANCE);
+        leaderActor.handleCommand(SendHeartBeat.INSTANCE);
         assertEquals(5, leaderActor.getReplicatedLog().size());
         assertEquals(String.format("expected to be Leader but was %s. Current Leader = %s ",
                 leaderActor.getCurrentBehavior().state(), leaderActor.getLeaderId()),
@@ -783,25 +772,25 @@ public class RaftActorTest extends AbstractActorTest {
 
 
         //reply from a slow follower does not initiate a fake snapshot
-        leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 9, 1, (short)0));
+        leaderActor.handleCommand(new AppendEntriesReply(follower2Id, 1, true, 9, 1, (short)0));
         assertEquals("Fake snapshot should not happen when Initiate is in progress", 5,
                 leaderActor.getReplicatedLog().size());
 
-        ByteString snapshotBytes = fromObject(Arrays.asList(
+        ByteString snapshotBytes = fromObject(List.of(
                 new MockRaftActorContext.MockPayload("foo-0"),
                 new MockRaftActorContext.MockPayload("foo-1"),
                 new MockRaftActorContext.MockPayload("foo-2"),
                 new MockRaftActorContext.MockPayload("foo-3"),
                 new MockRaftActorContext.MockPayload("foo-4")));
-        leaderActor.onReceiveCommand(new CaptureSnapshotReply(ByteState.of(snapshotBytes.toByteArray()),
-                java.util.Optional.empty()));
+        leaderActor.handleCommand(new CaptureSnapshotReply(ByteState.of(snapshotBytes.toByteArray()),
+                Optional.empty()));
         assertTrue(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
 
         assertEquals("Real snapshot didn't clear the log till replicatedToAllIndex", 0,
                 leaderActor.getReplicatedLog().size());
 
         //reply from a slow follower after should not raise errors
-        leaderActor.onReceiveCommand(new AppendEntriesReply(follower2Id, 1, true, 5, 1, (short) 0));
+        leaderActor.handleCommand(new AppendEntriesReply(follower2Id, 1, true, 5, 1, (short) 0));
         assertEquals(0, leaderActor.getReplicatedLog().size());
     }
 
@@ -815,7 +804,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = createProvider();
 
-        Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -841,7 +830,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         // Now send a CaptureSnapshotReply
         mockActorRef.tell(new CaptureSnapshotReply(ByteState.of(fromObject("foo").toByteArray()),
-                java.util.Optional.empty()), mockActorRef);
+                Optional.empty()), mockActorRef);
 
         // Trimming log in this scenario is a no-op
         assertEquals(-1, leaderActor.getReplicatedLog().getSnapshotIndex());
@@ -859,7 +848,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = createProvider();
 
-        Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -882,7 +871,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         // Now send a CaptureSnapshotReply
         mockActorRef.tell(new CaptureSnapshotReply(ByteState.of(fromObject("foo").toByteArray()),
-                java.util.Optional.empty()), mockActorRef);
+                Optional.empty()), mockActorRef);
 
         // Trimming log in this scenario is a no-op
         assertEquals(3, leaderActor.getReplicatedLog().getSnapshotIndex());
@@ -905,7 +894,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         DataPersistenceProvider dataPersistenceProvider = createProvider();
 
-        Map<String, String> peerAddresses = ImmutableMap.<String, String>builder().build();
+        Map<String, String> peerAddresses = Map.of();
 
         TestActorRef<MockRaftActor> mockActorRef = factory.createTestActor(
                 MockRaftActor.props(persistenceId, peerAddresses, config, dataPersistenceProvider), persistenceId);
@@ -959,8 +948,7 @@ public class RaftActorTest extends AbstractActorTest {
     public void testUpdateConfigParam() {
         DefaultConfigParamsImpl emptyConfig = new DefaultConfigParamsImpl();
         String persistenceId = factory.generateActorId("follower-");
-        ImmutableMap<String, String> peerAddresses =
-            ImmutableMap.<String, String>builder().put("member1", "address").build();
+        Map<String, String> peerAddresses = Map.of("member1", "address");
         DataPersistenceProvider dataPersistenceProvider = mock(DataPersistenceProvider.class);
 
         TestActorRef<MockRaftActor> actorRef = factory.createTestActor(
@@ -1025,7 +1013,7 @@ public class RaftActorTest extends AbstractActorTest {
                 new MockRaftActorContext.MockPayload("C")));
 
         TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
-                ImmutableMap.<String, String>builder().put("member1", "address").build(), config)
+                Map.of("member1", "address"), config)
                     .withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
         MockRaftActor mockRaftActor = raftActorRef.underlyingActor();
 
@@ -1037,10 +1025,10 @@ public class RaftActorTest extends AbstractActorTest {
 
         ArgumentCaptor<ActorRef> replyActor = ArgumentCaptor.forClass(ActorRef.class);
         verify(mockRaftActor.snapshotCohortDelegate, timeout(5000)).createSnapshot(replyActor.capture(),
-                eq(java.util.Optional.empty()));
+                eq(Optional.empty()));
 
         byte[] stateSnapshot = new byte[]{1,2,3};
-        replyActor.getValue().tell(new CaptureSnapshotReply(ByteState.of(stateSnapshot), java.util.Optional.empty()),
+        replyActor.getValue().tell(new CaptureSnapshotReply(ByteState.of(stateSnapshot), Optional.empty()),
                 ActorRef.noSender());
 
         GetSnapshotReply reply = kit.expectMsgClass(GetSnapshotReply.class);
@@ -1101,13 +1089,13 @@ public class RaftActorTest extends AbstractActorTest {
         DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
         config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
 
-        List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
-        snapshotUnappliedEntries.add(new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
+        List<ReplicatedLogEntry> snapshotUnappliedEntries = List.of(
+            new SimpleReplicatedLogEntry(4, 1, new MockRaftActorContext.MockPayload("E")));
 
         int snapshotLastApplied = 3;
         int snapshotLastIndex = 4;
 
-        MockSnapshotState snapshotState = new MockSnapshotState(Arrays.asList(
+        MockSnapshotState snapshotState = new MockSnapshotState(List.of(
                 new MockRaftActorContext.MockPayload("A"),
                 new MockRaftActorContext.MockPayload("B"),
                 new MockRaftActorContext.MockPayload("C"),
@@ -1148,7 +1136,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         // Test with data persistence disabled
 
-        snapshot = Snapshot.create(EmptyState.INSTANCE, Collections.<ReplicatedLogEntry>emptyList(),
+        snapshot = Snapshot.create(EmptyState.INSTANCE, List.of(),
                 -1, -1, -1, -1, 5, "member-1", null);
 
         persistenceId = factory.generateActorId("test-actor-");
@@ -1178,9 +1166,9 @@ public class RaftActorTest extends AbstractActorTest {
         DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
         config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
 
-        List<MockPayload> state = Arrays.asList(new MockRaftActorContext.MockPayload("A"));
+        List<MockPayload> state = List.of(new MockRaftActorContext.MockPayload("A"));
         Snapshot snapshot = Snapshot.create(ByteState.of(fromObject(state).toByteArray()),
-                Arrays.<ReplicatedLogEntry>asList(), 5, 2, 5, 2, 2, "member-1", null);
+                List.of(), 5, 2, 5, 2, 2, "member-1", null);
 
         InMemoryJournal.addEntry(persistenceId, 1, new SimpleReplicatedLogEntry(0, 1,
                 new MockRaftActorContext.MockPayload("B")));
@@ -1216,7 +1204,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         String persistenceId = factory.generateActorId("test-actor-");
         InMemoryJournal.addEntry(persistenceId, 1,  new SimpleReplicatedLogEntry(0, 1,
-                new ServerConfigurationPayload(Arrays.asList(new ServerInfo(persistenceId, false)))));
+                new ServerConfigurationPayload(List.of(new ServerInfo(persistenceId, false)))));
 
         TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.builder().id(persistenceId)
                 .config(config).props().withDispatcher(Dispatchers.DefaultDispatcherId()), persistenceId);
@@ -1250,7 +1238,7 @@ public class RaftActorTest extends AbstractActorTest {
 
         mockRaftActor.waitForInitializeBehaviorComplete();
 
-        raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, Collections.<ReplicatedLogEntry>emptyList(),
+        raftActorRef.tell(new AppendEntries(1L, "leader", 0L, 1L, List.of(),
                 0L, -1L, (short)1), ActorRef.noSender());
         LeaderStateChanged leaderStateChange = MessageCollectorActor.expectFirstMatching(
                 notifierActor, LeaderStateChanged.class);
@@ -1283,7 +1271,7 @@ public class RaftActorTest extends AbstractActorTest {
         doReturn(true).when(mockPersistenceProvider).isRecoveryApplicable();
 
         TestActorRef<MockRaftActor> leaderActorRef = factory.createTestActor(
-                MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config,
+                MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config,
                         mockPersistenceProvider), leaderId);
         MockRaftActor leaderActor = leaderActorRef.underlyingActor();
         leaderActor.waitForInitializeBehaviorComplete();
@@ -1301,7 +1289,7 @@ public class RaftActorTest extends AbstractActorTest {
         assertEquals("isPersistencePending", true, logEntry.isPersistencePending());
         assertEquals("getCommitIndex", -1, leaderActor.getRaftActorContext().getCommitIndex());
 
-        leaderActor.onReceiveCommand(new AppendEntriesReply(followerId, 1, true, 0, 1, (short)0));
+        leaderActor.handleCommand(new AppendEntriesReply(followerId, 1, true, 0, 1, (short)0));
         assertEquals("getCommitIndex", -1, leaderActor.getRaftActorContext().getCommitIndex());
 
         ArgumentCaptor<Procedure> callbackCaptor = ArgumentCaptor.forClass(Procedure.class);
@@ -1325,7 +1313,7 @@ public class RaftActorTest extends AbstractActorTest {
         config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
 
         TestActorRef<MockRaftActor> leaderActorRef = factory.createTestActor(
-                MockRaftActor.props(leaderId, ImmutableMap.of(followerId, followerActor.path().toString()), config),
+                MockRaftActor.props(leaderId, Map.of(followerId, followerActor.path().toString()), config),
                     leaderId);
         MockRaftActor leaderActor = leaderActorRef.underlyingActor();
         leaderActor.waitForInitializeBehaviorComplete();
@@ -1338,7 +1326,7 @@ public class RaftActorTest extends AbstractActorTest {
         MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
         MessageCollectorActor.clearMessages(followerActor);
 
-        leaderActor.onReceiveCommand(new AppendEntriesReply(followerId, 1, true, -1, -1, (short)0));
+        leaderActor.handleCommand(new AppendEntriesReply(followerId, 1, true, -1, -1, (short)0));
 
         leaderActor.persistData(leaderActorRef, new MockIdentifier("1"), new MockPayload("1"), true);
         MessageCollectorActor.assertNoneMatching(followerActor, AppendEntries.class, 500);
@@ -1350,4 +1338,67 @@ public class RaftActorTest extends AbstractActorTest {
         AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
         assertEquals("AppendEntries size", 3, appendEntries.getEntries().size());
     }
+
+    @Test
+    @SuppressWarnings("checkstyle:illegalcatch")
+    public void testApplyStateRace() throws Exception {
+        final String leaderId = factory.generateActorId("leader-");
+        final String followerId = factory.generateActorId("follower-");
+
+        DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+        config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
+        config.setCustomRaftPolicyImplementationClass(DisableElectionsRaftPolicy.class.getName());
+
+        ActorRef mockFollowerActorRef = factory.createActor(MessageCollectorActor.props());
+
+        TestRaftActor.Builder builder = TestRaftActor.newBuilder()
+                .id(leaderId)
+                .peerAddresses(Map.of(followerId, mockFollowerActorRef.path().toString()))
+                .config(config)
+                .collectorActor(factory.createActor(
+                        MessageCollectorActor.props(), factory.generateActorId(leaderId + "-collector")));
+
+        TestActorRef<MockRaftActor> leaderActorRef = factory.createTestActor(
+                builder.props(), leaderId);
+        MockRaftActor leaderActor = leaderActorRef.underlyingActor();
+        leaderActor.waitForInitializeBehaviorComplete();
+
+        leaderActor.getRaftActorContext().getTermInformation().update(1, leaderId);
+        Leader leader = new Leader(leaderActor.getRaftActorContext());
+        leaderActor.setCurrentBehavior(leader);
+
+        final ExecutorService executorService = Executors.newSingleThreadExecutor();
+
+        leaderActor.setPersistence(new PersistentDataProvider(leaderActor) {
+            @Override
+            public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
+                // needs to be executed from another thread to simulate the persistence actor calling this callback
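+                // note: the second argument to submit(Runnable, T) below is just the Future's result value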
+                executorService.submit(() -> {
+                    try {
+                        procedure.apply(entry);
+                    } catch (Exception e) {
+                        TEST_LOG.info("Fail during async persist callback", e);
+                    }
+                }, "persistence-callback");
+            }
+        });
+
+        leader.getFollower(followerId).setNextIndex(0);
+        leader.getFollower(followerId).setMatchIndex(-1);
+
+        // hitting this race is timing-dependent, so run many iterations to improve the chance of things
+        // blowing up while actor containment is being broken
+        final TestPersist message =
+                new TestPersist(leaderActorRef, new MockIdentifier("1"), new MockPayload("1"));
+        for (int i = 0; i < 100; i++) {
+            leaderActorRef.tell(message, null);
+
+            AppendEntriesReply reply =
+                    new AppendEntriesReply(followerId, 1, true, i, 1, (short) 5);
+            leaderActorRef.tell(reply, mockFollowerActorRef);
+        }
+
+        await("Persistence callback.").atMost(5, TimeUnit.SECONDS).until(() -> leaderActor.getState().size() == 100);
+        executorService.shutdown();
+    }
 }
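
A quick aside on the Map.of/List.of conversions above: the java.util factories give the
same unmodifiable, null-hostile semantics as the Guava ImmutableMap/ImmutableList builders
they replace, with the one caveat that Map.of makes no iteration-order promise. A minimal
standalone sketch of those properties (the class name CollectionFactorySketch is purely
illustrative and not part of the patch):

    import java.util.List;
    import java.util.Map;

    class CollectionFactorySketch {
        public static void main(String[] args) {
            // Unmodifiable and null-hostile, like Guava's immutable collections.
            Map<String, String> peers = Map.of("member1", "address");
            List<String> payloads = List.of("foo-0", "foo-1");

            // Mutation attempts fail fast; the tests above only ever read these collections.
            try {
                peers.put("member2", "address2");
            } catch (UnsupportedOperationException e) {
                System.out.println("Map.of result is unmodifiable");
            }

            // Unlike ImmutableMap, Map.of guarantees no particular iteration order; none of
            // the converted call sites depend on order, so the swap is behaviour-preserving.
            payloads.forEach(System.out::println);
        }
    }
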
index 6386d6c6ba1e7a9453161c3f67caa146e1dcd543..7d6b8988d32dbd076f5d87826e4984e0148b857a 100644
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static org.junit.Assert.fail;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.pattern.Patterns;
@@ -16,7 +18,6 @@ import akka.util.Timeout;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
 import org.slf4j.Logger;
@@ -31,7 +32,7 @@ public class RaftActorTestKit extends TestKit {
 
     public RaftActorTestKit(final ActorSystem actorSystem, final String actorName) {
         super(actorSystem);
-        raftActor = this.getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName);
+        raftActor = getSystem().actorOf(MockRaftActor.builder().id(actorName).props(), actorName);
     }
 
     public ActorRef getRaftActor() {
@@ -65,6 +66,6 @@ public class RaftActorTestKit extends TestKit {
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
         }
 
-        Assert.fail("Leader not found for actorRef " + actorRef.path());
+        fail("Leader not found for actorRef " + actorRef.path());
     }
 }
index 7004ca88787707c7b094d4c1e520db9fed5076bb..b3da66c0ff448d045742e2a0da2ced24fb325a19 100644
@@ -12,9 +12,8 @@ import static org.junit.Assert.assertEquals;
 import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotSuccess;
 import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
 import java.util.List;
+import java.util.Map;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
@@ -43,7 +42,7 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat
 
         String persistenceId = factory.generateActorId("singleNode");
         TestActorRef<AbstractRaftActorIntegrationTest.TestRaftActor> singleNodeActorRef =
-                newTestRaftActor(persistenceId, ImmutableMap.<String, String>builder().build(), leaderConfigParams);
+                newTestRaftActor(persistenceId, Map.of(), leaderConfigParams);
 
         waitUntilLeader(singleNodeActorRef);
 
@@ -75,8 +74,9 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat
 
         assertEquals("Last applied", 5, singleNodeContext.getLastApplied());
 
-        assertEquals("Incorrect State after snapshot success is received ", Lists.newArrayList(payload0, payload1,
-                payload2, payload3, payload4, payload5), singleNodeActorRef.underlyingActor().getState());
+        assertEquals("Incorrect State after snapshot success is received ",
+                List.of(payload0, payload1, payload2, payload3, payload4, payload5),
+                singleNodeActorRef.underlyingActor().getState());
 
         InMemoryJournal.waitForWriteMessagesComplete(persistenceId);
 
@@ -87,19 +87,17 @@ public class RecoveryIntegrationSingleNodeTest extends AbstractRaftActorIntegrat
         assertEquals(1, persistedSnapshots.size());
 
         List<Object> snapshottedState = MockRaftActor.fromState(persistedSnapshots.get(0).getState());
-        assertEquals("Incorrect Snapshot", Lists.newArrayList(payload0, payload1, payload2, payload3),
-                snapshottedState);
+        assertEquals("Incorrect Snapshot", List.of(payload0, payload1, payload2, payload3), snapshottedState);
 
         //recovery logic starts
         killActor(singleNodeActorRef);
 
-        singleNodeActorRef = newTestRaftActor(persistenceId,
-                ImmutableMap.<String, String>builder().build(), leaderConfigParams);
+        singleNodeActorRef = newTestRaftActor(persistenceId, Map.of(), leaderConfigParams);
 
         singleNodeActorRef.underlyingActor().waitForRecoveryComplete();
 
-        assertEquals("Incorrect State after Recovery ", Lists.newArrayList(payload0, payload1, payload2, payload3,
-                payload4, payload5), singleNodeActorRef.underlyingActor().getState());
-
+        assertEquals("Incorrect State after Recovery ",
+                List.of(payload0, payload1, payload2, payload3, payload4, payload5),
+                singleNodeActorRef.underlyingActor().getState());
     }
 }
index de6a4909c3c46ec5aac42a8455c69c2f2b446644..f197ba29a0e5c2ea0b1a2ce1480aad003f4bc40a 100644
@@ -11,9 +11,6 @@ import static org.junit.Assert.assertEquals;
 
 import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
-import java.util.Arrays;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.junit.Before;
@@ -38,15 +35,12 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
     @Before
     public void setup() {
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId)),
                 newFollowerConfigParams());
 
-        Map<String, String> leaderPeerAddresses = new HashMap<>();
-        leaderPeerAddresses.put(follower1Id, follower1Actor.path().toString());
-        leaderPeerAddresses.put(follower2Id, "");
-
         leaderConfigParams = newLeaderConfigParams();
-        leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams);
+        leaderActor = newTestRaftActor(leaderId, Map.of(follower1Id, follower1Actor.path().toString(), follower2Id, ""),
+            leaderConfigParams);
 
         follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
         leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
@@ -96,7 +90,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
         assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
 
-        assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+        assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4),
                 leaderActor.underlyingActor().getState());
     }
 
@@ -135,7 +129,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         assertEquals("Leader commit index", 4, leaderContext.getCommitIndex());
         assertEquals("Leader last applied", 4, leaderContext.getLastApplied());
 
-        assertEquals("Leader state", Arrays.asList(payload0, payload1, payload2, payload3, payload4),
+        assertEquals("Leader state", List.of(payload0, payload1, payload2, payload3, payload4),
                 leaderActor.underlyingActor().getState());
     }
 
@@ -146,8 +140,8 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
         leader = leaderActor.underlyingActor().getCurrentBehavior();
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
-                newFollowerConfigParams());
+        follower2Actor = newTestRaftActor(follower2Id,
+                Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams());
         follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
 
         leaderActor.tell(new SetPeerAddress(follower2Id, follower2Actor.path().toString()), ActorRef.noSender());
@@ -168,8 +162,8 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
         InMemoryJournal.clear();
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
-                newFollowerConfigParams());
+        follower2Actor = newTestRaftActor(follower2Id,
+                Map.of(leaderId, testActorPath(leaderId)), newFollowerConfigParams());
         TestRaftActor follower2Underlying = follower2Actor.underlyingActor();
         follower2CollectorActor = follower2Underlying.collectorActor();
         follower2Context = follower2Underlying.getRaftActorContext();
@@ -182,7 +176,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         // Wait for the follower to persist the snapshot.
         MessageCollectorActor.expectFirstMatching(follower2CollectorActor, SaveSnapshotSuccess.class);
 
-        final List<MockPayload> expFollowerState = Arrays.asList(payload0, payload1, payload2);
+        final List<MockPayload> expFollowerState = List.of(payload0, payload1, payload2);
 
         assertEquals("Follower commit index", 2, follower2Context.getCommitIndex());
         assertEquals("Follower last applied", 2, follower2Context.getLastApplied());
@@ -191,7 +185,7 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
 
         killActor(follower2Actor);
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId)),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId)),
                 newFollowerConfigParams());
 
         follower2Underlying = follower2Actor.underlyingActor();
@@ -204,6 +198,51 @@ public class RecoveryIntegrationTest extends AbstractRaftActorIntegrationTest {
         assertEquals("Follower state", expFollowerState, follower2Underlying.getState());
     }
 
+    @Test
+    public void testRecoveryDeleteEntries() {
+        send2InitialPayloads();
+
+        sendPayloadData(leaderActor, "two");
+
+        // This should trigger a snapshot.
+        sendPayloadData(leaderActor, "three");
+
+        MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+        MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyJournalEntries.class, 2);
+
+        // Disconnect follower from leader
+        killActor(follower1Actor);
+
+        // Send more payloads
+        sendPayloadData(leaderActor, "four");
+        sendPayloadData(leaderActor, "five");
+
+        verifyRaftState(leaderActor, raftState -> {
+            assertEquals("leader journal last index", 5, leaderContext.getReplicatedLog().lastIndex());
+        });
+
+        // Remove entries starting from index 4
+        leaderActor.underlyingActor().getReplicatedLog().removeFromAndPersist(4);
+
+        verifyRaftState(leaderActor, raftState -> {
+            assertEquals("leader journal last index", 3, leaderContext.getReplicatedLog().lastIndex());
+        });
+
+        // Send new payloads
+        final MockPayload payload4 = sendPayloadData(leaderActor, "newFour");
+        final MockPayload payload5 = sendPayloadData(leaderActor, "newFive");
+
+        verifyRaftState(leaderActor, raftState -> {
+            assertEquals("leader journal last index", 5, leaderContext.getReplicatedLog().lastIndex());
+        });
+
+        reinstateLeaderActor();
+
+        final var log = leaderActor.underlyingActor().getReplicatedLog();
+        assertEquals("Leader last index", 5, log.lastIndex());
+        assertEquals(List.of(payload4, payload5), List.of(log.get(4).getData(), log.get(5).getData()));
+    }
+
     private void reinstateLeaderActor() {
         killActor(leaderActor);
 
index 6ae5731800049ec42d252bc22a1a9ed74c47adb9..542828b9d0404eceeca87410426c93a64fa3ba7b 100644
@@ -10,21 +10,23 @@ package org.opendaylight.controller.cluster.raft;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.ArgumentMatchers.argThat;
 import static org.mockito.ArgumentMatchers.same;
-import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
 
 import akka.japi.Procedure;
-import java.util.Collections;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Map;
+import java.util.function.Consumer;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.ArgumentCaptor;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
 import org.mockito.internal.matchers.Same;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
@@ -38,6 +40,7 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ReplicatedLogImplTest {
     private static final Logger LOG = LoggerFactory.getLogger(RaftActorRecoverySupportTest.class);
 
@@ -52,19 +55,18 @@ public class ReplicatedLogImplTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
-
         context = new RaftActorContextImpl(null, null, "test",
-                new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Collections.<String,String>emptyMap(),
-                configParams, mockPersistence, applyState -> { }, LOG);
+                new ElectionTermImpl(mockPersistence, "test", LOG), -1, -1, Map.of(),
+                configParams, mockPersistence, applyState -> { }, LOG, MoreExecutors.directExecutor());
     }
 
-    private void verifyPersist(Object message) throws Exception {
+    private void verifyPersist(final Object message) throws Exception {
         verifyPersist(message, new Same(message), true);
     }
 
     @SuppressWarnings({ "unchecked", "rawtypes" })
-    private void verifyPersist(Object message, ArgumentMatcher<?> matcher, boolean async) throws Exception {
+    private void verifyPersist(final Object message, final ArgumentMatcher<?> matcher, final boolean async)
+            throws Exception {
         ArgumentCaptor<Procedure> procedure = ArgumentCaptor.forClass(Procedure.class);
         if (async) {
             verify(mockPersistence).persistAsync(argThat(matcher), procedure.capture());
@@ -75,8 +77,8 @@ public class ReplicatedLogImplTest {
         procedure.getValue().apply(message);
     }
 
-    @SuppressWarnings("unchecked")
     @Test
+    @SuppressWarnings("unchecked")
     public void testAppendAndPersistExpectingNoCapture() throws Exception {
         ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
 
@@ -91,12 +93,12 @@ public class ReplicatedLogImplTest {
         reset(mockPersistence);
 
         ReplicatedLogEntry logEntry2 = new SimpleReplicatedLogEntry(2, 1, new MockPayload("2"));
-        Procedure<ReplicatedLogEntry> mockCallback = mock(Procedure.class);
+        Consumer<ReplicatedLogEntry> mockCallback = mock(Consumer.class);
         log.appendAndPersist(logEntry2, mockCallback, true);
 
         verifyPersist(logEntry2);
 
-        verify(mockCallback).apply(same(logEntry2));
+        verify(mockCallback).accept(same(logEntry2));
 
         assertEquals("size", 2, log.size());
     }
@@ -106,7 +108,7 @@ public class ReplicatedLogImplTest {
     public void testAppendAndPersisWithDuplicateEntry() throws Exception {
         ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
 
-        Procedure<ReplicatedLogEntry> mockCallback = mock(Procedure.class);
+        Consumer<ReplicatedLogEntry> mockCallback = mock(Consumer.class);
         ReplicatedLogEntry logEntry = new SimpleReplicatedLogEntry(1, 1, new MockPayload("1"));
 
         log.appendAndPersist(logEntry, mockCallback, true);
@@ -128,8 +130,6 @@ public class ReplicatedLogImplTest {
     public void testAppendAndPersistExpectingCaptureDueToJournalCount() throws Exception {
         configParams.setSnapshotBatchCount(2);
 
-        doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
-
         ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
 
         final ReplicatedLogEntry logEntry1 = new SimpleReplicatedLogEntry(2, 1, new MockPayload("2"));
@@ -149,8 +149,6 @@ public class ReplicatedLogImplTest {
 
     @Test
     public void testAppendAndPersistExpectingCaptureDueToDataSize() throws Exception {
-        doReturn(1L).when(mockBehavior).getReplicatedToAllIndex();
-
         context.setTotalMemoryRetriever(() -> 100);
 
         ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
@@ -194,7 +192,21 @@ public class ReplicatedLogImplTest {
         verifyNoMoreInteractions(mockPersistence);
     }
 
-    public ArgumentMatcher<DeleteEntries> match(final DeleteEntries actual) {
+    @Test
+    public void testCommitFakeSnapshot() {
+        ReplicatedLog log = ReplicatedLogImpl.newInstance(context);
+
+        log.append(new SimpleReplicatedLogEntry(0, 1, new MockPayload("0")));
+        final int dataSizeAfterFirstPayload = log.dataSize();
+
+        log.snapshotPreCommit(0, 1);
+        log.snapshotCommit(false);
+
+        assertEquals(0, log.size());
+        assertEquals(dataSizeAfterFirstPayload, log.dataSize());
+    }
+
+    private static ArgumentMatcher<DeleteEntries> match(final DeleteEntries actual) {
         return other -> actual.getFromIndex() == other.getFromIndex();
     }
 }
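
For context on the mockCallback change above: akka.japi.Procedure declares
"void apply(T) throws Exception", whereas java.util.function.Consumer#accept declares no
checked exception, so callback invocations no longer need try/catch or a throws clause.
A minimal sketch of the difference (method and class names here are illustrative only):

    import java.util.function.Consumer;

    class CallbackSketch {
        // With a Procedure-typed callback this call site would have to handle or declare
        // Exception; Consumer.accept(T) lets it stay exception-free.
        static void notifyPersisted(Consumer<String> callback, String entry) {
            callback.accept(entry);
        }

        public static void main(String[] args) {
            notifyPersisted(entry -> System.out.println("persisted " + entry), "logEntry-2");
        }
    }
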
index 050b0ddf35d0391a64e47577b391e3d95918a471..70f67425c3951c987c982999b664d1ae17d37625 100644
@@ -10,8 +10,8 @@ package org.opendaylight.controller.cluster.raft;
 import static org.junit.Assert.assertEquals;
 
 import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
 import java.util.List;
+import java.util.Map;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
@@ -67,15 +67,15 @@ public class ReplicationAndSnapshotsIntegrationTest extends AbstractRaftActorInt
 
         DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
         followerConfigParams.setSnapshotBatchCount(snapshotBatchCount);
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
                 follower2Id, testActorPath(follower2Id)), followerConfigParams);
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
                 follower1Id, testActorPath(follower1Id)), followerConfigParams);
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString()).build();
+        peerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
index fa974b0ca42eee0b704df9afd1dd9ead437b18c9..f2658957e1e173d66e30839cd3938ade1c57f870 100644
@@ -8,21 +8,20 @@
 package org.opendaylight.controller.cluster.raft;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import akka.actor.ActorRef;
 import akka.persistence.SaveSnapshotSuccess;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.SerializationUtils;
 import org.eclipse.jdt.annotation.Nullable;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
@@ -61,15 +60,15 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         InMemoryJournal.addEntry(leaderId, 1, new UpdateElectionTerm(initialTerm, leaderId));
 
         // Create the leader and 2 follower actors.
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
                 follower2Id, testActorPath(follower2Id)), newFollowerConfigParams());
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
                 follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
 
-        Map<String, String> leaderPeerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString()).build();
+        Map<String, String> leaderPeerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, leaderPeerAddresses, leaderConfigParams);
@@ -86,7 +85,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         follower2 = follower2Actor.underlyingActor().getCurrentBehavior();
 
         currentTerm = leaderContext.getTermInformation().getCurrentTerm();
-        assertEquals("Current term > " + initialTerm, true, currentTerm > initialTerm);
+        assertTrue("Current term > " + initialTerm, currentTerm > initialTerm);
 
         leaderCollectorActor = leaderActor.underlyingActor().collectorActor();
         follower1CollectorActor = follower1Actor.underlyingActor().collectorActor();
@@ -95,6 +94,16 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         testLog.info("Leader created and elected");
     }
 
+    private void setupFollower2() {
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
+                follower1Id, testActorPath(follower1Id)), newFollowerConfigParams());
+
+        follower2Context = follower2Actor.underlyingActor().getRaftActorContext();
+        follower2 = follower2Actor.underlyingActor().getCurrentBehavior();
+
+        follower2CollectorActor = follower2Actor.underlyingActor().collectorActor();
+    }
+
     /**
      * Send 2 payload instances with follower 2 lagging then resume the follower and verifies it gets
      * caught up via AppendEntries.
@@ -159,7 +168,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // to catch it up because no snapshotting was done so the follower's next index was present in the log.
         InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor,
                 InstallSnapshot.class);
-        Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+        assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
 
         testLog.info("testReplicationsWithLaggingFollowerCaughtUpViaAppendEntries complete");
     }
@@ -244,7 +253,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // Verify the leader did not try to install a snapshot to catch up follower 2.
         InstallSnapshot installSnapshot = MessageCollectorActor.getFirstMatching(follower2CollectorActor,
                 InstallSnapshot.class);
-        Assert.assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
+        assertNull("Follower 2 received unexpected InstallSnapshot", installSnapshot);
 
         // Ensure there's at least 1 more heartbeat.
         MessageCollectorActor.clearMessages(leaderCollectorActor);
@@ -354,7 +363,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
 
         // Send a server config change to test that the install snapshot includes the server config.
 
-        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo(leaderId, true),
                 new ServerInfo(follower1Id, false),
                 new ServerInfo(follower2Id, false)));
@@ -383,6 +392,80 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         testLog.info("testLeaderSnapshotWithLaggingFollowerCaughtUpViaInstallSnapshot complete");
     }
 
+    /**
+     * Tests whether the leader reattempts to send a snapshot when a follower crashes before replying with
+     * InstallSnapshotReply after the last chunk has been sent.
+     */
+    @Test
+    public void testLeaderInstallsSnapshotWithRestartedFollowerDuringSnapshotInstallation() throws Exception {
+        testLog.info("testLeaderInstallsSnapshotWithRestartedFollowerDuringSnapshotInstallation starting");
+
+        setup();
+
+        sendInitialPayloadsReplicatedToAllFollowers("zero", "one");
+
+        // Stop follower 2 so it lags behind the leader.
+        follower2Actor.stop();
+
+        // Sleep for at least the election timeout interval so follower 2 is deemed inactive by the leader.
+        Uninterruptibles.sleepUninterruptibly(leaderConfigParams.getElectionTimeOutInterval().toMillis() + 5,
+                TimeUnit.MILLISECONDS);
+
+        // Send 5 payloads - the second should cause a leader snapshot.
+        final MockPayload payload2 = sendPayloadData(leaderActor, "two");
+        final MockPayload payload3 = sendPayloadData(leaderActor, "three");
+        final MockPayload payload4 = sendPayloadData(leaderActor, "four");
+        final MockPayload payload5 = sendPayloadData(leaderActor, "five");
+        final MockPayload payload6 = sendPayloadData(leaderActor, "six");
+
+        MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+        // Verify the leader got consensus and applies each log entry even though follower 2 didn't respond.
+        List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor, ApplyState.class, 5);
+        verifyApplyState(applyStates.get(0), leaderCollectorActor, payload2.toString(), currentTerm, 2, payload2);
+        verifyApplyState(applyStates.get(2), leaderCollectorActor, payload4.toString(), currentTerm, 4, payload4);
+        verifyApplyState(applyStates.get(4), leaderCollectorActor, payload6.toString(), currentTerm, 6, payload6);
+
+        MessageCollectorActor.clearMessages(leaderCollectorActor);
+
+        testLog.info("testLeaderInstallsSnapshotWithRestartedFollowerDuringSnapshotInstallation: "
+                + "sending 1 more payload to trigger second snapshot");
+
+        // Send another payload to trigger a second leader snapshot.
+        MockPayload payload7 = sendPayloadData(leaderActor, "seven");
+
+        MessageCollectorActor.expectFirstMatching(leaderCollectorActor, SaveSnapshotSuccess.class);
+
+        ApplyState applyState = MessageCollectorActor.expectFirstMatching(leaderCollectorActor, ApplyState.class);
+        verifyApplyState(applyState, leaderCollectorActor, payload7.toString(), currentTerm, 7, payload7);
+
+        // Verify follower 1 applies each log entry.
+        applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 6);
+        verifyApplyState(applyStates.get(0), null, null, currentTerm, 2, payload2);
+        verifyApplyState(applyStates.get(2), null, null, currentTerm, 4, payload4);
+        verifyApplyState(applyStates.get(5), null, null, currentTerm, 7, payload7);
+
+        leaderActor.underlyingActor()
+                .startDropMessages(InstallSnapshotReply.class, reply -> reply.getChunkIndex() == 5);
+
+        setupFollower2();
+
+        MessageCollectorActor.expectMatching(follower2CollectorActor, InstallSnapshot.class, 1);
+
+        follower2Actor.stop();
+
+        // need to get rid of persistence for follower2
+        InMemorySnapshotStore.clearSnapshotsFor(follower2Id);
+
+        leaderActor.underlyingActor().stopDropMessages(InstallSnapshotReply.class);
+
+        MessageCollectorActor.clearMessages(follower2CollectorActor);
+        setupFollower2();
+
+        MessageCollectorActor.expectMatching(follower2CollectorActor, SaveSnapshotSuccess.class, 1);
+    }
+
     /**
      * Send payloads with follower 2 lagging with the last payload having a large enough size to trigger a
      * leader snapshot such that the leader trims its log from the last applied index. Follower 2's log will
@@ -424,7 +507,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // Verify a snapshot is not triggered.
         CaptureSnapshot captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor,
                 CaptureSnapshot.class);
-        Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+        assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
 
         expSnapshotState.add(payload1);
 
@@ -497,7 +580,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         verifyApplyState(applyState, leaderCollectorActor, payload3.toString(), currentTerm, 3, payload3);
 
         captureSnapshot = MessageCollectorActor.getFirstMatching(leaderCollectorActor, CaptureSnapshot.class);
-        Assert.assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
+        assertNull("Leader received unexpected CaptureSnapshot", captureSnapshot);
 
         // Verify the follower 1 applies the state.
         applyState = MessageCollectorActor.expectFirstMatching(follower1CollectorActor, ApplyState.class);
@@ -529,8 +612,8 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
     /**
      * Resume the lagging follower 2 and verify it receives an install snapshot from the leader.
      */
-    private void verifyInstallSnapshotToLaggingFollower(long lastAppliedIndex,
-            @Nullable ServerConfigurationPayload expServerConfig) {
+    private void verifyInstallSnapshotToLaggingFollower(final long lastAppliedIndex,
+            final @Nullable ServerConfigurationPayload expServerConfig) {
         testLog.info("verifyInstallSnapshotToLaggingFollower starting");
 
         MessageCollectorActor.clearMessages(leaderCollectorActor);
@@ -549,15 +632,15 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // This is OK - the next snapshot should delete it. In production, even if the system restarted
         // before another snapshot, they would both get applied which wouldn't hurt anything.
         List<Snapshot> persistedSnapshots = InMemorySnapshotStore.getSnapshots(leaderId, Snapshot.class);
-        Assert.assertTrue("Expected at least 1 persisted snapshots", persistedSnapshots.size() > 0);
+        assertFalse("Expected at least 1 persisted snapshots", persistedSnapshots.isEmpty());
         Snapshot persistedSnapshot = persistedSnapshots.get(persistedSnapshots.size() - 1);
         verifySnapshot("Persisted", persistedSnapshot, currentTerm, lastAppliedIndex, currentTerm, lastAppliedIndex);
         List<ReplicatedLogEntry> unAppliedEntry = persistedSnapshot.getUnAppliedEntries();
         assertEquals("Persisted Snapshot getUnAppliedEntries size", 0, unAppliedEntry.size());
 
         int snapshotSize = SerializationUtils.serialize(persistedSnapshot.getState()).length;
-        final int expTotalChunks = snapshotSize / SNAPSHOT_CHUNK_SIZE
-                + (snapshotSize % SNAPSHOT_CHUNK_SIZE > 0 ? 1 : 0);
+        final int expTotalChunks = snapshotSize / MAXIMUM_MESSAGE_SLICE_SIZE
+                + (snapshotSize % MAXIMUM_MESSAGE_SLICE_SIZE > 0 ? 1 : 0);
 
         InstallSnapshot installSnapshot = MessageCollectorActor.expectFirstMatching(follower2CollectorActor,
                 InstallSnapshot.class);
@@ -576,7 +659,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
             assertEquals("InstallSnapshotReply getTerm", currentTerm, installSnapshotReply.getTerm());
             assertEquals("InstallSnapshotReply getChunkIndex", index++, installSnapshotReply.getChunkIndex());
             assertEquals("InstallSnapshotReply getFollowerId", follower2Id, installSnapshotReply.getFollowerId());
-            assertEquals("InstallSnapshotReply isSuccess", true, installSnapshotReply.isSuccess());
+            assertTrue("InstallSnapshotReply isSuccess", installSnapshotReply.isSuccess());
         }
 
         // Verify follower 2 applies the snapshot.
@@ -599,18 +682,18 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         verifyLeadersTrimmedLog(lastAppliedIndex);
 
         if (expServerConfig != null) {
-            Set<ServerInfo> expServerInfo = new HashSet<>(expServerConfig.getServerConfig());
+            Set<ServerInfo> expServerInfo = Set.copyOf(expServerConfig.getServerConfig());
             assertEquals("Leader snapshot server config", expServerInfo,
-                    new HashSet<>(persistedSnapshot.getServerConfiguration().getServerConfig()));
+                Set.copyOf(persistedSnapshot.getServerConfiguration().getServerConfig()));
 
             assertEquals("Follower 2 snapshot server config", expServerInfo,
-                    new HashSet<>(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig()));
+                Set.copyOf(applySnapshot.getSnapshot().getServerConfiguration().getServerConfig()));
 
             ServerConfigurationPayload follower2ServerConfig = follower2Context.getPeerServerInfo(true);
             assertNotNull("Follower 2 server config is null", follower2ServerConfig);
 
             assertEquals("Follower 2 server config", expServerInfo,
-                    new HashSet<>(follower2ServerConfig.getServerConfig()));
+                Set.copyOf(follower2ServerConfig.getServerConfig()));
         }
 
         MessageCollectorActor.clearMessages(leaderCollectorActor);
@@ -681,8 +764,9 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         // Verify the leader's persisted journal log - it should only contain the last 2 ReplicatedLogEntries
         // added after the snapshot as the persisted journal should've been purged to the snapshot
         // sequence number.
-        verifyPersistedJournal(leaderId, Arrays.asList(new SimpleReplicatedLogEntry(5, currentTerm, payload5),
-                new SimpleReplicatedLogEntry(6, currentTerm, payload6)));
+        verifyPersistedJournal(leaderId, List.of(
+            new SimpleReplicatedLogEntry(5, currentTerm, payload5),
+            new SimpleReplicatedLogEntry(6, currentTerm, payload6)));
 
         // Verify the leader's persisted journal contains an ApplyJournalEntries for at least the last entry index.
         List<ApplyJournalEntries> persistedApplyJournalEntries =
@@ -695,8 +779,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
             }
         }
 
-        Assert.assertTrue(String.format("ApplyJournalEntries with index %d not found in leader's persisted journal", 6),
-                found);
+        assertTrue("ApplyJournalEntries with index 6 not found in leader's persisted journal", found);
 
         // Verify follower 1 applies the 3 log entries.
         applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, 3);
@@ -727,8 +810,8 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
     /**
      * Kill the leader actor, reinstate it and verify the recovered journal.
      */
-    private void verifyLeaderRecoveryAfterReinstatement(long lastIndex, long snapshotIndex,
-            long firstJournalEntryIndex) {
+    private void verifyLeaderRecoveryAfterReinstatement(final long lastIndex, final long snapshotIndex,
+            final long firstJournalEntryIndex) {
         testLog.info("verifyLeaderRecoveryAfterReinstatement starting: lastIndex: {}, snapshotIndex: {}, "
             + "firstJournalEntryIndex: {}", lastIndex, snapshotIndex, firstJournalEntryIndex);
 
@@ -761,8 +844,7 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         testLog.info("verifyLeaderRecoveryAfterReinstatement ending");
     }
 
-    private void sendInitialPayloadsReplicatedToAllFollowers(String... data) {
-
+    private void sendInitialPayloadsReplicatedToAllFollowers(final String... data) {
         // Send the payloads.
         for (String d: data) {
             expSnapshotState.add(sendPayloadData(leaderActor, d));
@@ -771,25 +853,27 @@ public class ReplicationAndSnapshotsWithLaggingFollowerIntegrationTest extends A
         int numEntries = data.length;
 
         // Verify the leader got consensus and applies each log entry even though follower 2 didn't respond.
-        List<ApplyState> applyStates = MessageCollectorActor.expectMatching(leaderCollectorActor,
-                ApplyState.class, numEntries);
+        final var leaderStates = MessageCollectorActor.expectMatching(leaderCollectorActor,
+            ApplyState.class, numEntries);
         for (int i = 0; i < expSnapshotState.size(); i++) {
-            MockPayload payload = expSnapshotState.get(i);
-            verifyApplyState(applyStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload);
+            final MockPayload payload = expSnapshotState.get(i);
+            verifyApplyState(leaderStates.get(i), leaderCollectorActor, payload.toString(), currentTerm, i, payload);
         }
 
         // Verify follower 1 applies each log entry.
-        applyStates = MessageCollectorActor.expectMatching(follower1CollectorActor, ApplyState.class, numEntries);
+        final var follower1States = MessageCollectorActor.expectMatching(follower1CollectorActor,
+            ApplyState.class, numEntries);
         for (int i = 0; i < expSnapshotState.size(); i++) {
-            MockPayload payload = expSnapshotState.get(i);
-            verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload);
+            final MockPayload payload = expSnapshotState.get(i);
+            verifyApplyState(follower1States.get(i), null, null, currentTerm, i, payload);
         }
 
         // Verify follower 2 applies each log entry.
-        applyStates = MessageCollectorActor.expectMatching(follower2CollectorActor, ApplyState.class, numEntries);
+        final var follower2States = MessageCollectorActor.expectMatching(follower2CollectorActor,
+            ApplyState.class, numEntries);
         for (int i = 0; i < expSnapshotState.size(); i++) {
-            MockPayload payload = expSnapshotState.get(i);
-            verifyApplyState(applyStates.get(i), null, null, currentTerm, i, payload);
+            final MockPayload payload = expSnapshotState.get(i);
+            verifyApplyState(follower2States.get(i), null, null, currentTerm, i, payload);
         }
 
         // Ensure there's at least 1 more heartbeat.
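
The expTotalChunks computation in verifyInstallSnapshotToLaggingFollower above is plain
ceiling division of the serialized snapshot size by the renamed MAXIMUM_MESSAGE_SLICE_SIZE
constant. A tiny standalone illustration (class and method names are made up for the example):

    class ChunkCountSketch {
        // Mirrors "size / MAXIMUM_MESSAGE_SLICE_SIZE + (size % MAXIMUM_MESSAGE_SLICE_SIZE > 0 ? 1 : 0)",
        // i.e. ceil(snapshotSize / (double) maxSliceSize) in integer arithmetic.
        static int expectedChunks(int snapshotSize, int maxSliceSize) {
            return snapshotSize / maxSliceSize + (snapshotSize % maxSliceSize > 0 ? 1 : 0);
        }

        public static void main(String[] args) {
            System.out.println(expectedChunks(50, 20)); // 3 -> chunks of 20, 20 and 10 bytes
            System.out.println(expectedChunks(40, 20)); // 2 -> exactly two full chunks
        }
    }
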
index fda95eaa1d2f4a254240fe24aea6456fb32de767..dcbc8179a6e4e62dcd93cd31a2c4166b8743d10b 100644
@@ -9,8 +9,8 @@ package org.opendaylight.controller.cluster.raft;
 
 import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectMatching;
 
-import com.google.common.collect.ImmutableMap;
 import java.util.List;
+import java.util.Map;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
@@ -29,19 +29,19 @@ public class ReplicationWithSlicedPayloadIntegrationTest extends AbstractRaftAct
 
         // Create the leader and 2 follower actors.
 
-        snapshotChunkSize = 20;
+        maximumMessageSliceSize = 20;
 
         DefaultConfigParamsImpl followerConfigParams = newFollowerConfigParams();
         followerConfigParams.setSnapshotBatchCount(snapshotBatchCount);
-        follower1Actor = newTestRaftActor(follower1Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower1Actor = newTestRaftActor(follower1Id, Map.of(leaderId, testActorPath(leaderId),
                 follower2Id, testActorPath(follower2Id)), followerConfigParams);
 
-        follower2Actor = newTestRaftActor(follower2Id, ImmutableMap.of(leaderId, testActorPath(leaderId),
+        follower2Actor = newTestRaftActor(follower2Id, Map.of(leaderId, testActorPath(leaderId),
                 follower1Id, testActorPath(follower1Id)), followerConfigParams);
 
-        peerAddresses = ImmutableMap.<String, String>builder()
-                .put(follower1Id, follower1Actor.path().toString())
-                .put(follower2Id, follower2Actor.path().toString()).build();
+        peerAddresses = Map.of(
+                follower1Id, follower1Actor.path().toString(),
+                follower2Id, follower2Actor.path().toString());
 
         leaderConfigParams = newLeaderConfigParams();
         leaderActor = newTestRaftActor(leaderId, peerAddresses, leaderConfigParams);
@@ -58,11 +58,11 @@ public class ReplicationWithSlicedPayloadIntegrationTest extends AbstractRaftAct
 
         // Send a large payload that exceeds the size threshold and needs to be sliced.
 
-        MockPayload largePayload = sendPayloadData(leaderActor, "large", snapshotChunkSize + 1);
+        MockPayload largePayload = sendPayloadData(leaderActor, "large", maximumMessageSliceSize + 1);
 
         // Then send a small payload that does not need to be sliced.
 
-        MockPayload smallPayload = sendPayloadData(leaderActor, "normal", snapshotChunkSize - 1);
+        MockPayload smallPayload = sendPayloadData(leaderActor, "normal", maximumMessageSliceSize - 1);
 
         final List<ApplyState> leaderApplyState = expectMatching(leaderCollectorActor, ApplyState.class, 2);
         verifyApplyState(leaderApplyState.get(0), leaderCollectorActor,
index 0dc33130dae32b491b7acaacf42042fc36ce7b49..aa4a44b97c13803d543ae553036e77ad72b7541c 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft;
 
 import static org.junit.Assert.assertArrayEquals;
@@ -26,15 +25,16 @@ import static org.mockito.Mockito.verify;
 import akka.actor.ActorRef;
 import akka.persistence.SnapshotSelectionCriteria;
 import java.io.OutputStream;
-import java.util.Arrays;
+import java.util.List;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
 import org.opendaylight.controller.cluster.raft.SnapshotManager.LastAppliedTermInformationReader;
@@ -48,6 +48,7 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.slf4j.LoggerFactory;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class SnapshotManagerTest extends AbstractActorTest {
 
     @Mock
@@ -79,8 +80,6 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
         doReturn(false).when(mockRaftActorContext).hasFollowers();
         doReturn(mockConfigParams).when(mockRaftActorContext).getConfigParams();
         doReturn(10L).when(mockConfigParams).getSnapshotBatchCount();
@@ -114,7 +113,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
     @Test
     public void testConstruction() {
-        assertEquals(false, snapshotManager.isCapturing());
+        assertFalse(snapshotManager.isCapturing());
     }
 
     @SuppressWarnings({ "unchecked", "rawtypes" })
@@ -125,7 +124,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
         snapshotManager.captureToInstall(new SimpleReplicatedLogEntry(0, 1,
                 new MockRaftActorContext.MockPayload()), 0, "follower-1");
 
-        assertEquals(true, snapshotManager.isCapturing());
+        assertTrue(snapshotManager.isCapturing());
 
         ArgumentCaptor<Optional> outputStream = ArgumentCaptor.forClass(Optional.class);
         verify(mockProcedure).accept(outputStream.capture());
@@ -155,7 +154,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         assertTrue(capture);
 
-        assertEquals(true, snapshotManager.isCapturing());
+        assertTrue(snapshotManager.isCapturing());
 
         ArgumentCaptor<Optional> outputStream = ArgumentCaptor.forClass(Optional.class);
         verify(mockProcedure).accept(outputStream.capture());
@@ -185,7 +184,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         assertTrue(capture);
 
-        assertEquals(true, snapshotManager.isCapturing());
+        assertTrue(snapshotManager.isCapturing());
 
         ArgumentCaptor<Optional> outputStream = ArgumentCaptor.forClass(Optional.class);
         verify(mockProcedure).accept(outputStream.capture());
@@ -194,12 +193,12 @@ public class SnapshotManagerTest extends AbstractActorTest {
         CaptureSnapshot captureSnapshot = snapshotManager.getCaptureSnapshot();
 
         // LastIndex and LastTerm are picked up from the lastLogEntry
-        assertEquals(-1L, captureSnapshot.getLastIndex());
-        assertEquals(-1L, captureSnapshot.getLastTerm());
+        assertEquals(0, captureSnapshot.getLastIndex());
+        assertEquals(0, captureSnapshot.getLastTerm());
 
         // Since the actor does not have any followers (no peer addresses) lastApplied will be from lastLogEntry
-        assertEquals(-1L, captureSnapshot.getLastAppliedIndex());
-        assertEquals(-1L, captureSnapshot.getLastAppliedTerm());
+        assertEquals(0, captureSnapshot.getLastAppliedIndex());
+        assertEquals(0, captureSnapshot.getLastAppliedTerm());
 
         //
         assertEquals(-1L, captureSnapshot.getReplicatedToAllIndex());
@@ -216,7 +215,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         assertFalse(capture);
 
-        assertEquals(false, snapshotManager.isCapturing());
+        assertFalse(snapshotManager.isCapturing());
 
         verify(mockProcedure).accept(any());
     }
@@ -257,7 +256,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
                 8L, 2L, new MockRaftActorContext.MockPayload());
 
         doReturn(lastAppliedEntry).when(mockReplicatedLog).get(8L);
-        doReturn(Arrays.asList(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
+        doReturn(List.of(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
 
         // when replicatedToAllIndex = -1
         snapshotManager.capture(lastLogEntry, -1);
@@ -275,7 +274,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
         assertEquals("getLastAppliedTerm", 2L, snapshot.getLastAppliedTerm());
         assertEquals("getLastAppliedIndex", 8L, snapshot.getLastAppliedIndex());
         assertEquals("getState", snapshotState, snapshot.getState());
-        assertEquals("getUnAppliedEntries", Arrays.asList(lastLogEntry), snapshot.getUnAppliedEntries());
+        assertEquals("getUnAppliedEntries", List.of(lastLogEntry), snapshot.getUnAppliedEntries());
         assertEquals("electionTerm", mockElectionTerm.getCurrentTerm(), snapshot.getElectionTerm());
         assertEquals("electionVotedFor", mockElectionTerm.getVotedFor(), snapshot.getElectionVotedFor());
 
@@ -287,6 +286,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
         doReturn(45L).when(mockReplicatedLog).getSnapshotIndex();
         doReturn(6L).when(mockReplicatedLog).getSnapshotTerm();
         ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+        doReturn(null).when(mockReplicatedLog).get(0);
         doReturn(replicatedLogEntry).when(mockReplicatedLog).get(9);
         doReturn(6L).when(replicatedLogEntry).getTerm();
         doReturn(9L).when(replicatedLogEntry).getIndex();
@@ -340,6 +340,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         long replicatedToAllIndex = 1;
         ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+        doReturn(null).when(mockReplicatedLog).get(0);
         doReturn(replicatedLogEntry).when(mockReplicatedLog).get(replicatedToAllIndex);
         doReturn(6L).when(replicatedLogEntry).getTerm();
         doReturn(replicatedToAllIndex).when(replicatedLogEntry).getIndex();
@@ -376,11 +377,11 @@ public class SnapshotManagerTest extends AbstractActorTest {
         Optional<OutputStream> installSnapshotStream = installSnapshotStreamCapture.getValue();
         assertEquals("isPresent", true, installSnapshotStream.isPresent());
 
-        installSnapshotStream.get().write(snapshotState.getBytes());
+        installSnapshotStream.orElseThrow().write(snapshotState.getBytes());
 
         snapshotManager.persist(snapshotState, installSnapshotStream, Runtime.getRuntime().totalMemory());
 
-        assertEquals(true, snapshotManager.isCapturing());
+        assertTrue(snapshotManager.isCapturing());
 
         verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
 
@@ -433,11 +434,11 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         snapshotManager.persist(ByteState.empty(), Optional.empty(), Runtime.getRuntime().totalMemory());
 
-        assertEquals(true, snapshotManager.isCapturing());
+        assertTrue(snapshotManager.isCapturing());
 
         snapshotManager.commit(100L, 1234L);
 
-        assertEquals(false, snapshotManager.isCapturing());
+        assertFalse(snapshotManager.isCapturing());
 
         verify(mockReplicatedLog).snapshotCommit();
 
@@ -448,7 +449,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         verify(mockDataPersistenceProvider).deleteSnapshots(criteriaCaptor.capture());
 
-        assertEquals(100L, criteriaCaptor.getValue().maxSequenceNr());
+        assertEquals(Long.MAX_VALUE, criteriaCaptor.getValue().maxSequenceNr());
         assertEquals(1233L, criteriaCaptor.getValue().maxTimestamp());
 
         MessageCollectorActor.expectFirstMatching(actorRef, SnapshotComplete.class);
@@ -560,7 +561,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
         assertEquals("return index", 10L, retIndex);
 
         verify(mockReplicatedLog).snapshotPreCommit(10, 5);
-        verify(mockReplicatedLog).snapshotCommit();
+        verify(mockReplicatedLog).snapshotCommit(false);
 
         verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
     }
@@ -569,16 +570,11 @@ public class SnapshotManagerTest extends AbstractActorTest {
     public void testTrimLogWhenLastAppliedNotSet() {
         doReturn(-1L).when(mockRaftActorContext).getLastApplied();
 
-        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
-        doReturn(true).when(mockReplicatedLog).isPresent(10);
-        doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
-        doReturn(5L).when(replicatedLogEntry).getTerm();
-
         long retIndex = snapshotManager.trimLog(10);
         assertEquals("return index", -1L, retIndex);
 
         verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
-        verify(mockReplicatedLog, never()).snapshotCommit();
+        verify(mockReplicatedLog, never()).snapshotCommit(false);
 
         verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
     }
@@ -587,16 +583,11 @@ public class SnapshotManagerTest extends AbstractActorTest {
     public void testTrimLogWhenLastAppliedZero() {
         doReturn(0L).when(mockRaftActorContext).getLastApplied();
 
-        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
-        doReturn(true).when(mockReplicatedLog).isPresent(10);
-        doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
-        doReturn(5L).when(replicatedLogEntry).getTerm();
-
         long retIndex = snapshotManager.trimLog(10);
         assertEquals("return index", -1L, retIndex);
 
         verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
-        verify(mockReplicatedLog, never()).snapshotCommit();
+        verify(mockReplicatedLog, never()).snapshotCommit(false);
 
         verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
     }
@@ -611,7 +602,7 @@ public class SnapshotManagerTest extends AbstractActorTest {
         assertEquals("return index", -1L, retIndex);
 
         verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
-        verify(mockReplicatedLog, never()).snapshotCommit();
+        verify(mockReplicatedLog, never()).snapshotCommit(false);
 
         // Trim index is greater than replicatedToAllIndex so should update it.
         verify(mockRaftActorBehavior).setReplicatedToAllIndex(10L);
@@ -624,19 +615,12 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         assertTrue(capture);
 
-        assertEquals(true, snapshotManager.isCapturing());
-
-        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
-        doReturn(20L).when(mockRaftActorContext).getLastApplied();
-        doReturn(true).when(mockReplicatedLog).isPresent(10);
-        doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
-        doReturn(5L).when(replicatedLogEntry).getTerm();
+        assertTrue(snapshotManager.isCapturing());
 
         snapshotManager.trimLog(10);
 
         verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
-        verify(mockReplicatedLog, never()).snapshotCommit();
-
+        verify(mockReplicatedLog, never()).snapshotCommit(false);
     }
 
     @Test
@@ -646,19 +630,12 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         assertTrue(capture);
 
-        assertEquals(true, snapshotManager.isCapturing());
-
-        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
-        doReturn(20L).when(mockRaftActorContext).getLastApplied();
-        doReturn(true).when(mockReplicatedLog).isPresent(10);
-        doReturn(replicatedLogEntry).when(mockReplicatedLog).get(10);
-        doReturn(5L).when(replicatedLogEntry).getTerm();
+        assertTrue(snapshotManager.isCapturing());
 
         snapshotManager.trimLog(10);
 
         verify(mockReplicatedLog, never()).snapshotPreCommit(10, 5);
         verify(mockReplicatedLog, never()).snapshotCommit();
-
     }
 
     @Test
index c71c8b7de4fd8f204aa7667eea5103644527d0d5..96f4fe8c6e5172126291ed7358526b403d4a25f2 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.raft;
 
+import static org.junit.Assert.assertTrue;
+
 import akka.actor.Actor;
 import akka.actor.ActorIdentity;
 import akka.actor.ActorRef;
@@ -22,10 +24,10 @@ import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
 import com.google.common.base.Stopwatch;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.LinkedList;
+import java.time.Duration;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
@@ -48,10 +50,10 @@ public class TestActorFactory implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(TestActorFactory.class);
 
     private final ActorSystem system;
-    List<ActorRef> createdActors = new LinkedList<>();
+    private final List<ActorRef> createdActors = new ArrayList<>();
     private static int actorCount = 1;
 
-    public TestActorFactory(ActorSystem system) {
+    public TestActorFactory(final ActorSystem system) {
         this.system = system;
     }
 
@@ -61,7 +63,7 @@ public class TestActorFactory implements AutoCloseable {
      * @param props the actor Props
      * @return the ActorRef
      */
-    public ActorRef createActor(Props props) {
+    public ActorRef createActor(final Props props) {
         ActorRef actorRef = system.actorOf(props);
         return addActor(actorRef, true);
     }
@@ -73,7 +75,7 @@ public class TestActorFactory implements AutoCloseable {
      * @param actorId name of actor
      * @return the ActorRef
      */
-    public ActorRef createActor(Props props, String actorId) {
+    public ActorRef createActor(final Props props, final String actorId) {
         ActorRef actorRef = system.actorOf(props, actorId);
         return addActor(actorRef, true);
     }
@@ -85,7 +87,7 @@ public class TestActorFactory implements AutoCloseable {
      * @param actorId name of actor
      * @return the ActorRef
      */
-    public ActorRef createActorNoVerify(Props props, String actorId) {
+    public ActorRef createActorNoVerify(final Props props, final String actorId) {
         ActorRef actorRef = system.actorOf(props, actorId);
         return addActor(actorRef, false);
     }
@@ -99,7 +101,7 @@ public class TestActorFactory implements AutoCloseable {
      * @return the ActorRef
      */
     @SuppressWarnings("unchecked")
-    public <T extends Actor> TestActorRef<T> createTestActor(Props props, String actorId) {
+    public <T extends Actor> TestActorRef<T> createTestActor(final Props props, final String actorId) {
         InvalidActorNameException lastError = null;
         for (int i = 0; i < 10; i++) {
             try {
@@ -122,12 +124,12 @@ public class TestActorFactory implements AutoCloseable {
      * @return the TestActorRef
      */
     @SuppressWarnings("unchecked")
-    public <T extends Actor> TestActorRef<T> createTestActor(Props props) {
+    public <T extends Actor> TestActorRef<T> createTestActor(final Props props) {
         TestActorRef<T> actorRef = TestActorRef.create(system, props);
         return (TestActorRef<T>) addActor(actorRef, true);
     }
 
-    private <T extends ActorRef> ActorRef addActor(T actorRef, boolean verify) {
+    private <T extends ActorRef> ActorRef addActor(final T actorRef, final boolean verify) {
         createdActors.add(actorRef);
         if (verify) {
             verifyActorReady(actorRef);
@@ -137,7 +139,7 @@ public class TestActorFactory implements AutoCloseable {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void verifyActorReady(ActorRef actorRef) {
+    private void verifyActorReady(final ActorRef actorRef) {
         // Sometimes we see messages go to dead letters soon after creation - it seems the actor isn't quite
         // in a state yet to receive messages or isn't actually created yet. This seems to happen with
         // actorSelection so, to alleviate it, we use an actorSelection and send an Identify message with
@@ -151,7 +153,7 @@ public class TestActorFactory implements AutoCloseable {
                 ActorSelection actorSelection = system.actorSelection(actorRef.path().toString());
                 Future<Object> future = Patterns.ask(actorSelection, new Identify(""), timeout);
                 ActorIdentity reply = (ActorIdentity)Await.result(future, timeout.duration());
-                Assert.assertTrue("Identify returned non-present", reply.getActorRef().isPresent());
+                assertTrue("Identify returned non-present", reply.getActorRef().isPresent());
                 return;
             } catch (Exception | AssertionError e) {
                 Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
@@ -168,26 +170,26 @@ public class TestActorFactory implements AutoCloseable {
      * @param prefix the name prefix
      * @return the actor name
      */
-    public String generateActorId(String prefix) {
+    public String generateActorId(final String prefix) {
         return prefix + actorCount++;
     }
 
-    public void killActor(ActorRef actor, TestKit kit) {
+    public void killActor(final ActorRef actor, final TestKit kit) {
         killActor(actor, kit, true);
     }
 
-    private void killActor(ActorRef actor, TestKit kit, boolean remove) {
+    private void killActor(final ActorRef actor, final TestKit kit, final boolean remove) {
         LOG.info("Killing actor {}", actor);
         kit.watch(actor);
         actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
-        kit.expectTerminated(kit.duration("5 seconds"), actor);
+        kit.expectTerminated(Duration.ofSeconds(5), actor);
 
         if (remove) {
             createdActors.remove(actor);
         }
     }
 
-    public String createTestActorPath(String actorId) {
+    public String createTestActorPath(final String actorId) {
         return "akka://test/user/" + actorId;
     }
 
index 26cdb22d8ccc42569dc04bddf4d5ad9da7bb6cb9..2a58dd1d4f15aa0be51c1dfe536de028fe407a2c 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.raft.base.messages;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,10 +19,11 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class TimeoutNowTest {
-
     @Test
     public void test() {
-        TimeoutNow cloned = (TimeoutNow) SerializationUtils.clone(TimeoutNow.INSTANCE);
+        final var bytes = SerializationUtils.serialize(TimeoutNow.INSTANCE);
+        assertEquals(86, bytes.length);
+        final var cloned = SerializationUtils.deserialize(bytes);
         assertSame("Cloned instance", TimeoutNow.INSTANCE, cloned);
     }
 }
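
The assertSame in the rewritten test can only hold if deserialization resolves back to the shared instance. A minimal sketch of that pattern follows; it is an assumption about how TimeoutNow (or its serialization proxy) behaves, since the actual class is not part of this diff.

// Hedged sketch: a serializable singleton that keeps its identity across a serialize/deserialize round trip.
final class SingletonMessage implements java.io.Serializable {
    private static final long serialVersionUID = 1L;
    static final SingletonMessage INSTANCE = new SingletonMessage();

    private SingletonMessage() {
        // no state to carry
    }

    private Object readResolve() {
        // Deserialization hands back the shared instance, which is why assertSame(INSTANCE, cloned) can pass.
        return INSTANCE;
    }
}
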
index 7695d05133e6cd18e4d97a35282d9931995a2f01..3497840b386d650600d7926fac0d421d28fdb1d8 100644 (file)
@@ -32,12 +32,12 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.TestActorFactory;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
index 6dd5336716c93419ee39ae5caa5d7ef76f3cbcd7..b04c9e39715acbb3566e2c5f11e77a0f132cbcf4 100644 (file)
@@ -15,6 +15,7 @@ import akka.actor.ActorRef;
 import akka.dispatch.Dispatchers;
 import akka.testkit.TestActorRef;
 import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -178,7 +179,7 @@ public class CandidateTest extends AbstractRaftActorBehaviorTest<Candidate> {
         Mockito.doReturn(1L).when(mockElectionTerm).getCurrentTerm();
         RaftActorContext raftActorContext = new RaftActorContextImpl(candidateActor, candidateActor.actorContext(),
                 "candidate", mockElectionTerm, -1, -1, setupPeers(4), new DefaultConfigParamsImpl(),
-                new NonPersistentDataProvider(Runnable::run), applyState -> { }, LOG);
+                new NonPersistentDataProvider(Runnable::run), applyState -> { }, LOG, MoreExecutors.directExecutor());
         raftActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
         raftActorContext.getPeerInfo("peer1").setVotingState(VotingState.NON_VOTING);
         raftActorContext.getPeerInfo("peer4").setVotingState(VotingState.NON_VOTING);
index d481e6f1491ddaf82e11d7f8b000dab5b3d301b4..52985fd3c134ef82cdf3637c4b966a10b961049c 100644 (file)
@@ -15,6 +15,7 @@ import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
@@ -148,15 +149,19 @@ public class DelayedMessagesElectionScenarioTest extends AbstractLeaderElectionS
         // should switch to Candidate and send out RequestVote messages. Set member 1 and 3 actors
         // to capture RequestVote but not to forward to the behavior just yet as we want to
         // control the order of RequestVote messages to member 1 and 3.
-
-        member1Actor.dropMessagesToBehavior(RequestVote.class);
-
         member2Actor.expectBehaviorStateChange();
 
+        // member 1 and member 3 may reach consensus and consider the leader's initial Noop entry committed, in
+        // which case the leader would relay that information to member 2 via AppendEntries.
+        // We do not want that, as member 2 would respond to that request either before or after it bumps its
+        // term -- and if it saw the message post-bump, its reply would leak term 2 back to member 1, so the leader
+        // would learn about the new term.
+        member2Actor.dropMessagesToBehavior(AppendEntries.class);
+
+        member1Actor.dropMessagesToBehavior(RequestVote.class);
         member3Actor.dropMessagesToBehavior(RequestVote.class);
 
         member2ActorRef.tell(TimeoutNow.INSTANCE, ActorRef.noSender());
-
         member1Actor.waitForExpectedMessages(RequestVote.class);
         member3Actor.waitForExpectedMessages(RequestVote.class);
 
index a9305a6862f32f3737a194ad6ef601f6e77baff0..d2aa7d013cc5ac374291192d841bbbf81c9b7ab9 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class FollowerIdentifierTest {
-
     @Test
     public void testSerialization() {
-        FollowerIdentifier expected = new FollowerIdentifier("follower1");
-        FollowerIdentifier cloned = (FollowerIdentifier) SerializationUtils.clone(expected);
+        final var expected = new FollowerIdentifier("follower1");
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(87, bytes.length);
+        final var cloned = (FollowerIdentifier) SerializationUtils.deserialize(bytes);
         assertEquals("cloned", expected, cloned);
     }
 }
index 2ba06d6925efbde5933bc042505d1f39c0cff2ab..8006d5a6ac906ed6a11598b59f5c6aba6269f109 100644 (file)
@@ -5,9 +5,9 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -25,22 +25,16 @@ import akka.dispatch.Dispatchers;
 import akka.protobuf.ByteString;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.base.Optional;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.io.ByteSource;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.MockRaftActor;
@@ -158,7 +152,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         Uninterruptibles.sleepUninterruptibly(context.getConfigParams()
                 .getElectionTimeOutInterval().toMillis() - 100, TimeUnit.MILLISECONDS);
-        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(),
+        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(),
                 -1, -1, (short) 1));
 
         Uninterruptibles.sleepUninterruptibly(130, TimeUnit.MILLISECONDS);
@@ -167,7 +161,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         Uninterruptibles.sleepUninterruptibly(context.getConfigParams()
                 .getElectionTimeOutInterval().toMillis() - 150, TimeUnit.MILLISECONDS);
-        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(),
+        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(),
                 -1, -1, (short) 1));
 
         Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
@@ -222,10 +216,9 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().append(newReplicatedLogEntry(1,100, "bar"));
         context.getReplicatedLog().setSnapshotIndex(99);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
-        Assert.assertEquals(1, context.getReplicatedLog().size());
+        assertEquals(1, context.getReplicatedLog().size());
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -247,8 +240,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
@@ -273,8 +265,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().append(newReplicatedLogEntry(1, 100, "bar"));
         context.getReplicatedLog().setSnapshotIndex(99);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
@@ -298,8 +289,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().clear(0,2);
         context.getReplicatedLog().setSnapshotIndex(100);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 101, 100, (short) 0);
@@ -324,8 +314,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().clear(0,2);
         context.getReplicatedLog().setSnapshotIndex(100);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 105, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 105, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", -1, -1, entries, 105, 100, (short) 0);
@@ -347,8 +336,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -368,7 +356,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setCommitIndex(101);
         setLastLogEntry(context, 1, 101, new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0);
@@ -395,8 +383,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -416,8 +403,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 100,
                 new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // leader-2 is becoming the leader now and it says the commitIndex is 45
         appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0);
@@ -435,8 +421,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -457,8 +442,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 101,
                 new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         appendEntries = new AppendEntries(2, "leader-1", 101, 1, entries, 102, 101, (short)0);
@@ -475,8 +459,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 100,
                 new MockRaftActorContext.MockPayload(""));
 
-        entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // leader-2 is becoming the leader now and it says the commitIndex is 45
         appendEntries = new AppendEntries(2, "leader-2", 45, 1, entries, 46, 100, (short)0);
@@ -505,8 +488,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
                 new MockRaftActorContext.MockPayload(""));
         context.getReplicatedLog().setSnapshotIndex(99);
 
-        List<ReplicatedLogEntry> entries = Arrays.<ReplicatedLogEntry>asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader-1", 100, 1, entries, 101, 100, (short)0);
@@ -528,13 +510,13 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         MockRaftActorContext context = createActorContext();
 
-        AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, Collections.emptyList(), 101, -1, (short)0);
+        AppendEntries appendEntries = new AppendEntries(2, "leader", 0, 2, List.of(), 101, -1, (short)0);
 
         follower = createBehavior(context);
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor,
                 AppendEntriesReply.class);
@@ -551,13 +533,13 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.getReplicatedLog().setSnapshotIndex(4);
         context.getReplicatedLog().setSnapshotTerm(3);
 
-        AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, Collections.emptyList(), 8, -1, (short)0);
+        AppendEntries appendEntries = new AppendEntries(3, "leader", 1, 3, List.of(), 8, -1, (short)0);
 
         follower = createBehavior(context);
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
 
@@ -588,9 +570,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(1, 3, "three"));
-        entries.add(newReplicatedLogEntry(1, 4, "four"));
+        List<ReplicatedLogEntry> entries = List.of(
+            newReplicatedLogEntry(1, 3, "three"), newReplicatedLogEntry(1, 4, "four"));
 
         // Send appendEntries with the same term as was set on the receiver
         // before the new behavior was created (1 in this case)
@@ -604,7 +585,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         assertEquals("Next index", 5, log.last().getIndex() + 1);
         assertEquals("Entry 3", entries.get(0), log.get(3));
@@ -640,9 +621,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(2, 2, "two-1"));
-        entries.add(newReplicatedLogEntry(2, 3, "three"));
+        List<ReplicatedLogEntry> entries = List.of(
+            newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three"));
 
         // Send appendEntries with the same term as was set on the receiver
         // before the new behavior was created (1 in this case)
@@ -654,7 +634,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         // The entry at index 2 will be found out-of-sync with the leader
         // and will be removed
@@ -691,9 +671,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(2, 2, "two-1"));
-        entries.add(newReplicatedLogEntry(2, 3, "three"));
+        List<ReplicatedLogEntry> entries = List.of(
+            newReplicatedLogEntry(2, 2, "two-1"), newReplicatedLogEntry(2, 3, "three"));
 
         // Send appendEntries with the same term as was set on the receiver
         // before the new behavior was created (1 in this case)
@@ -706,7 +685,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         expectAndVerifyAppendEntriesReply(2, false, context.getId(), 1, 2, true);
     }
@@ -726,8 +705,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(1, 4, "four"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 4, "four"));
 
         AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, -1, (short)0);
 
@@ -735,7 +713,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         expectAndVerifyAppendEntriesReply(1, false, context.getId(), 1, 2);
     }
@@ -756,7 +734,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Send the last entry again.
-        List<ReplicatedLogEntry> entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 1, "one"));
 
         follower = createBehavior(context);
 
@@ -769,7 +747,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         // Send the last entry again and also a new one.
 
-        entries = Arrays.asList(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two"));
+        entries = List.of(newReplicatedLogEntry(1, 1, "one"), newReplicatedLogEntry(1, 2, "two"));
 
         MessageCollectorActor.clearMessages(leaderActor);
         follower.handleMessage(leaderActor, new AppendEntries(1, "leader", 0, 1, entries, 2, -1, (short)0));
@@ -797,8 +775,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         context.setReplicatedLog(log);
 
         // Prepare the entries to be sent with AppendEntries
-        List<ReplicatedLogEntry> entries = new ArrayList<>();
-        entries.add(newReplicatedLogEntry(1, 4, "four"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(1, 4, "four"));
 
         AppendEntries appendEntries = new AppendEntries(1, "leader", 3, 1, entries, 4, 3, (short)0);
 
@@ -806,7 +783,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         RaftActorBehavior newBehavior = follower.handleMessage(leaderActor, appendEntries);
 
-        Assert.assertSame(follower, newBehavior);
+        assertSame(follower, newBehavior);
 
         expectAndVerifyAppendEntriesReply(1, true, context.getId(), 1, 4);
     }
@@ -854,7 +831,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
                 snapshot.getLastAppliedIndex());
         assertEquals("getLastTerm", lastInstallSnapshot.getLastIncludedTerm(), snapshot.getLastTerm());
         assertEquals("getState type", ByteState.class, snapshot.getState().getClass());
-        Assert.assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes());
+        assertArrayEquals("getState", bsSnapshot.toByteArray(), ((ByteState)snapshot.getState()).getBytes());
         assertEquals("getElectionTerm", 1, snapshot.getElectionTerm());
         assertEquals("getElectionVotedFor", "leader", snapshot.getElectionVotedFor());
         applySnapshot.getCallback().onSuccess();
@@ -908,7 +885,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         // Send an append entry
         AppendEntries appendEntries = new AppendEntries(1, "leader", 1, 1,
-                Arrays.asList(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1);
+                List.of(newReplicatedLogEntry(2, 1, "3")), 2, -1, (short)1);
 
         follower.handleMessage(leaderActor, appendEntries);
 
@@ -951,7 +928,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
 
         // Send appendEntries with a new term and leader.
         AppendEntries appendEntries = new AppendEntries(2, "new-leader", 1, 1,
-                Arrays.asList(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1);
+                List.of(newReplicatedLogEntry(2, 2, "3")), 2, -1, (short)1);
 
         follower.handleMessage(leaderActor, appendEntries);
 
@@ -1005,8 +982,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         setLastLogEntry(context, 1, 101,
                 new MockRaftActorContext.MockPayload(""));
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
-                newReplicatedLogEntry(2, 101, "foo"));
+        List<ReplicatedLogEntry> entries = List.of(newReplicatedLogEntry(2, 101, "foo"));
 
         // The new commitIndex is 101
         AppendEntries appendEntries = new AppendEntries(2, "leader", 101, 1, entries, 102, 101, (short)0);
@@ -1082,7 +1058,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
     @Test
     public void testFollowerSchedulesElectionIfNonVoting() {
         MockRaftActorContext context = createActorContext();
-        context.updatePeerIds(new ServerConfigurationPayload(Arrays.asList(new ServerInfo(context.getId(), false))));
+        context.updatePeerIds(new ServerConfigurationPayload(List.of(new ServerInfo(context.getId(), false))));
         ((DefaultConfigParamsImpl)context.getConfigParams()).setHeartBeatInterval(
                 FiniteDuration.apply(100, TimeUnit.MILLISECONDS));
         ((DefaultConfigParamsImpl)context.getConfigParams()).setElectionTimeoutFactor(1);
@@ -1133,7 +1109,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
         RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
         Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
-                .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+                .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
         TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), id);
         followerRaftActor.set(followerActorRef.underlyingActor());
@@ -1143,7 +1119,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         InMemoryJournal.addDeleteMessagesCompleteLatch(id);
         InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
+        List<ReplicatedLogEntry> entries = List.of(
                 newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"));
 
         AppendEntries appendEntries = new AppendEntries(1, "leader", -1, -1, entries, 1, -1, (short)0);
@@ -1170,7 +1146,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Snapshot getLastAppliedIndex", 1, snapshot.getLastAppliedIndex());
         assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
         assertEquals("Snapshot getLastIndex", 1, snapshot.getLastIndex());
-        assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData()),
+        assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData()),
                 MockRaftActor.fromState(snapshot.getState()));
     }
 
@@ -1188,7 +1164,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
         RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
         Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
-                .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+                .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
         TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), id);
         followerRaftActor.set(followerActorRef.underlyingActor());
@@ -1198,7 +1174,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         InMemoryJournal.addDeleteMessagesCompleteLatch(id);
         InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
+        List<ReplicatedLogEntry> entries = List.of(
                 newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"),
                 newReplicatedLogEntry(1, 2, "three"));
 
@@ -1226,7 +1202,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Snapshot getLastAppliedIndex", 2, snapshot.getLastAppliedIndex());
         assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
         assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex());
-        assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(),
+        assertEquals("Snapshot state", List.of(entries.get(0).getData(), entries.get(1).getData(),
                 entries.get(2).getData()), MockRaftActor.fromState(snapshot.getState()));
 
         assertEquals("Journal size", 0, followerRaftActor.get().getReplicatedLog().size());
@@ -1245,7 +1221,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Last index", 2, followerRaftActor.get().getReplicatedLog().lastIndex());
         assertEquals("Last applied index", 2, followerRaftActor.get().getRaftActorContext().getLastApplied());
         assertEquals("Commit index", 2, followerRaftActor.get().getRaftActorContext().getCommitIndex());
-        assertEquals("State", ImmutableList.of(entries.get(0).getData(), entries.get(1).getData(),
+        assertEquals("State", List.of(entries.get(0).getData(), entries.get(1).getData(),
                 entries.get(2).getData()), followerRaftActor.get().getState());
     }
 
@@ -1263,7 +1239,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         final AtomicReference<MockRaftActor> followerRaftActor = new AtomicReference<>();
         RaftActorSnapshotCohort snapshotCohort = newRaftActorSnapshotCohort(followerRaftActor);
         Builder builder = MockRaftActor.builder().persistent(Optional.of(true)).id(id)
-                .peerAddresses(ImmutableMap.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
+                .peerAddresses(Map.of("leader", "")).config(config).snapshotCohort(snapshotCohort);
         TestActorRef<MockRaftActor> followerActorRef = actorFactory.createTestActor(builder.props()
                 .withDispatcher(Dispatchers.DefaultDispatcherId()), id);
         followerRaftActor.set(followerActorRef.underlyingActor());
@@ -1273,7 +1249,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         InMemoryJournal.addDeleteMessagesCompleteLatch(id);
         InMemoryJournal.addWriteMessagesCompleteLatch(id, 1, ApplyJournalEntries.class);
 
-        List<ReplicatedLogEntry> entries = Arrays.asList(
+        List<ReplicatedLogEntry> entries = List.of(
                 newReplicatedLogEntry(1, 0, "one"), newReplicatedLogEntry(1, 1, "two"),
                 newReplicatedLogEntry(1, 2, "three"));
 
@@ -1303,7 +1279,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         assertEquals("Snapshot getLastAppliedIndex", 0, snapshot.getLastAppliedIndex());
         assertEquals("Snapshot getLastTerm", 1, snapshot.getLastTerm());
         assertEquals("Snapshot getLastIndex", 2, snapshot.getLastIndex());
-        assertEquals("Snapshot state", ImmutableList.of(entries.get(0).getData()),
+        assertEquals("Snapshot state", List.of(entries.get(0).getData()),
                 MockRaftActor.fromState(snapshot.getState()));
     }
 
@@ -1319,7 +1295,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         follower = createBehavior(context);
 
         follower.handleMessage(leaderActor,
-                new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1, (short)0));
+                new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1, (short)0));
 
         AppendEntriesReply reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
         assertTrue(reply.isNeedsLeaderAddress());
@@ -1328,7 +1304,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         PeerAddressResolver mockResolver = mock(PeerAddressResolver.class);
         ((DefaultConfigParamsImpl)context.getConfigParams()).setPeerAddressResolver(mockResolver);
 
-        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, Collections.emptyList(), -1, -1,
+        follower.handleMessage(leaderActor, new AppendEntries(1, "leader", -1, -1, List.of(), -1, -1,
                 (short)0, RaftVersions.CURRENT_VERSION, leaderActor.path().toString()));
 
         reply = MessageCollectorActor.expectFirstMatching(leaderActor, AppendEntriesReply.class);
@@ -1342,8 +1318,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
             final AtomicReference<MockRaftActor> followerRaftActor) {
         RaftActorSnapshotCohort snapshotCohort = new RaftActorSnapshotCohort() {
             @Override
-            public void createSnapshot(final ActorRef actorRef,
-                    final java.util.Optional<OutputStream> installSnapshotStream) {
+            public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
                 try {
                     actorRef.tell(new CaptureSnapshotReply(new MockSnapshotState(followerRaftActor.get().getState()),
                             installSnapshotStream), actorRef);
@@ -1372,10 +1347,8 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
         int size = chunkSize;
         if (chunkSize > snapshotLength) {
             size = snapshotLength;
-        } else {
-            if (start + chunkSize > snapshotLength) {
-                size = snapshotLength - start;
-            }
+        } else if (start + chunkSize > snapshotLength) {
+            size = snapshotLength - start;
         }
 
         byte[] nextChunk = new byte[size];
@@ -1412,12 +1385,7 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest<Follower> {
     }
 
     private ByteString createSnapshot() {
-        HashMap<String, String> followerSnapshot = new HashMap<>();
-        followerSnapshot.put("1", "A");
-        followerSnapshot.put("2", "B");
-        followerSnapshot.put("3", "C");
-
-        return toByteString(followerSnapshot);
+        return toByteString(Map.of("1", "A", "2", "B", "3", "C"));
     }
 
     @Override
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderInstallSnapshotStateTest.java
new file mode 100644 (file)
index 0000000..aa07181
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.io.ByteSource;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.Objects;
+import org.junit.Test;
+
+public class LeaderInstallSnapshotStateTest {
+    // Prime number on purpose
+    private static final int CHUNK_SIZE = 9_999_991;
+    // More than Integer.MAX_VALUE
+    private static final long SIZE = 4_294_967_294L;
+
+    @Test
+    public void testSnapshotLongerThanInteger() throws IOException {
+        try (var fts = new LeaderInstallSnapshotState(CHUNK_SIZE, "test")) {
+            fts.setSnapshotBytes(new MockByteSource(SIZE));
+
+            int chunkIndex = 0;
+            long offset = 0;
+            long expectedChunkSize = CHUNK_SIZE;
+            while (offset < SIZE) {
+                offset = offset + CHUNK_SIZE;
+                if (offset > SIZE) {
+                    // We reached the last chunk
+                    expectedChunkSize = CHUNK_SIZE - (offset - SIZE);
+                    offset = SIZE;
+                }
+                chunkIndex++;
+                final byte[] chunk = fts.getNextChunk();
+                assertEquals("byte size not matching for chunk:", expectedChunkSize, chunk.length);
+                assertEquals("chunk index not matching", chunkIndex, fts.getChunkIndex());
+                fts.markSendStatus(true);
+                if (!fts.isLastChunk(chunkIndex)) {
+                    fts.incrementChunkIndex();
+                }
+            }
+
+            assertEquals("totalChunks not matching", chunkIndex, fts.getTotalChunks());
+        }
+    }
+
+    private static final class MockByteSource extends ByteSource {
+        private final long size;
+
+        private MockByteSource(final long size) {
+            this.size = size;
+        }
+
+        @Override
+        public long size() {
+            return size;
+        }
+
+        @Override
+        public InputStream openStream() {
+            return new MockInputStream(size);
+        }
+    }
+
+    private static final class MockInputStream extends InputStream {
+        private long remaining;
+
+        MockInputStream(final long size) {
+            remaining = size;
+        }
+
+        @Override
+        public int read() {
+            if (remaining > 0) {
+                remaining--;
+                return 0;
+            }
+            return -1;
+        }
+
+        @Override
+        public int read(final byte[] bytes, final int off, final int len) {
+            Objects.checkFromIndexSize(off, len, bytes.length);
+            if (remaining <= 0) {
+                return -1;
+            }
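+            // Serve at most len zero bytes; the cast only happens when remaining < len, so it cannot overflow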
+            final int count = len <= remaining ? len : (int) remaining;
+            Arrays.fill(bytes, off, off + count, (byte) 0);
+            remaining -= count;
+            return count;
+        }
+    }
+}
index 78ec33cba12deca93e9be5746eb7ce682301f2ac..0f16f92c4951ef4c018e9aeebe977d8ce447e754 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
 import static org.junit.Assert.assertEquals;
@@ -26,17 +25,16 @@ import akka.actor.Terminated;
 import akka.protobuf.ByteString;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
 import com.google.common.io.ByteSource;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalInt;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.lang3.SerializationUtils;
@@ -65,6 +63,7 @@ import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RaftRPC;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
@@ -73,7 +72,6 @@ import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEnt
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.policy.DefaultRaftPolicy;
 import org.opendaylight.controller.cluster.raft.policy.RaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.ForwardMessageToBehaviorActor;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.yangtools.concepts.Identifier;
@@ -163,14 +161,15 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         return sendReplicate(actorContext, 1, index);
     }
 
-    private RaftActorBehavior sendReplicate(MockRaftActorContext actorContext, long term, long index) {
+    private RaftActorBehavior sendReplicate(final MockRaftActorContext actorContext, final long term,
+            final long index) {
         return sendReplicate(actorContext, term, index, new MockRaftActorContext.MockPayload("foo"));
     }
 
-    private RaftActorBehavior sendReplicate(MockRaftActorContext actorContext, long term, long index, Payload payload) {
-        SimpleReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry(index, term, payload);
-        actorContext.getReplicatedLog().append(newEntry);
-        return leader.handleMessage(leaderActor, new Replicate(null, null, newEntry, true));
+    private RaftActorBehavior sendReplicate(final MockRaftActorContext actorContext, final long term, final long index,
+            final Payload payload) {
+        actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(index, term, payload));
+        return leader.handleMessage(leaderActor, new Replicate(index, true, null, null));
     }
 
     @Test
@@ -375,22 +374,43 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
             sendReplicate(actorContext, lastIndex + i + 1);
             leader.handleMessage(followerActor, new AppendEntriesReply(
                     FOLLOWER_ID, term, true, lastIndex + i + 1, term, (short)0));
-
         }
 
-        for (int i = 3; i < 5; i++) {
-            sendReplicate(actorContext, lastIndex + i + 1);
+        // We are expecting six messages here -- a request to replicate and a consensus-reached message per entry
+        List<AppendEntries> allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+        assertEquals("The number of request/consensus appends collected", 6, allMessages.size());
+        for (int i = 0; i < 3; i++) {
+            assertRequestEntry(lastIndex, allMessages, i);
+            assertCommitEntry(lastIndex, allMessages, i);
         }
 
-        List<AppendEntries> allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
-        // We expect 4 here because the first 3 replicate got a reply and so the 4th entry would
-        // get sent to the follower - but not the 5th
-        assertEquals("The number of append entries collected should be 4", 4, allMessages.size());
+        // Now replicate another entry, eliciting a request to replicate
+        sendReplicate(actorContext, lastIndex + 3 + 1);
+        allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+        // This elicits one more AppendEntries request to replicate
+        assertEquals("The number of request entries collected", 7, allMessages.size());
+        assertRequestEntry(lastIndex, allMessages, 3);
 
-        for (int i = 0; i < 4; i++) {
-            long expected = allMessages.get(i).getEntries().get(0).getIndex();
-            assertEquals(expected, i + 2);
-        }
+        sendReplicate(actorContext, lastIndex + 4 + 1);
+        allMessages = MessageCollectorActor.getAllMatching(followerActor, AppendEntries.class);
+        assertEquals("The number of request entries collected", 7, allMessages.size());
+    }
+
+    private static void assertCommitEntry(final long lastIndex, final List<AppendEntries> allMessages,
+            final int messageNr) {
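+        // Request and commit (empty-entries) messages alternate, so the commit for entry N sits at index 2 * N + 1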
+        final AppendEntries commitReq = allMessages.get(2 * messageNr + 1);
+        assertEquals(lastIndex + messageNr + 1, commitReq.getLeaderCommit());
+        assertEquals(List.of(), commitReq.getEntries());
+    }
+
+    private static void assertRequestEntry(final long lastIndex, final List<AppendEntries> allMessages,
+            final int messageNr) {
+        final AppendEntries req = allMessages.get(2 * messageNr);
+        assertEquals(lastIndex + messageNr, req.getLeaderCommit());
+
+        final List<ReplicatedLogEntry> entries = req.getEntries();
+        assertEquals(1, entries.size());
+        assertEquals(messageNr + 2, entries.get(0).getIndex());
     }
 
     @Test
@@ -525,16 +545,14 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         actorContext.setLastApplied(0);
 
-        long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1;
-        long term = actorContext.getTermInformation().getCurrentTerm();
-        ReplicatedLogEntry newEntry = new SimpleReplicatedLogEntry(
-                newLogIndex, term, new MockRaftActorContext.MockPayload("foo"));
+        final long newLogIndex = actorContext.getReplicatedLog().lastIndex() + 1;
+        final long term = actorContext.getTermInformation().getCurrentTerm();
+        final var data = new MockRaftActorContext.MockPayload("foo");
 
-        actorContext.getReplicatedLog().append(newEntry);
+        actorContext.getReplicatedLog().append(new SimpleReplicatedLogEntry(newLogIndex, term, data));
 
         final Identifier id = new MockIdentifier("state-id");
-        RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
-                new Replicate(leaderActor, id, newEntry, true));
+        final var raftBehavior = leader.handleMessage(leaderActor, new Replicate(newLogIndex, true, leaderActor, id));
 
         // State should not change
         assertTrue(raftBehavior instanceof Leader);
@@ -543,8 +561,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         // We should get 2 ApplyState messages - 1 for new log entry and 1 for the previous
         // one since lastApplied state is 0.
-        List<ApplyState> applyStateList = MessageCollectorActor.getAllMatching(
-                leaderActor, ApplyState.class);
+        final var applyStateList = MessageCollectorActor.getAllMatching(leaderActor, ApplyState.class);
         assertEquals("ApplyState count", newLogIndex, applyStateList.size());
 
         for (int i = 0; i <= newLogIndex - 1; i++) {
@@ -554,7 +571,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         }
 
         ApplyState last = applyStateList.get((int) newLogIndex - 1);
-        assertEquals("getData", newEntry.getData(), last.getReplicatedLogEntry().getData());
+        assertEquals("getData", data, last.getReplicatedLogEntry().getData());
         assertEquals("getIdentifier", id, last.getIdentifier());
     }
 
@@ -564,11 +581,6 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         final MockRaftActorContext actorContext = createActorContextWithFollower();
 
-        Map<String, String> leadersSnapshot = new HashMap<>();
-        leadersSnapshot.put("1", "A");
-        leadersSnapshot.put("2", "B");
-        leadersSnapshot.put("3", "C");
-
         //clears leaders log
         actorContext.getReplicatedLog().removeFrom(0);
 
@@ -591,12 +603,12 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
 
-        ByteString bs = toByteString(leadersSnapshot);
+        ByteString bs = toByteString(Map.of("1", "A", "2", "B", "3", "C"));
         leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
                 -1, null, null), ByteSource.wrap(bs.toByteArray())));
         LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState(
-                actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName());
+                actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName());
         fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray()));
         leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts);
 
@@ -654,18 +666,15 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
 
         // new entry
-        SimpleReplicatedLogEntry entry =
-                new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
-                        new MockRaftActorContext.MockPayload("D"));
-
-        actorContext.getReplicatedLog().append(entry);
+        actorContext.getReplicatedLog().append(
+            new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
 
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
 
         // this should invoke a sendinstallsnapshot as followersLastIndex < snapshotIndex
         RaftActorBehavior raftBehavior = leader.handleMessage(
-                leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+                leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
 
         assertTrue(raftBehavior instanceof Leader);
 
@@ -702,15 +711,13 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.setSnapshotHolder(null);
 
         // new entry
-        SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
-                new MockRaftActorContext.MockPayload("D"));
-
-        actorContext.getReplicatedLog().append(entry);
+        actorContext.getReplicatedLog().append(
+            new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
 
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
 
-        leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+        leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
 
         assertEquals("isCapturing", true, actorContext.getSnapshotManager().isCapturing());
 
@@ -722,7 +729,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         assertEquals(2, cs.getLastTerm());
 
         // if an initiate is started again when first is in progress, it shouldnt initiate Capture
-        leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+        leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
 
         assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
     }
@@ -747,7 +754,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         actorContext.getReplicatedLog().removeFrom(0);
 
-        AtomicReference<java.util.Optional<OutputStream>> installSnapshotStream = new AtomicReference<>();
+        AtomicReference<Optional<OutputStream>> installSnapshotStream = new AtomicReference<>();
         actorContext.setCreateSnapshotProcedure(installSnapshotStream::set);
 
         leader = new Leader(actorContext);
@@ -765,10 +772,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         }
 
         // new entry
-        SimpleReplicatedLogEntry entry = new SimpleReplicatedLogEntry(newEntryIndex, currentTerm,
-                new MockRaftActorContext.MockPayload("D"));
-
-        actorContext.getReplicatedLog().append(entry);
+        actorContext.getReplicatedLog().append(
+            new SimpleReplicatedLogEntry(newEntryIndex, currentTerm, new MockRaftActorContext.MockPayload("D")));
 
         //update follower timestamp
         leader.markFollowerActive(FOLLOWER_ID);
@@ -792,7 +797,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.clearMessages(followerActor);
 
         // Sending Replicate message should not initiate another capture since the first is in progress.
-        leader.handleMessage(leaderActor, new Replicate(null, new MockIdentifier("state-id"), entry, true));
+        leader.handleMessage(leaderActor, new Replicate(newEntryIndex, true, null, new MockIdentifier("state-id")));
         assertSame("CaptureSnapshot instance", cs, actorContext.getSnapshotManager().getCaptureSnapshot());
 
         // Similarly sending another AppendEntriesReply to force a snapshot should not initiate another capture.
@@ -802,7 +807,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         // Now simulate the CaptureSnapshotReply to initiate snapshot install - the first chunk should be sent.
         final byte[] bytes = new byte[]{1, 2, 3};
-        installSnapshotStream.get().get().write(bytes);
+        installSnapshotStream.get().orElseThrow().write(bytes);
         actorContext.getSnapshotManager().persist(ByteState.of(bytes), installSnapshotStream.get(),
                 Runtime.getRuntime().totalMemory());
         MessageCollectorActor.expectFirstMatching(followerActor, InstallSnapshot.class);
@@ -850,7 +855,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.getFollower(FOLLOWER_ID).setNextIndex(0);
 
         byte[] bytes = toByteString(leadersSnapshot).toByteArray();
-        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.<ReplicatedLogEntry>emptyList(),
+        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(),
                 lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null);
 
         RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
@@ -902,7 +907,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.getFollower(FOLLOWER_ID).setNextIndex(-1);
 
         byte[] bytes = toByteString(leadersSnapshot).toByteArray();
-        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), Collections.<ReplicatedLogEntry>emptyList(),
+        Snapshot snapshot = Snapshot.create(ByteState.of(bytes), List.of(),
                 lastAppliedIndex, snapshotTerm, lastAppliedIndex, snapshotTerm, -1, null, null);
 
         RaftActorBehavior raftBehavior = leader.handleMessage(leaderActor,
@@ -957,10 +962,10 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         leader.setSnapshotHolder(new SnapshotHolder(Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
                 -1, null, null), ByteSource.wrap(bs.toByteArray())));
         LeaderInstallSnapshotState fts = new LeaderInstallSnapshotState(
-                actorContext.getConfigParams().getSnapshotChunkSize(), leader.logName());
+                actorContext.getConfigParams().getMaximumMessageSliceSize(), leader.logName());
         fts.setSnapshotBytes(ByteSource.wrap(bs.toByteArray()));
         leader.getFollower(FOLLOWER_ID).setLeaderInstallSnapshotState(fts);
         while (!fts.isLastChunk(fts.getChunkIndex())) {
@@ -998,7 +1003,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         DefaultConfigParamsImpl configParams = new DefaultConfigParamsImpl() {
             @Override
-            public int getSnapshotChunkSize() {
+            public int getMaximumMessageSliceSize() {
                 return 50;
             }
         };
@@ -1026,8 +1031,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-                -1, null, null);
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
 
         leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
 
@@ -1076,7 +1080,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         actorContext.setConfigParams(new DefaultConfigParamsImpl() {
             @Override
-            public int getSnapshotChunkSize() {
+            public int getMaximumMessageSliceSize() {
                 return 50;
             }
         });
@@ -1100,8 +1104,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-                -1, null, null);
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
 
         Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
         leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
@@ -1141,7 +1144,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         actorContext.setConfigParams(new DefaultConfigParamsImpl() {
             @Override
-            public int getSnapshotChunkSize() {
+            public int getMaximumMessageSliceSize() {
                 return 50;
             }
         });
@@ -1165,8 +1168,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         ByteString bs = toByteString(leadersSnapshot);
         Snapshot snapshot = Snapshot.create(ByteState.of(bs.toByteArray()),
-                Collections.<ReplicatedLogEntry>emptyList(), commitIndex, snapshotTerm, commitIndex, snapshotTerm,
-                -1, null, null);
+                List.of(), commitIndex, snapshotTerm, commitIndex, snapshotTerm, -1, null, null);
 
         leader.handleMessage(leaderActor, new SendInstallSnapshot(snapshot, ByteSource.wrap(bs.toByteArray())));
 
@@ -1175,8 +1177,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         assertEquals(1, installSnapshot.getChunkIndex());
         assertEquals(3, installSnapshot.getTotalChunks());
-        assertEquals(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE,
-                installSnapshot.getLastChunkHashCode().get().intValue());
+        assertEquals(OptionalInt.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE),
+                installSnapshot.getLastChunkHashCode());
 
         final int hashCode = Arrays.hashCode(installSnapshot.getData());
 
@@ -1189,7 +1191,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
         assertEquals(2, installSnapshot.getChunkIndex());
         assertEquals(3, installSnapshot.getTotalChunks());
-        assertEquals(hashCode, installSnapshot.getLastChunkHashCode().get().intValue());
+        assertEquals(OptionalInt.of(hashCode), installSnapshot.getLastChunkHashCode());
     }
 
     @Test
@@ -1259,8 +1261,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
 
     private MockRaftActorContext createActorContextWithFollower() {
         MockRaftActorContext actorContext = createActorContext();
-        actorContext.setPeerAddresses(ImmutableMap.<String, String>builder().put(FOLLOWER_ID,
-                followerActor.path().toString()).build());
+        actorContext.setPeerAddresses(Map.of(FOLLOWER_ID, followerActor.path().toString()));
         return actorContext;
     }
 
@@ -1269,7 +1270,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         DefaultConfigParamsImpl followerConfig = new DefaultConfigParamsImpl();
         followerConfig.setElectionTimeoutFactor(10000);
         followerActorContext.setConfigParams(followerConfig);
-        followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
+        followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString()));
         return followerActorContext;
     }
 
@@ -1335,7 +1336,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         final MockRaftActorContext leaderActorContext = createActorContext();
 
         MockRaftActorContext followerActorContext = createActorContext(FOLLOWER_ID, followerActor);
-        followerActorContext.setPeerAddresses(ImmutableMap.of(LEADER_ID, leaderActor.path().toString()));
+        followerActorContext.setPeerAddresses(Map.of(LEADER_ID, leaderActor.path().toString()));
 
         Follower follower = new Follower(followerActorContext);
         followerActor.underlyingActor().setBehavior(follower);
@@ -1716,7 +1717,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         FollowerLogInformation followerInfo = leader.getFollower(FOLLOWER_ID);
 
         assertEquals(payloadVersion, leader.getLeaderPayloadVersion());
-        assertEquals(RaftVersions.HELIUM_VERSION, followerInfo.getRaftVersion());
+        assertEquals(RaftVersions.FLUORINE_VERSION, followerInfo.getRaftVersion());
 
         AppendEntriesReply reply = new AppendEntriesReply(FOLLOWER_ID, 1, true, 2, 1, payloadVersion);
 
@@ -1770,7 +1771,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MockRaftActorContext leaderActorContext = createActorContextWithFollower();
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
                 new FiniteDuration(1000, TimeUnit.SECONDS));
-        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(2);
+        // Note: the size here depends on the entry size estimate
+        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(246);
 
         leaderActorContext.setReplicatedLog(
                 new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 4, 1).build());
@@ -2115,7 +2117,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.transferLeadership(mockTransferCohort);
 
         verify(mockTransferCohort, never()).transferComplete();
-        doReturn(Optional.absent()).when(mockTransferCohort).getRequestedFollowerId();
+        doReturn(Optional.empty()).when(mockTransferCohort).getRequestedFollowerId();
         MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
         leader.handleMessage(leaderActor, new AppendEntriesReply(FOLLOWER_ID, 1, true, 0, 1, (short)0));
 
@@ -2146,7 +2148,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.clearMessages(followerActor);
 
         RaftActorLeadershipTransferCohort mockTransferCohort = mock(RaftActorLeadershipTransferCohort.class);
-        doReturn(Optional.absent()).when(mockTransferCohort).getRequestedFollowerId();
+        doReturn(Optional.empty()).when(mockTransferCohort).getRequestedFollowerId();
         leader.transferLeadership(mockTransferCohort);
 
         verify(mockTransferCohort, never()).transferComplete();
@@ -2178,7 +2180,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.clearMessages(followerActor);
 
         RaftActorLeadershipTransferCohort mockTransferCohort = mock(RaftActorLeadershipTransferCohort.class);
-        doReturn(Optional.absent()).when(mockTransferCohort).getRequestedFollowerId();
+        doReturn(Optional.empty()).when(mockTransferCohort).getRequestedFollowerId();
         leader.transferLeadership(mockTransferCohort);
 
         verify(mockTransferCohort, never()).transferComplete();
@@ -2245,7 +2247,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         logStart("testReplicationWithPayloadSizeThatExceedsThreshold");
 
         final int serializedSize = SerializationUtils.serialize(new AppendEntries(1, LEADER_ID, -1, -1,
-                Arrays.asList(new SimpleReplicatedLogEntry(0, 1,
+                List.of(new SimpleReplicatedLogEntry(0, 1,
                         new MockRaftActorContext.MockPayload("large"))), 0, -1, (short)0)).length;
         final MockRaftActorContext.MockPayload largePayload =
                 new MockRaftActorContext.MockPayload("large", serializedSize);
@@ -2253,7 +2255,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MockRaftActorContext leaderActorContext = createActorContextWithFollower();
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
                 new FiniteDuration(300, TimeUnit.MILLISECONDS));
-        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(serializedSize - 50);
+        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(serializedSize - 50);
         leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
         leaderActorContext.setCommitIndex(-1);
         leaderActorContext.setLastApplied(-1);
@@ -2337,7 +2339,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setHeartBeatInterval(
                 new FiniteDuration(100, TimeUnit.MILLISECONDS));
         ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setElectionTimeoutFactor(1);
-        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setSnapshotChunkSize(10);
+        ((DefaultConfigParamsImpl)leaderActorContext.getConfigParams()).setMaximumMessageSliceSize(10);
         leaderActorContext.setReplicatedLog(new MockRaftActorContext.MockReplicatedLogBuilder().build());
         leaderActorContext.setCommitIndex(-1);
         leaderActorContext.setLastApplied(-1);
@@ -2352,7 +2354,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         MessageCollectorActor.clearMessages(followerActor);
 
         sendReplicate(leaderActorContext, term, 0, new MockRaftActorContext.MockPayload("large",
-                leaderActorContext.getConfigParams().getSnapshotChunkSize() + 1));
+                leaderActorContext.getConfigParams().getMaximumMessageSliceSize() + 1));
         MessageCollectorActor.expectFirstMatching(followerActor, MessageSlice.class);
 
         // Sleep for at least 3 * election timeout so the slicing state expires.
@@ -2399,7 +2401,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         // Initial heartbeat shouldn't have the leader address
 
         AppendEntries appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
-        assertFalse(appendEntries.getLeaderAddress().isPresent());
+        assertNull(appendEntries.leaderAddress());
         MessageCollectorActor.clearMessages(followerActor);
 
         // Send AppendEntriesReply indicating the follower needs the leader address
@@ -2414,8 +2416,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE);
 
         appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
-        assertTrue(appendEntries.getLeaderAddress().isPresent());
-        assertEquals(leaderActor.path().toString(), appendEntries.getLeaderAddress().get());
+        assertEquals(leaderActor.path().toString(), appendEntries.leaderAddress());
         MessageCollectorActor.clearMessages(followerActor);
 
         // Send AppendEntriesReply indicating the follower does not need the leader address
@@ -2429,7 +2430,7 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         leader.handleMessage(leaderActor, SendHeartBeat.INSTANCE);
 
         appendEntries = MessageCollectorActor.expectFirstMatching(followerActor, AppendEntries.class);
-        assertFalse(appendEntries.getLeaderAddress().isPresent());
+        assertNull(appendEntries.leaderAddress());
     }
 
     @Override
@@ -2439,14 +2440,14 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         assertEquals("New votedFor", null, actorContext.getTermInformation().getVotedFor());
     }
 
-    private class MockConfigParamsImpl extends DefaultConfigParamsImpl {
+    private static class MockConfigParamsImpl extends DefaultConfigParamsImpl {
 
         private final long electionTimeOutIntervalMillis;
-        private final int snapshotChunkSize;
+        private final int maximumMessageSliceSize;
 
-        MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int snapshotChunkSize) {
+        MockConfigParamsImpl(final long electionTimeOutIntervalMillis, final int maximumMessageSliceSize) {
             this.electionTimeOutIntervalMillis = electionTimeOutIntervalMillis;
-            this.snapshotChunkSize = snapshotChunkSize;
+            this.maximumMessageSliceSize = maximumMessageSliceSize;
         }
 
         @Override
@@ -2455,8 +2456,8 @@ public class LeaderTest extends AbstractLeaderTest<Leader> {
         }
 
         @Override
-        public int getSnapshotChunkSize() {
-            return snapshotChunkSize;
+        public int getMaximumMessageSliceSize() {
+            return maximumMessageSliceSize;
         }
     }
 }
index 443b931d2599ffcc818eef0a0e72c8a9c4696b46..2c83f67582f4b97c452fa0f1e0b6c3554c85faef 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.behaviors;
 
 import static org.junit.Assert.assertEquals;
@@ -15,31 +14,32 @@ import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
 import akka.protobuf.ByteString;
-import com.google.common.base.Optional;
 import com.google.common.io.ByteSource;
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.Map;
+import java.util.OptionalInt;
 import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class SnapshotTrackerTest {
     private static final Logger LOG = LoggerFactory.getLogger(SnapshotTrackerTest.class);
 
+    private final HashMap<String, String> data = new HashMap<>();
+
     @Mock
     private RaftActorContext mockContext;
     private FileBackedOutputStream fbos;
-    private Map<String, String> data;
     private ByteString byteString;
     private byte[] chunk1;
     private byte[] chunk2;
@@ -47,14 +47,11 @@ public class SnapshotTrackerTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
-
-        data = new HashMap<>();
         data.put("key1", "value1");
         data.put("key2", "value2");
         data.put("key3", "value3");
 
-        byteString = ByteString.copyFrom(SerializationUtils.serialize((Serializable) data));
+        byteString = ByteString.copyFrom(SerializationUtils.serialize(data));
         chunk1 = getNextChunk(byteString, 0, 10);
         chunk2 = getNextChunk(byteString, 10, 10);
         chunk3 = getNextChunk(byteString, 20, byteString.size());
@@ -68,9 +65,9 @@ public class SnapshotTrackerTest {
     @Test
     public void testAddChunks() throws IOException {
         try (SnapshotTracker tracker = new SnapshotTracker(LOG, 3, "leader", mockContext)) {
-            tracker.addChunk(1, chunk1, Optional.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE));
-            tracker.addChunk(2, chunk2, Optional.of(Arrays.hashCode(chunk1)));
-            tracker.addChunk(3, chunk3, Optional.of(Arrays.hashCode(chunk2)));
+            tracker.addChunk(1, chunk1, OptionalInt.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE));
+            tracker.addChunk(2, chunk2, OptionalInt.of(Arrays.hashCode(chunk1)));
+            tracker.addChunk(3, chunk3, OptionalInt.of(Arrays.hashCode(chunk2)));
 
             ByteSource snapshotBytes = tracker.getSnapshotBytes();
             assertEquals("Deserialized", data, SerializationUtils.deserialize(snapshotBytes.read()));
@@ -82,39 +79,39 @@ public class SnapshotTrackerTest {
     @Test(expected = SnapshotTracker.InvalidChunkException.class)
     public void testAddChunkWhenAlreadySealed() throws IOException {
         try (SnapshotTracker tracker = new SnapshotTracker(LOG, 2, "leader", mockContext)) {
-            tracker.addChunk(1, chunk1, Optional.<Integer>absent());
-            tracker.addChunk(2, chunk2, Optional.<Integer>absent());
-            tracker.addChunk(3, chunk3, Optional.<Integer>absent());
+            tracker.addChunk(1, chunk1, OptionalInt.empty());
+            tracker.addChunk(2, chunk2, OptionalInt.empty());
+            tracker.addChunk(3, chunk3, OptionalInt.empty());
         }
     }
 
     @Test(expected = SnapshotTracker.InvalidChunkException.class)
     public void testInvalidFirstChunkIndex() throws IOException {
         try (SnapshotTracker tracker = new SnapshotTracker(LOG, 2, "leader", mockContext)) {
-            tracker.addChunk(LeaderInstallSnapshotState.FIRST_CHUNK_INDEX - 1, chunk1, Optional.<Integer>absent());
+            tracker.addChunk(LeaderInstallSnapshotState.FIRST_CHUNK_INDEX - 1, chunk1, OptionalInt.empty());
         }
     }
 
     @Test(expected = SnapshotTracker.InvalidChunkException.class)
     public void testOutOfSequenceChunk() throws IOException {
         try (SnapshotTracker tracker = new SnapshotTracker(LOG, 2, "leader", mockContext)) {
-            tracker.addChunk(1, chunk1, Optional.<Integer>absent());
-            tracker.addChunk(3, chunk3, Optional.<Integer>absent());
+            tracker.addChunk(1, chunk1, OptionalInt.empty());
+            tracker.addChunk(3, chunk3, OptionalInt.empty());
         }
     }
 
     @Test(expected = SnapshotTracker.InvalidChunkException.class)
     public void testInvalidLastChunkHashCode() throws IOException {
         try (SnapshotTracker tracker = new SnapshotTracker(LOG, 2, "leader", mockContext)) {
-            tracker.addChunk(1, chunk1, Optional.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE));
-            tracker.addChunk(2, chunk2, Optional.of(1));
+            tracker.addChunk(1, chunk1, OptionalInt.of(LeaderInstallSnapshotState.INITIAL_LAST_CHUNK_HASH_CODE));
+            tracker.addChunk(2, chunk2, OptionalInt.of(1));
         }
     }
 
     @Test(expected = IllegalStateException.class)
     public void testGetSnapshotBytesWhenNotSealed() throws IOException {
         try (SnapshotTracker tracker = new SnapshotTracker(LOG, 2, "leader", mockContext)) {
-            tracker.addChunk(1, chunk1, Optional.<Integer>absent());
+            tracker.addChunk(1, chunk1, OptionalInt.empty());
             tracker.getSnapshotBytes();
         }
     }
@@ -124,10 +121,8 @@ public class SnapshotTrackerTest {
         int start = offset;
         if (size > snapshotLength) {
             size = snapshotLength;
-        } else {
-            if (start + size > snapshotLength) {
-                size = snapshotLength - start;
-            }
+        } else if (start + size > snapshotLength) {
+            size = snapshotLength - start;
         }
 
         byte[] nextChunk = new byte[size];
index 81b9fbbb860bd98767d0283379ce849526b13f1f..4db399666f27725bc411736744a407e1d93273d7 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.raft.client.messages;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,10 +19,11 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class ShutdownTest {
-
     @Test
     public void test() {
-        Shutdown cloned = (Shutdown) SerializationUtils.clone(Shutdown.INSTANCE);
+        final var bytes = SerializationUtils.serialize(Shutdown.INSTANCE);
+        assertEquals(86, bytes.length);
+        final var cloned = SerializationUtils.deserialize(bytes);
         assertSame("Cloned instance", Shutdown.INSTANCE, cloned);
     }
 }
index 8452a71c24a1b14faf5525b9513eebf80bda1722..79c7477ba2d892c854ea0e9998417a47063506c5 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
 
@@ -19,29 +19,14 @@ import org.opendaylight.controller.cluster.raft.RaftVersions;
  * @author Thomas Pantelis
  */
 public class AppendEntriesReplyTest {
-
     @Test
     public void testSerialization() {
-        AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
-                RaftVersions.CURRENT_VERSION);
-        AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected);
+        final var expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
+            RaftVersions.CURRENT_VERSION);
 
-        assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
-        assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
-        assertEquals("getLogLastTerm", expected.getLogLastTerm(), cloned.getLogLastTerm());
-        assertEquals("getLogLastIndex", expected.getLogLastIndex(), cloned.getLogLastIndex());
-        assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion());
-        assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion());
-        assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot());
-        assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress());
-    }
-
-    @Test
-    @Deprecated
-    public void testPreFluorineSerialization() {
-        AppendEntriesReply expected = new AppendEntriesReply("follower", 5, true, 100, 4, (short)6, true, true,
-                RaftVersions.BORON_VERSION);
-        AppendEntriesReply cloned = (AppendEntriesReply) SerializationUtils.clone(expected);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(98, bytes.length);
+        final var cloned = (AppendEntriesReply) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
@@ -50,6 +35,6 @@ public class AppendEntriesReplyTest {
         assertEquals("getPayloadVersion", expected.getPayloadVersion(), cloned.getPayloadVersion());
         assertEquals("getRaftVersion", expected.getRaftVersion(), cloned.getRaftVersion());
         assertEquals("isForceInstallSnapshot", expected.isForceInstallSnapshot(), cloned.isForceInstallSnapshot());
-        assertEquals("isNeedsLeaderAddress", false, cloned.isNeedsLeaderAddress());
+        assertEquals("isNeedsLeaderAddress", expected.isNeedsLeaderAddress(), cloned.isNeedsLeaderAddress());
     }
 }
index a7c3c8b9d5e97bde0e4b6fb431e105545eea754b..38f1defb9f420fdf7769fd2a5270feb9476b3544 100644 (file)
@@ -8,11 +8,10 @@
 package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
-import java.util.Arrays;
 import java.util.Iterator;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.List;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
@@ -25,7 +24,6 @@ import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEnt
  * @author Thomas Pantelis
  */
 public class AppendEntriesTest {
-
     @Test
     public void testSerialization() {
         ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1"));
@@ -36,41 +34,29 @@ public class AppendEntriesTest {
 
         // Without leader address
 
-        AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
-                -1, payloadVersion, RaftVersions.CURRENT_VERSION, null);
+        var expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion,
+            RaftVersions.CURRENT_VERSION, null);
 
-        AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected);
+        var bytes = SerializationUtils.serialize(expected);
+        assertEquals(285, bytes.length);
+        var cloned = (AppendEntries) SerializationUtils.deserialize(bytes);
 
         verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION);
 
         // With leader address
 
-        expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
-                -1, payloadVersion, RaftVersions.CURRENT_VERSION, "leader address");
+        expected = new AppendEntries(5L, "node1", 7L, 8L, List.of(entry1, entry2), 10L, -1, payloadVersion,
+            RaftVersions.CURRENT_VERSION, "leader address");
 
-        cloned = (AppendEntries) SerializationUtils.clone(expected);
+        bytes = SerializationUtils.serialize(expected);
+        assertEquals(301, bytes.length);
+        cloned = (AppendEntries) SerializationUtils.deserialize(bytes);
 
         verifyAppendEntries(expected, cloned, RaftVersions.CURRENT_VERSION);
     }
 
-    @Test
-    @Deprecated
-    public void testPreFluorineSerialization() {
-        ReplicatedLogEntry entry1 = new SimpleReplicatedLogEntry(1, 2, new MockPayload("payload1"));
-
-        ReplicatedLogEntry entry2 = new SimpleReplicatedLogEntry(3, 4, new MockPayload("payload2"));
-
-        short payloadVersion = 5;
-
-        AppendEntries expected = new AppendEntries(5L, "node1", 7L, 8L, Arrays.asList(entry1, entry2), 10L,
-                -1, payloadVersion, RaftVersions.BORON_VERSION, "leader address");
-
-        AppendEntries cloned = (AppendEntries) SerializationUtils.clone(expected);
-
-        verifyAppendEntries(expected, cloned, RaftVersions.BORON_VERSION);
-    }
-
-    private static void verifyAppendEntries(AppendEntries expected, AppendEntries actual, short recipientRaftVersion) {
+    private static void verifyAppendEntries(final AppendEntries expected, final AppendEntries actual,
+            final short recipientRaftVersion) {
         assertEquals("getLeaderId", expected.getLeaderId(), actual.getLeaderId());
         assertEquals("getTerm", expected.getTerm(), actual.getTerm());
         assertEquals("getLeaderCommit", expected.getLeaderCommit(), actual.getLeaderCommit());
@@ -85,16 +71,11 @@ public class AppendEntriesTest {
             verifyReplicatedLogEntry(iter.next(), e);
         }
 
-        if (recipientRaftVersion >= RaftVersions.FLUORINE_VERSION) {
-            assertEquals("getLeaderAddress", expected.getLeaderAddress(), actual.getLeaderAddress());
-            assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion());
-        } else {
-            assertFalse(actual.getLeaderAddress().isPresent());
-            assertEquals("getLeaderRaftVersion", RaftVersions.BORON_VERSION, actual.getLeaderRaftVersion());
-        }
+        assertEquals("getLeaderAddress", expected.leaderAddress(), actual.leaderAddress());
+        assertEquals("getLeaderRaftVersion", RaftVersions.CURRENT_VERSION, actual.getLeaderRaftVersion());
     }
 
-    private static void verifyReplicatedLogEntry(ReplicatedLogEntry expected, ReplicatedLogEntry actual) {
+    private static void verifyReplicatedLogEntry(final ReplicatedLogEntry expected, final ReplicatedLogEntry actual) {
         assertEquals("getIndex", expected.getIndex(), actual.getIndex());
         assertEquals("getTerm", expected.getTerm(), actual.getTerm());
         assertEquals("getData", expected.getData().toString(), actual.getData().toString());
index 2841d989cf68772f82bb15a1023c4c2a24372d82..9db4cf4d037907461ec0b92ba724f4b933d61c16 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class InstallSnapshotReplyTest {
-
     @Test
     public void testSerialization() {
-        InstallSnapshotReply expected = new InstallSnapshotReply(5L, "follower", 1, true);
-        InstallSnapshotReply cloned = (InstallSnapshotReply) SerializationUtils.clone(expected);
+        final var expected = new InstallSnapshotReply(5L, "follower", 1, true);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(95, bytes.length);
+        final var cloned = (InstallSnapshotReply) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getFollowerId", expected.getFollowerId(), cloned.getFollowerId());
index 62a3e274283cc29e6c9f6650cc8ffb975fe09673..090ab77dad7275cf68848bf80affc2dcafaa21fe 100644 (file)
@@ -10,10 +10,10 @@ package org.opendaylight.controller.cluster.raft.messages;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 
-import com.google.common.base.Optional;
-import java.io.Serializable;
-import java.util.Arrays;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.List;
+import java.util.Optional;
+import java.util.OptionalInt;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.RaftVersions;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
@@ -25,9 +25,17 @@ import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
  * @author Thomas Pantelis
  */
 public class InstallSnapshotTest {
+    @Test
+    public void testCurrentSerialization() {
+        testSerialization(RaftVersions.CURRENT_VERSION, 1262, 1125);
+    }
 
     @Test
-    public void testSerialization() {
+    public void testFluorineSerialization() {
+        testSerialization(RaftVersions.FLUORINE_VERSION, 1302, 1165);
+    }
+
+    private static void testSerialization(final short raftVersion, final int fullSize, final int emptySize) {
         byte[] data = new byte[1000];
         for (int i = 0, j = 0; i < data.length; i++) {
             data[i] = (byte)j;
@@ -36,24 +44,22 @@ public class InstallSnapshotTest {
             }
         }
 
-        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+        var serverConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("leader", true), new ServerInfo("follower", false)));
-        InstallSnapshot expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6,
-                Optional.<Integer>of(54321), Optional.of(serverConfig));
-
-        Object serialized = expected.toSerializable(RaftVersions.CURRENT_VERSION);
-        assertEquals("Serialized type", InstallSnapshot.class, serialized.getClass());
+        assertInstallSnapshot(fullSize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.of(54321),
+            Optional.of(serverConfig), raftVersion));
 
-        InstallSnapshot actual = (InstallSnapshot) SerializationUtils.clone((Serializable) serialized);
-        verifyInstallSnapshot(expected, actual);
+        assertInstallSnapshot(emptySize, new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6, OptionalInt.empty(),
+            Optional.empty(), raftVersion));
+    }
 
-        expected = new InstallSnapshot(3L, "leaderId", 11L, 2L, data, 5, 6);
-        actual = (InstallSnapshot) SerializationUtils.clone((Serializable) expected.toSerializable(
-                RaftVersions.CURRENT_VERSION));
-        verifyInstallSnapshot(expected, actual);
+    private static void assertInstallSnapshot(final int expectedSize, final InstallSnapshot expected) {
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(expectedSize, bytes.length);
+        verifyInstallSnapshot(expected, (InstallSnapshot) SerializationUtils.deserialize(bytes));
     }
 
-    private static void verifyInstallSnapshot(InstallSnapshot expected, InstallSnapshot actual) {
+    private static void verifyInstallSnapshot(final InstallSnapshot expected, final InstallSnapshot actual) {
         assertEquals("getTerm", expected.getTerm(), actual.getTerm());
         assertEquals("getChunkIndex", expected.getChunkIndex(), actual.getChunkIndex());
         assertEquals("getTotalChunks", expected.getTotalChunks(), actual.getTotalChunks());
@@ -66,15 +72,15 @@ public class InstallSnapshotTest {
         assertEquals("getLastChunkHashCode present", expected.getLastChunkHashCode().isPresent(),
                 actual.getLastChunkHashCode().isPresent());
         if (expected.getLastChunkHashCode().isPresent()) {
-            assertEquals("getLastChunkHashCode", expected.getLastChunkHashCode().get(),
-                    actual.getLastChunkHashCode().get());
+            assertEquals("getLastChunkHashCode", expected.getLastChunkHashCode(),
+                    actual.getLastChunkHashCode());
         }
 
         assertEquals("getServerConfig present", expected.getServerConfig().isPresent(),
                 actual.getServerConfig().isPresent());
         if (expected.getServerConfig().isPresent()) {
-            assertEquals("getServerConfig", expected.getServerConfig().get().getServerConfig(),
-                    actual.getServerConfig().get().getServerConfig());
+            assertEquals("getServerConfig", expected.getServerConfig().orElseThrow().getServerConfig(),
+                    actual.getServerConfig().orElseThrow().getServerConfig());
         }
     }
 }
index fa1bb5f15277d97cab4268bb3da6c0ac92e90feb..51488a362ce411e4cb676667efe9d63b6e1b0ae2 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class RequestVoteReplyTest {
-
     @Test
     public void testSerialization() {
-        RequestVoteReply expected = new RequestVoteReply(5, true);
-        RequestVoteReply cloned = (RequestVoteReply) SerializationUtils.clone(expected);
+        final var expected = new RequestVoteReply(5, true);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(78, bytes.length);
+        final var cloned = (RequestVoteReply) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("isVoteGranted", expected.isVoteGranted(), cloned.isVoteGranted());
index 6cb9179dedd3f4af8940dfcb1f6c615a89456700..c3227be60c7955c7336b99b22b7f3ef4f6762949 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.messages;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class RequestVoteTest {
-
     @Test
     public void testSerialization() {
-        RequestVote expected = new RequestVote(4, "candidateId", 3, 2);
-        RequestVote cloned = (RequestVote) SerializationUtils.clone(expected);
+        final var expected = new RequestVote(4, "candidateId", 3, 2);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(97, bytes.length);
+        final var cloned = (RequestVote) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getCandidateId", expected.getCandidateId(), cloned.getCandidateId());
index b7f152574bcbb478bf7951b1ef8f64aea12198db..c762c1e2d07c3cb0126232df9b26582db7318f70 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class ApplyJournalEntriesTest {
-
     @Test
     public void testSerialization() {
-        ApplyJournalEntries expected = new ApplyJournalEntries(5);
-        ApplyJournalEntries cloned = (ApplyJournalEntries) SerializationUtils.clone(expected);
+        final var expected = new ApplyJournalEntries(5);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(80, bytes.length);
+        final var cloned = (ApplyJournalEntries) SerializationUtils.deserialize(bytes);
 
         assertEquals("getFromIndex", expected.getToIndex(), cloned.getToIndex());
     }
index 8334296ead188fd2263e855aeaeb164f36af839b..73fb02f2bcd3b542fb8e2154a956a190512b39f3 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class DeleteEntriesTest {
-
     @Test
     public void testSerialization() {
-        DeleteEntries expected = new DeleteEntries(5);
-        DeleteEntries cloned = (DeleteEntries) SerializationUtils.clone(expected);
+        final var expected = new DeleteEntries(5);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(79, bytes.length);
+        final var cloned = (DeleteEntries) SerializationUtils.deserialize(bytes);
 
         assertEquals("getFromIndex", expected.getFromIndex(), cloned.getFromIndex());
     }
index 963580cde4b04828cbe08b90bc70cebbaf4fd4ba..18fa2d7719b64a41d221aeabc8e2a76fa7f7987e 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.raft.persisted;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -19,10 +20,11 @@ import org.junit.Test;
  *
  */
 public class EmptyStateTest {
-
     @Test
     public void testSerialization() {
-        EmptyState cloned = (EmptyState) SerializationUtils.clone(EmptyState.INSTANCE);
+        final var bytes = SerializationUtils.serialize(EmptyState.INSTANCE);
+        assertEquals(82, bytes.length);
+        final var cloned = SerializationUtils.deserialize(bytes);
         assertSame("cloned", EmptyState.INSTANCE, cloned);
     }
 }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/persisted/NoopPayloadTest.java
new file mode 100644 (file)
index 0000000..bf2e8fa
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.persisted;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.junit.Test;
+
+public class NoopPayloadTest {
+    @Test
+    public void testSerialization() {
+        final var bytes = SerializationUtils.serialize(NoopPayload.INSTANCE);
+        assertEquals(74, bytes.length);
+        assertSame(NoopPayload.INSTANCE, SerializationUtils.deserialize(bytes));
+    }
+}
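
The assertSame checks in EmptyStateTest and NoopPayloadTest can only pass if deserialization resolves back to the singleton instance. The implementations of EmptyState and NoopPayload are not part of this diff; the following is a hedged sketch of the standard readResolve idiom they presumably rely on:

import java.io.Serializable;

// Hypothetical singleton payload; EmptyState and NoopPayload are assumed to follow this idiom.
final class SingletonPayload implements Serializable {
    private static final long serialVersionUID = 1L;

    static final SingletonPayload INSTANCE = new SingletonPayload();

    private SingletonPayload() {
        // no instance state
    }

    // Invoked by Java deserialization: returning the canonical instance instead of the freshly
    // read copy is what makes assertSame(INSTANCE, SerializationUtils.deserialize(bytes)) pass.
    private Object readResolve() {
        return INSTANCE;
    }
}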
index aa2fe90884540760fcd0dbd99b3f754dfa3f9412..d686e946e72064771390ce96021afa3bc632259e 100644 (file)
@@ -10,8 +10,8 @@ package org.opendaylight.controller.cluster.raft.persisted;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.util.Arrays;
-import org.apache.commons.lang.SerializationUtils;
+import java.util.List;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -20,19 +20,21 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class ServerConfigurationPayloadTest {
-
     @Test
     public void testSerialization() {
-        ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true),
-                new ServerInfo("2", false)));
-        ServerConfigurationPayload cloned = (ServerConfigurationPayload) SerializationUtils.clone(expected);
+        final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true),
+            new ServerInfo("2", false)));
+
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(125, bytes.length);
+        final var cloned = (ServerConfigurationPayload) SerializationUtils.deserialize(bytes);
 
         assertEquals("getServerConfig", expected.getServerConfig(), cloned.getServerConfig());
     }
 
     @Test
     public void testSize() {
-        ServerConfigurationPayload expected = new ServerConfigurationPayload(Arrays.asList(new ServerInfo("1", true)));
+        final var expected = new ServerConfigurationPayload(List.of(new ServerInfo("1", true)));
         assertTrue(expected.size() > 0);
     }
 }
index ec4a3689b2a4a109bb2c28d81aecbab214fe225f..919aaba4cd6a4d10c50fab4962bdd83d8f2e8055 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
 
@@ -19,12 +19,12 @@ import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
  * @author Thomas Pantelis
  */
 public class SimpleReplicatedLogEntryTest {
-
     @Test
     public void testSerialization() {
-        SimpleReplicatedLogEntry expected = new SimpleReplicatedLogEntry(0, 1,
-                new MockRaftActorContext.MockPayload("A"));
-        SimpleReplicatedLogEntry cloned = (SimpleReplicatedLogEntry) SerializationUtils.clone(expected);
+        final var expected = new SimpleReplicatedLogEntry(0, 1, new MockRaftActorContext.MockPayload("A"));
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(218, bytes.length);
+        final var cloned = (SimpleReplicatedLogEntry) SerializationUtils.deserialize(bytes);
 
         assertEquals("getTerm", expected.getTerm(), cloned.getTerm());
         assertEquals("getIndex", expected.getIndex(), cloned.getIndex());
index 9f1f924252da9a9fd624973944f47ac42bb573b7..3223e482d6e75eb226a95af5049851a8e0e585ad 100644 (file)
@@ -9,10 +9,8 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext.MockPayload;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
@@ -23,27 +21,29 @@ import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
  * @author Thomas Pantelis
  */
 public class SnapshotTest {
-
     @Test
     public void testSerialization() {
-        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, Arrays.asList(
-                new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload"))));
-        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, Collections.emptyList());
+        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7}, List.of(
+                new SimpleReplicatedLogEntry(6, 2, new MockPayload("payload"))), 491);
+        testSerialization(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9}, List.of(), 345);
     }
 
-    private static void testSerialization(final byte[] state, final List<ReplicatedLogEntry> unapplied) {
+    private static void testSerialization(final byte[] state, final List<ReplicatedLogEntry> unapplied,
+            final int expectedSize) {
         long lastIndex = 6;
         long lastTerm = 2;
         long lastAppliedIndex = 5;
         long lastAppliedTerm = 1;
         long electionTerm = 3;
         String electionVotedFor = "member-1";
-        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(Arrays.asList(
+        ServerConfigurationPayload serverConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("1", true), new ServerInfo("2", false)));
 
-        Snapshot expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex,
+        final var expected = Snapshot.create(ByteState.of(state), unapplied, lastIndex, lastTerm, lastAppliedIndex,
                 lastAppliedTerm, electionTerm, electionVotedFor, serverConfig);
-        Snapshot cloned = (Snapshot) SerializationUtils.clone(expected);
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(expectedSize, bytes.length);
+        final var cloned = (Snapshot) SerializationUtils.deserialize(bytes);
 
         assertEquals("lastIndex", expected.getLastIndex(), cloned.getLastIndex());
         assertEquals("lastTerm", expected.getLastTerm(), cloned.getLastTerm());
index de95125966405f1fb3af8d4357cae6218afb2032..75e32783b9b37652c217cbadd67d304bf82efaf0 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.raft.persisted;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -18,11 +18,12 @@ import org.junit.Test;
  * @author Thomas Pantelis
  */
 public class UpdateElectionTermTest {
-
     @Test
     public void testSerialization() {
-        UpdateElectionTerm expected = new UpdateElectionTerm(5, "leader");
-        UpdateElectionTerm cloned = (UpdateElectionTerm) SerializationUtils.clone(expected);
+        final var expected = new UpdateElectionTerm(5, "leader");
+        final var bytes = SerializationUtils.serialize(expected);
+        assertEquals(88, bytes.length);
+        final var cloned = (UpdateElectionTerm) SerializationUtils.deserialize(bytes);
 
         assertEquals("getCurrentTerm", expected.getCurrentTerm(), cloned.getCurrentTerm());
         assertEquals("getVotedFor", expected.getVotedFor(), cloned.getVotedFor());
index 90aff5b72616e9d85abfb3786f28aa5f6345bcbe..21bf4bfa57acc13c7572a974ce5dc84963152bbb 100644 (file)
@@ -24,10 +24,12 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import scala.Option;
 import scala.concurrent.Future;
+import scala.jdk.javaapi.CollectionConverters;
 
 /**
  * An Akka AsyncWriteJournal implementation that stores data in memory. This is intended for testing.
@@ -40,8 +42,8 @@ public class InMemoryJournal extends AsyncWriteJournal {
         final CountDownLatch latch;
         final Class<?> ofType;
 
-        WriteMessagesComplete(int count, Class<?> ofType) {
-            this.latch = new CountDownLatch(count);
+        WriteMessagesComplete(final int count, final Class<?> ofType) {
+            latch = new CountDownLatch(count);
             this.ofType = ofType;
         }
     }
@@ -56,11 +58,11 @@ public class InMemoryJournal extends AsyncWriteJournal {
 
     private static final Map<String, CountDownLatch> BLOCK_READ_MESSAGES_LATCHES = new ConcurrentHashMap<>();
 
-    private static Object deserialize(Object data) {
+    private static Object deserialize(final Object data) {
         return data instanceof byte[] ? SerializationUtils.deserialize((byte[])data) : data;
     }
 
-    public static void addEntry(String persistenceId, long sequenceNr, Object data) {
+    public static void addEntry(final String persistenceId, final long sequenceNr, final Object data) {
         Map<Long, Object> journal = JOURNALS.computeIfAbsent(persistenceId, k -> new LinkedHashMap<>());
 
         synchronized (journal) {
@@ -77,7 +79,7 @@ public class InMemoryJournal extends AsyncWriteJournal {
     }
 
     @SuppressWarnings("unchecked")
-    public static <T> List<T> get(String persistenceId, Class<T> type) {
+    public static <T> List<T> get(final String persistenceId, final Class<T> type) {
         Map<Long, Object> journalMap = JOURNALS.get(persistenceId);
         if (journalMap == null) {
             return Collections.<T>emptyList();
@@ -96,12 +98,12 @@ public class InMemoryJournal extends AsyncWriteJournal {
         }
     }
 
-    public static Map<Long, Object> get(String persistenceId) {
+    public static Map<Long, Object> get(final String persistenceId) {
         Map<Long, Object> journalMap = JOURNALS.get(persistenceId);
         return journalMap != null ? journalMap : Collections.<Long, Object>emptyMap();
     }
 
-    public static void dumpJournal(String persistenceId) {
+    public static void dumpJournal(final String persistenceId) {
         StringBuilder builder = new StringBuilder(String.format("Journal log for %s:", persistenceId));
         Map<Long, Object> journalMap = JOURNALS.get(persistenceId);
         if (journalMap != null) {
@@ -115,33 +117,34 @@ public class InMemoryJournal extends AsyncWriteJournal {
         LOG.info(builder.toString());
     }
 
-    public static void waitForDeleteMessagesComplete(String persistenceId) {
+    public static void waitForDeleteMessagesComplete(final String persistenceId) {
         if (!Uninterruptibles.awaitUninterruptibly(DELETE_MESSAGES_COMPLETE_LATCHES.get(persistenceId),
                 5, TimeUnit.SECONDS)) {
             throw new AssertionError("Delete messages did not complete");
         }
     }
 
-    public static void waitForWriteMessagesComplete(String persistenceId) {
+    public static void waitForWriteMessagesComplete(final String persistenceId) {
         if (!Uninterruptibles.awaitUninterruptibly(WRITE_MESSAGES_COMPLETE.get(persistenceId).latch,
                 5, TimeUnit.SECONDS)) {
             throw new AssertionError("Journal write messages did not complete");
         }
     }
 
-    public static void addDeleteMessagesCompleteLatch(String persistenceId) {
+    public static void addDeleteMessagesCompleteLatch(final String persistenceId) {
         DELETE_MESSAGES_COMPLETE_LATCHES.put(persistenceId, new CountDownLatch(1));
     }
 
-    public static void addWriteMessagesCompleteLatch(String persistenceId, int count) {
+    public static void addWriteMessagesCompleteLatch(final String persistenceId, final int count) {
         WRITE_MESSAGES_COMPLETE.put(persistenceId, new WriteMessagesComplete(count, null));
     }
 
-    public static void addWriteMessagesCompleteLatch(String persistenceId, int count, Class<?> ofType) {
+    public static void addWriteMessagesCompleteLatch(final String persistenceId, final int count,
+            final Class<?> ofType) {
         WRITE_MESSAGES_COMPLETE.put(persistenceId, new WriteMessagesComplete(count, ofType));
     }
 
-    public static void addBlockReadMessagesLatch(String persistenceId, CountDownLatch latch) {
+    public static void addBlockReadMessagesLatch(final String persistenceId, final CountDownLatch latch) {
         BLOCK_READ_MESSAGES_LATCHES.put(persistenceId, latch);
     }
 
@@ -167,7 +170,7 @@ public class InMemoryJournal extends AsyncWriteJournal {
                     if (++count <= max && entry.getKey() >= fromSequenceNr && entry.getKey() <= toSequenceNr) {
                         PersistentRepr persistentMessage =
                                 new PersistentImpl(deserialize(entry.getValue()), entry.getKey(), persistenceId,
-                                        null, false, null, null);
+                                        null, false, null, null, 0, Option.empty());
                         replayCallback.accept(persistentMessage);
                     }
                 }
@@ -178,7 +181,7 @@ public class InMemoryJournal extends AsyncWriteJournal {
     }
 
     @Override
-    public Future<Long> doAsyncReadHighestSequenceNr(String persistenceId, long fromSequenceNr) {
+    public Future<Long> doAsyncReadHighestSequenceNr(final String persistenceId, final long fromSequenceNr) {
         LOG.trace("doAsyncReadHighestSequenceNr for {}: fromSequenceNr: {}", persistenceId, fromSequenceNr);
 
         // Akka calls this during recovery.
@@ -203,10 +206,7 @@ public class InMemoryJournal extends AsyncWriteJournal {
     public Future<Iterable<Optional<Exception>>> doAsyncWriteMessages(final Iterable<AtomicWrite> messages) {
         return Futures.future(() -> {
             for (AtomicWrite write : messages) {
-                // Copy to array - workaround for eclipse "ambiguous method" errors for toIterator, toIterable etc
-                PersistentRepr[] array = new PersistentRepr[write.payload().size()];
-                write.payload().copyToArray(array);
-                for (PersistentRepr repr: array) {
+                for (PersistentRepr repr : CollectionConverters.asJava(write.payload())) {
                     LOG.trace("doAsyncWriteMessages: id: {}: seqNr: {}, payload: {}", repr.persistenceId(),
                         repr.sequenceNr(), repr.payload());
 
@@ -226,7 +226,7 @@ public class InMemoryJournal extends AsyncWriteJournal {
     }
 
     @Override
-    public Future<Void> doAsyncDeleteMessagesTo(String persistenceId, long toSequenceNr) {
+    public Future<Void> doAsyncDeleteMessagesTo(final String persistenceId, final long toSequenceNr) {
         LOG.trace("doAsyncDeleteMessagesTo: {}", toSequenceNr);
         Map<Long, Object> journal = JOURNALS.get(persistenceId);
         if (journal != null) {
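
The doAsyncWriteMessages hunk above replaces the copy-to-array workaround with scala.jdk.javaapi.CollectionConverters, the Scala 2.13 API for bridging Scala and Java collections. A small standalone sketch of the round trip (the collection contents are illustrative):

import java.util.List;
import scala.jdk.javaapi.CollectionConverters;

public final class CollectionConvertersSketch {
    public static void main(final String[] args) {
        // Java -> Scala: asScala() on a java.util.List yields a scala.collection.mutable.Buffer.
        final var buffer = CollectionConverters.asScala(List.of("a", "b", "c"));
        // Scala -> Java: asJava() wraps it back into a java.util.List, which allows a plain
        // enhanced for-loop, exactly as the journal now iterates write.payload().
        for (final String element : CollectionConverters.asJava(buffer)) {
            System.out.println(element);
        }
    }
}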
index 4c3ad09d5172a6b7fe0203cc8ea193ee2da2c11f..662a063788fd522412d778eff71000bfcdaa9507 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.raft.utils;
 
 import akka.dispatch.Futures;
@@ -13,7 +12,6 @@ import akka.persistence.SelectedSnapshot;
 import akka.persistence.SnapshotMetadata;
 import akka.persistence.SnapshotSelectionCriteria;
 import akka.persistence.snapshot.japi.SnapshotStore;
-import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -59,7 +57,7 @@ public class InMemorySnapshotStore extends SnapshotStore {
 
         List<T> retList;
         synchronized (stored) {
-            retList = Lists.newArrayListWithCapacity(stored.size());
+            retList = new ArrayList<>(stored.size());
             for (StoredSnapshot s: stored) {
                 if (type.isInstance(s.data)) {
                     retList.add((T) s.data);
@@ -70,6 +68,10 @@ public class InMemorySnapshotStore extends SnapshotStore {
         return retList;
     }
 
+    public static void clearSnapshotsFor(final String persistenceId) {
+        snapshots.remove(persistenceId);
+    }
+
     public static void clear() {
         snapshots.clear();
     }
index b997326238b91e76c0cd2e6b19c60f14dd889de0..d7ae0cb29fd2cee152430b001e49f1b6dcb82d9a 100644 (file)
@@ -6,6 +6,8 @@ akka {
     loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
 
     actor {
+        provider = "akka.cluster.ClusterActorRefProvider"
+
         # enable to test serialization only.
         serialize-messages = off
 
index ba6b7edb6a5daa418aec6c9863ae007c76b75f2f..243f3b0bfc38d72eb3980b4608041d6b5dfcf45c 100644 (file)
@@ -12,94 +12,71 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
     <artifactId>sal-akka-segmented-journal</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
     <packaging>bundle</packaging>
 
     <dependencies>
-
-        <!-- Akka -->
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-actor_2.12</artifactId>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-persistence_2.12</artifactId>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-slf4j_2.12</artifactId>
-            <scope>test</scope>
+            <groupId>io.dropwizard.metrics</groupId>
+            <artifactId>metrics-core</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-testkit_2.12</artifactId>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-persistence-tck_2.12</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>atomix-storage</artifactId>
         </dependency>
-
-        <!-- Codahale -->
         <dependency>
-            <groupId>io.dropwizard.metrics</groupId>
-            <artifactId>metrics-core</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>repackaged-akka</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
         </dependency>
-
-        <!-- Scala -->
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
         </dependency>
 
-        <!-- Atomix -->
         <dependency>
-            <groupId>io.atomix</groupId>
-            <artifactId>atomix-storage</artifactId>
-            <version>3.1.5</version>
-            <scope>provided</scope>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-testkit_2.13</artifactId>
         </dependency>
         <dependency>
-            <groupId>io.atomix</groupId>
-            <artifactId>atomix-utils</artifactId>
-            <version>3.1.5</version>
-            <scope>provided</scope>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-persistence-tck_2.13</artifactId>
         </dependency>
-
         <dependency>
             <groupId>commons-io</groupId>
             <artifactId>commons-io</artifactId>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.scalatestplus</groupId>
+            <artifactId>junit-4-13_2.13</artifactId>
+            <scope>test</scope>
+        </dependency>
     </dependencies>
 
-    <build>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.felix</groupId>
-                <artifactId>maven-bundle-plugin</artifactId>
-                <extensions>true</extensions>
-                <configuration>
-                    <instructions>
-                        <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
-                        <!-- atomix.io is using an older Guava, so let's embed it to prevent duplicates -->
-                        <Embed-Dependency>*;inline=true;groupId=io.atomix</Embed-Dependency>
-                    </instructions>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-
     <scm>
         <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
         <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
         <tag>HEAD</tag>
         <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
     </scm>
-
 </project>
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournal.java
new file mode 100644 (file)
index 0000000..b89ebf4
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static java.util.Objects.requireNonNull;
+
+import com.codahale.metrics.Histogram;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages;
+
+/**
+ * Abstraction of a data journal. This provides a unified interface towards {@link SegmentedJournalActor}, allowing
+ * specialization for various formats.
+ */
+abstract class DataJournal {
+    // Mirrors fields from associated actor
+    final @NonNull String persistenceId;
+    private final Histogram messageSize;
+
+    // Tracks largest message size we have observed either during recovery or during write
+    private int largestObservedSize;
+
+    DataJournal(final String persistenceId, final Histogram messageSize) {
+        this.persistenceId = requireNonNull(persistenceId);
+        this.messageSize = requireNonNull(messageSize);
+    }
+
+    final void recordMessageSize(final int size) {
+        messageSize.update(size);
+        updateLargestSize(size);
+    }
+
+    final void updateLargestSize(final int size) {
+        if (size > largestObservedSize) {
+            largestObservedSize = size;
+        }
+    }
+
+    /**
+     * Return the last sequence number completely written to the journal.
+     *
+     * @return Last written sequence number, {@code -1} if there are no entries in the journal.
+     */
+    abstract long lastWrittenSequenceNr();
+
+    /**
+     * Delete all messages up to the specified sequence number.
+     *
+     * @param sequenceNr Sequence number to delete to.
+     */
+    abstract void deleteTo(long sequenceNr);
+
+    /**
+     * Compact the journal up to the specified sequence number, allowing preceding storage to be reclaimed.
+     *
+     * @param sequenceNr Sequence number to compact to.
+     */
+    abstract void compactTo(long sequenceNr);
+
+    /**
+     * Close this journal, freeing up resources associated with it.
+     */
+    abstract void close();
+
+    /**
+     * Handle a request to replay messages.
+     *
+     * @param message Request message
+     * @param fromSequenceNr Sequence number to replay from, adjusted for deletions
+     */
+    abstract void handleReplayMessages(@NonNull ReplayMessages message, long fromSequenceNr);
+
+    /**
+     * Handle a request to store some messages.
+     *
+     * @param message {@link WriteMessages} message
+     * @return a {@link WrittenMessages} object
+     */
+    abstract @NonNull WrittenMessages handleWriteMessages(@NonNull WriteMessages message);
+
+    /**
+     * Flush all messages to durable storage.
+     */
+    abstract void flush();
+}
index 0713c0212a627eae426b9e551e08b5f8b0696e1c..fdd0b80d03c7a8ae24afac26b655e092a3afb1c4 100644 (file)
@@ -10,16 +10,16 @@ package org.opendaylight.controller.akka.segjournal;
 import static java.util.Objects.requireNonNull;
 
 import akka.persistence.PersistentRepr;
-import io.atomix.storage.journal.JournalSegment;
 
 /**
  * A single entry in the data journal. We do not store {@code persistenceId} for each entry, as that is a
- * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by {@link JournalSegment}'s
- * index.
- *
- * @author Robert Varga
+ * journal-invariant, nor do we store {@code sequenceNr}, as that information is maintained by a particular journal
+ * segment's index.
  */
-abstract class DataJournalEntry {
+abstract sealed class DataJournalEntry {
+    /**
+     * A single data journal entry on its way to the backing file.
+     */
     static final class ToPersistence extends DataJournalEntry {
         private final PersistentRepr repr;
 
@@ -32,6 +32,9 @@ abstract class DataJournalEntry {
         }
     }
 
+    /**
+     * A single data journal entry on its way from the backing file.
+     */
     static final class FromPersistence extends DataJournalEntry {
         private final String manifest;
         private final String writerUuid;
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerdes.java
new file mode 100644 (file)
index 0000000..e0d7be1
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorSystem;
+import akka.actor.ExtendedActorSystem;
+import akka.persistence.PersistentRepr;
+import akka.serialization.JavaSerializer;
+import com.google.common.base.VerifyException;
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
+
+/**
+ * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as
+ * a nested JavaSerializer to handle the payload.
+ *
+ * <p>
+ * Since we are persisting only parts of {@link PersistentRepr}, this class is asymmetric by design:
+ * {@link #write(EntryOutput, DataJournalEntry)} only accepts the {@link ToPersistence} subclass, which is a wrapper
+ * around a {@link PersistentRepr}, while {@link #read(EntryInput)} produces a {@link FromPersistence}, which
+ * needs further processing to reconstruct a {@link PersistentRepr}.
+ */
+final class DataJournalEntrySerdes implements EntrySerdes<DataJournalEntry> {
+    private final ExtendedActorSystem actorSystem;
+
+    DataJournalEntrySerdes(final ActorSystem actorSystem) {
+        this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem);
+    }
+
+    @Override
+    public void write(final EntryOutput output, final DataJournalEntry entry) throws IOException {
+        if (entry instanceof ToPersistence toPersistence) {
+            final var repr = toPersistence.repr();
+            output.writeString(repr.manifest());
+            output.writeString(repr.writerUuid());
+            output.writeObject(repr.payload());
+        } else {
+            throw new VerifyException("Unexpected entry " + entry);
+        }
+    }
+
+    @Override
+    public DataJournalEntry read(final EntryInput input) throws IOException {
+        return new FromPersistence(input.readString(), input.readString(),
+            JavaSerializer.currentSystem().withValue(actorSystem, (Callable<Object>) input::readObject));
+    }
+}
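
An EntrySerdes such as the one above is bound to its entry classes through JournalSerdes.builder().register(...), as DataJournalV0 does further down for ToPersistence and FromPersistence. A hedged sketch using a made-up CounterEntry type, showing only the contract that is visible in this patch:

import io.atomix.storage.journal.JournalSerdes.EntryInput;
import io.atomix.storage.journal.JournalSerdes.EntryOutput;
import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
import java.io.IOException;

// Hypothetical entry type, used only to illustrate the EntrySerdes contract.
record CounterEntry(String name, long value) {
}

final class CounterEntrySerdes implements EntrySerdes<CounterEntry> {
    @Override
    public void write(final EntryOutput output, final CounterEntry entry) throws IOException {
        output.writeString(entry.name());
        output.writeLong(entry.value());
    }

    @Override
    public CounterEntry read(final EntryInput input) throws IOException {
        return new CounterEntry(input.readString(), input.readLong());
    }
}

// Registration mirrors what DataJournalV0 does for the two DataJournalEntry subclasses:
//   JournalSerdes.builder().register(new CounterEntrySerdes(), CounterEntry.class).build()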
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalEntrySerializer.java
deleted file mode 100644 (file)
index e248262..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2019 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.akka.segjournal;
-
-import static com.google.common.base.Verify.verify;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSystem;
-import akka.actor.ExtendedActorSystem;
-import akka.persistence.PersistentRepr;
-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.Serializer;
-import com.esotericsoftware.kryo.io.Input;
-import com.esotericsoftware.kryo.io.Output;
-import com.esotericsoftware.kryo.serializers.JavaSerializer;
-import java.util.concurrent.Callable;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
-
-/**
- * Kryo serializer for {@link DataJournalEntry}. Each {@link SegmentedJournalActor} has its own instance, as well as
- * a nested JavaSerializer to handle the payload.
- *
- * <p>
- * Since we are persisting only parts of {@link PersistentRepr}, this class asymmetric by design:
- * {@link #write(Kryo, Output, DataJournalEntry)} only accepts {@link ToPersistence} subclass, which is a wrapper
- * around a {@link PersistentRepr}, while {@link #read(Kryo, Input, Class)} produces an {@link FromPersistence}, which
- * needs further processing to reconstruct a {@link PersistentRepr}.
- *
- * @author Robert Varga
- */
-final class DataJournalEntrySerializer extends Serializer<DataJournalEntry> {
-    private final JavaSerializer serializer = new JavaSerializer();
-    private final ExtendedActorSystem actorSystem;
-
-    DataJournalEntrySerializer(final ActorSystem actorSystem) {
-        this.actorSystem = requireNonNull((ExtendedActorSystem) actorSystem);
-    }
-
-    @Override
-    public void write(final Kryo kryo, final Output output, final DataJournalEntry object) {
-        verify(object instanceof ToPersistence);
-        final PersistentRepr repr = ((ToPersistence) object).repr();
-        output.writeString(repr.manifest());
-        output.writeString(repr.writerUuid());
-        serializer.write(kryo, output, repr.payload());
-    }
-
-    @Override
-    public DataJournalEntry read(final Kryo kryo, final Input input, final Class<DataJournalEntry> type) {
-        final String manifest = input.readString();
-        final String uuid = input.readString();
-        final Object payload = akka.serialization.JavaSerializer.currentSystem().withValue(actorSystem,
-            (Callable<Object>)() -> serializer.read(kryo, input, type));
-        return new FromPersistence(manifest, uuid, payload);
-    }
-}
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/DataJournalV0.java
new file mode 100644 (file)
index 0000000..243a064
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2019, 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import akka.actor.ActorSystem;
+import akka.persistence.PersistentRepr;
+import com.codahale.metrics.Histogram;
+import com.google.common.base.VerifyException;
+import io.atomix.storage.journal.JournalReader;
+import io.atomix.storage.journal.JournalSerdes;
+import io.atomix.storage.journal.JournalWriter;
+import io.atomix.storage.journal.SegmentedJournal;
+import io.atomix.storage.journal.StorageLevel;
+import java.io.File;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
+import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.ReplayMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WrittenMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.jdk.javaapi.CollectionConverters;
+
+/**
+ * Version 0 data journal, where every journal entry maps to exactly one segmented file entry.
+ */
+final class DataJournalV0 extends DataJournal {
+    private static final Logger LOG = LoggerFactory.getLogger(DataJournalV0.class);
+
+    private final SegmentedJournal<DataJournalEntry> entries;
+
+    DataJournalV0(final String persistenceId, final Histogram messageSize, final ActorSystem system,
+            final StorageLevel storage, final File directory, final int maxEntrySize, final int maxSegmentSize) {
+        super(persistenceId, messageSize);
+        entries = SegmentedJournal.<DataJournalEntry>builder()
+                .withStorageLevel(storage).withDirectory(directory).withName("data")
+                .withNamespace(JournalSerdes.builder()
+                    .register(new DataJournalEntrySerdes(system), FromPersistence.class, ToPersistence.class)
+                    .build())
+                .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
+                .build();
+    }
+
+    @Override
+    long lastWrittenSequenceNr() {
+        return entries.writer().getLastIndex();
+    }
+
+    @Override
+    void deleteTo(final long sequenceNr) {
+        entries.writer().commit(sequenceNr);
+    }
+
+    @Override
+    void compactTo(final long sequenceNr) {
+        entries.compact(sequenceNr + 1);
+    }
+
+    @Override
+    void close() {
+        flush();
+        entries.close();
+    }
+
+    @Override
+    void flush() {
+        entries.writer().flush();
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:illegalCatch")
+    void handleReplayMessages(final ReplayMessages message, final long fromSequenceNr) {
+        try (var reader = entries.openReader(fromSequenceNr)) {
+            handleReplayMessages(reader, message);
+        } catch (Exception e) {
+            LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
+            message.promise.failure(e);
+        } finally {
+            message.promise.success(null);
+        }
+    }
+
+    private void handleReplayMessages(final JournalReader<DataJournalEntry> reader, final ReplayMessages message) {
+        int count = 0;
+        while (count < message.max) {
+            final var next = reader.tryNext();
+            if (next == null || next.index() > message.toSequenceNr) {
+                break;
+            }
+
+            LOG.trace("{}: replay {}", persistenceId, next);
+            updateLargestSize(next.size());
+            final var entry = next.entry();
+            if (entry instanceof FromPersistence fromPersistence) {
+                final var repr = fromPersistence.toRepr(persistenceId, next.index());
+                LOG.debug("{}: replaying {}", persistenceId, repr);
+                message.replayCallback.accept(repr);
+                count++;
+            } else {
+                throw new VerifyException("Unexpected entry " + entry);
+            }
+        }
+        LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:illegalCatch")
+    WrittenMessages handleWriteMessages(final WriteMessages message) {
+        final int count = message.size();
+        final var responses = new ArrayList<>();
+        final var writer = entries.writer();
+        long writtenBytes = 0;
+
+        for (int i = 0; i < count; ++i) {
+            final long mark = writer.getLastIndex();
+            final var request = message.getRequest(i);
+
+            final var reprs = CollectionConverters.asJava(request.payload());
+            LOG.trace("{}: append {}/{}: {} items at mark {}", persistenceId, i, count, reprs.size(), mark);
+            try {
+                writtenBytes += writePayload(writer, reprs);
+            } catch (Exception e) {
+                LOG.warn("{}: failed to write out request {}/{} reverting to {}", persistenceId, i, count, mark, e);
+                responses.add(e);
+                writer.truncate(mark);
+                continue;
+            }
+            responses.add(null);
+        }
+
+        return new WrittenMessages(message, responses, writtenBytes);
+    }
+
+    private long writePayload(final JournalWriter<DataJournalEntry> writer, final List<PersistentRepr> reprs) {
+        long bytes = 0;
+        for (var repr : reprs) {
+            final Object payload = repr.payload();
+            if (!(payload instanceof Serializable)) {
+                throw new UnsupportedOperationException("Non-serializable payload encountered "
+                        + payload.getClass());
+            }
+
+            LOG.trace("{}: starting append of {}", persistenceId, payload);
+            final var entry = writer.append(new ToPersistence(repr));
+            final int size = entry.size();
+            LOG.trace("{}: finished append of {} with {} bytes at {}", persistenceId, payload, size, entry.index());
+            recordMessageSize(size);
+            bytes += size;
+        }
+        return bytes;
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/main/java/org/opendaylight/controller/akka/segjournal/LongEntrySerdes.java
new file mode 100644 (file)
index 0000000..eebf95f
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2023 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import io.atomix.storage.journal.JournalSerdes.EntryInput;
+import io.atomix.storage.journal.JournalSerdes.EntryOutput;
+import io.atomix.storage.journal.JournalSerdes.EntrySerdes;
+import java.io.IOException;
+
+enum LongEntrySerdes implements EntrySerdes<Long> {
+    LONG_ENTRY_SERDES {
+        @Override
+        public Long read(final EntryInput input) throws IOException {
+            return input.readLong();
+        }
+
+        @Override
+        public void write(final EntryOutput output, final Long entry) throws IOException {
+            output.writeLong(entry);
+        }
+    }
+}
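
Putting the pieces together: a segmented journal is opened through the SegmentedJournal builder and driven through its writer and reader, which are exactly the calls DataJournalV0 makes above. The following rough sketch opens a journal of Long entries with LongEntrySerdes; the directory, size limits and the Long.class registration are illustrative assumptions, and the class sits in the same package only to reach the package-private serdes:

package org.opendaylight.controller.akka.segjournal;

import io.atomix.storage.journal.JournalSerdes;
import io.atomix.storage.journal.SegmentedJournal;
import io.atomix.storage.journal.StorageLevel;
import java.io.File;

final class SequenceJournalSketch {
    public static void main(final String[] args) {
        final var journal = SegmentedJournal.<Long>builder()
            .withStorageLevel(StorageLevel.DISK)
            .withDirectory(new File("target/sequence-journal"))   // illustrative location
            .withName("sequence")
            .withNamespace(JournalSerdes.builder()
                .register(LongEntrySerdes.LONG_ENTRY_SERDES, Long.class)
                .build())
            .withMaxEntrySize(16 * 1024)                           // illustrative sizes
            .withMaxSegmentSize(128 * 1024)
            .build();

        // Append an entry and make it durable, mirroring handleWriteMessages()/flush() above.
        final var writer = journal.writer();
        final var appended = writer.append(42L);
        writer.flush();

        // Read it back, mirroring handleReplayMessages() above.
        try (var reader = journal.openReader(appended.index())) {
            final var next = reader.tryNext();
            if (next != null) {
                System.out.println("read " + next.entry() + " at index " + next.index());
            }
        }

        journal.close();
    }
}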
index 1dfcf4aef68021e87c859d0390b5436f9b0d9257..b9320998c95f28b7fcbd8eb170bf1842d3b83a47 100644 (file)
@@ -17,15 +17,13 @@ import akka.persistence.AtomicWrite;
 import akka.persistence.PersistentRepr;
 import akka.persistence.journal.japi.AsyncWriteJournal;
 import com.typesafe.config.Config;
-import com.typesafe.config.ConfigMemorySize;
-import io.atomix.storage.StorageLevel;
 import io.atomix.storage.journal.SegmentedJournal;
+import io.atomix.storage.journal.StorageLevel;
 import java.io.File;
+import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
-import java.util.Base64;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Consumer;
@@ -39,8 +37,6 @@ import scala.concurrent.Future;
  * An Akka persistence journal implementation on top of {@link SegmentedJournal}. This actor represents aggregation
  * of multiple journals and performs a receptionist job between Akka and individual per-persistenceId actors. See
  * {@link SegmentedJournalActor} for details on how the persistence works.
- *
- * @author Robert Varga
  */
 public class SegmentedFileJournal extends AsyncWriteJournal {
     public static final String STORAGE_ROOT_DIRECTORY = "root-directory";
@@ -48,6 +44,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
     public static final int STORAGE_MAX_ENTRY_SIZE_DEFAULT = 16 * 1024 * 1024;
     public static final String STORAGE_MAX_SEGMENT_SIZE = "max-segment-size";
     public static final int STORAGE_MAX_SEGMENT_SIZE_DEFAULT = STORAGE_MAX_ENTRY_SIZE_DEFAULT * 8;
+    public static final String STORAGE_MAX_UNFLUSHED_BYTES = "max-unflushed-bytes";
     public static final String STORAGE_MEMORY_MAPPED = "memory-mapped";
 
     private static final Logger LOG = LoggerFactory.getLogger(SegmentedFileJournal.class);
@@ -57,6 +54,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
     private final StorageLevel storage;
     private final int maxEntrySize;
     private final int maxSegmentSize;
+    private final int maxUnflushedBytes;
 
     public SegmentedFileJournal(final Config config) {
         rootDir = new File(config.getString(STORAGE_ROOT_DIRECTORY));
@@ -68,6 +66,7 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
 
         maxEntrySize = getBytes(config, STORAGE_MAX_ENTRY_SIZE, STORAGE_MAX_ENTRY_SIZE_DEFAULT);
         maxSegmentSize = getBytes(config, STORAGE_MAX_SEGMENT_SIZE, STORAGE_MAX_SEGMENT_SIZE_DEFAULT);
+        maxUnflushedBytes = getBytes(config, STORAGE_MAX_UNFLUSHED_BYTES, maxEntrySize);
 
         if (config.hasPath(STORAGE_MEMORY_MAPPED)) {
             storage = config.getBoolean(STORAGE_MEMORY_MAPPED) ? StorageLevel.MAPPED : StorageLevel.DISK;
@@ -80,12 +79,12 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
 
     @Override
     public Future<Iterable<Optional<Exception>>> doAsyncWriteMessages(final Iterable<AtomicWrite> messages) {
-        final Map<ActorRef, WriteMessages> map = new HashMap<>();
-        final List<Future<Optional<Exception>>> result = new ArrayList<>();
+        final var map = new HashMap<ActorRef, WriteMessages>();
+        final var result = new ArrayList<Future<Optional<Exception>>>();
 
-        for (AtomicWrite message : messages) {
-            final String persistenceId = message.persistenceId();
-            final ActorRef handler = handlers.computeIfAbsent(persistenceId, this::createHandler);
+        for (var message : messages) {
+            final var persistenceId = message.persistenceId();
+            final var handler = handlers.computeIfAbsent(persistenceId, this::createHandler);
             result.add(map.computeIfAbsent(handler, key -> new WriteMessages()).add(message));
         }
 
@@ -116,19 +115,18 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
     }
 
     private ActorRef createHandler(final String persistenceId) {
-        final String directoryName = Base64.getUrlEncoder().encodeToString(persistenceId.getBytes(
-            StandardCharsets.UTF_8));
-        final File directory = new File(rootDir, directoryName);
+        final var directoryName = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8);
+        final var directory = new File(rootDir, directoryName);
         LOG.debug("Creating handler for {} in directory {}", persistenceId, directory);
 
-        final ActorRef handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage,
-            maxEntrySize, maxSegmentSize));
+        final var handler = context().actorOf(SegmentedJournalActor.props(persistenceId, directory, storage,
+            maxEntrySize, maxSegmentSize, maxUnflushedBytes));
         LOG.debug("Directory {} handled by {}", directory, handler);
         return handler;
     }
 
     private <T> Future<T> delegateMessage(final String persistenceId, final AsyncMessage<T> message) {
-        final ActorRef handler = handlers.get(persistenceId);
+        final var handler = handlers.get(persistenceId);
         if (handler == null) {
             return Futures.failed(new IllegalStateException("Cannot find handler for " + persistenceId));
         }
@@ -146,9 +144,8 @@ public class SegmentedFileJournal extends AsyncWriteJournal {
         if (!config.hasPath(path)) {
             return defaultValue;
         }
-        final ConfigMemorySize value = config.getMemorySize(path);
-        final long result = value.toBytes();
-        checkArgument(result <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", Integer.MAX_VALUE);
-        return (int) result;
+        final long value = config.getBytes(path);
+        checkArgument(value <= Integer.MAX_VALUE, "Size %s exceeds maximum allowed %s", value, Integer.MAX_VALUE);
+        return (int) value;
     }
 }
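
The reworked getBytes() helper now leans on Config.getBytes(), which understands HOCON size strings such as 16MiB. A hedged sketch of how the journal's configuration keys might be parsed in isolation (the values are made up; only the key names come from the STORAGE_* constants above):

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

public final class JournalConfigSketch {
    public static void main(final String[] args) {
        // Key names match the STORAGE_* constants; the values are made up for illustration.
        final Config config = ConfigFactory.parseString("""
            root-directory = target/segmented-journal
            max-entry-size = 16MiB
            max-segment-size = 128MiB
            max-unflushed-bytes = 1MiB
            memory-mapped = false
            """);

        // getBytes() parses HOCON size units, so "16MiB" comes back as 16 * 1024 * 1024.
        final long maxEntrySize = config.getBytes("max-entry-size");
        System.out.println("max-entry-size = " + maxEntrySize + " bytes");
    }
}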
index c28a58c8bc254642aeb5286eb09322f9a46d607a..9f63892d26fdfd949b46d0bb9213bd84df2deb80 100644 (file)
@@ -12,33 +12,32 @@ import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.AbstractActor;
+import akka.actor.ActorRef;
 import akka.actor.Props;
+import akka.japi.pf.ReceiveBuilder;
 import akka.persistence.AtomicWrite;
 import akka.persistence.PersistentRepr;
 import com.codahale.metrics.Histogram;
 import com.codahale.metrics.Meter;
-import com.codahale.metrics.SlidingTimeWindowReservoir;
+import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
 import com.google.common.base.MoreObjects;
-import io.atomix.storage.StorageLevel;
+import com.google.common.base.Stopwatch;
 import io.atomix.storage.journal.Indexed;
+import io.atomix.storage.journal.JournalSerdes;
 import io.atomix.storage.journal.SegmentedJournal;
-import io.atomix.storage.journal.SegmentedJournalReader;
-import io.atomix.storage.journal.SegmentedJournalWriter;
-import io.atomix.utils.serializer.Namespace;
+import io.atomix.storage.journal.StorageLevel;
 import java.io.File;
-import java.io.Serializable;
+import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.FromPersistence;
-import org.opendaylight.controller.akka.segjournal.DataJournalEntry.ToPersistence;
+import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.Iterator;
-import scala.collection.SeqLike;
 import scala.concurrent.Future;
 import scala.concurrent.Promise;
 
@@ -61,11 +60,9 @@ import scala.concurrent.Promise;
  * <p>
  * Split-file approach allows us to treat sequence numbers and indices as equivalent, without maintaining any explicit
  * mapping information. The only additional information we need to maintain is the last deleted sequence number.
- *
- * @author Robert Varga
  */
-final class SegmentedJournalActor extends AbstractActor {
-    abstract static class AsyncMessage<T> {
+abstract sealed class SegmentedJournalActor extends AbstractActor {
+    abstract static sealed class AsyncMessage<T> {
         final Promise<T> promise = Promise.apply();
     }
 
@@ -82,11 +79,11 @@ final class SegmentedJournalActor extends AbstractActor {
         }
     }
 
-    private static final class ReplayMessages extends AsyncMessage<Void> {
+    static final class ReplayMessages extends AsyncMessage<Void> {
         private final long fromSequenceNr;
-        private final long toSequenceNr;
-        private final long max;
-        private final Consumer<PersistentRepr> replayCallback;
+        final long toSequenceNr;
+        final long max;
+        final Consumer<PersistentRepr> replayCallback;
 
         ReplayMessages(final long fromSequenceNr,
                 final long toSequenceNr, final long max, final Consumer<PersistentRepr> replayCallback) {
@@ -108,12 +105,29 @@ final class SegmentedJournalActor extends AbstractActor {
         private final List<Promise<Optional<Exception>>> results = new ArrayList<>();
 
         Future<Optional<Exception>> add(final AtomicWrite write) {
-            final Promise<Optional<Exception>> promise = Promise.apply();
+            final var promise = Promise.<Optional<Exception>>apply();
             requests.add(write);
             results.add(promise);
             return promise.future();
         }
 
+        int size() {
+            return requests.size();
+        }
+
+        AtomicWrite getRequest(final int index) {
+            return requests.get(index);
+        }
+
+        void setFailure(final int index, final Exception cause) {
+            results.get(index).success(Optional.of(cause));
+        }
+
+        void setSuccess(final int index) {
+            results.get(index).success(Optional.empty());
+        }
+
         @Override
         public String toString() {
             return MoreObjects.toStringHelper(this).add("requests", requests).toString();
@@ -133,31 +147,164 @@ final class SegmentedJournalActor extends AbstractActor {
         }
     }
 
+    // Each element of responses is null on success, or the Exception on failure
+    record WrittenMessages(WriteMessages message, List<Object> responses, long writtenBytes) {
+        WrittenMessages {
+            verify(responses.size() == message.size(), "Mismatched %s and %s", message, responses);
+            verify(writtenBytes >= 0, "Unexpected length %s", writtenBytes);
+        }
+
+        private void complete() {
+            for (int i = 0, size = responses.size(); i < size; ++i) {
+                if (responses.get(i) instanceof Exception ex) {
+                    message.setFailure(i, ex);
+                } else {
+                    message.setSuccess(i);
+                }
+            }
+        }
+    }
+
+    /**
+     * A {@link SegmentedJournalActor} which delays issuing a flush operation until a watermark is reached or when the
+     * queue is empty.
+     *
+     * <p>
+     * The problem we are addressing is that there is a queue sitting in front of the actor, which we have no direct
+     * access to. Since a flush involves committing data to durable storage, that operation can easily end up dominating
+     * workloads.
+     *
+     * <p>
+     * We solve this by having an additional queue in which we track which messages were written and trigger a flush
+     * only when the number of bytes we have written exceeds a specified limit. The other part is that each time this
+     * queue becomes non-empty, we send a dedicated message to self. This acts as an actor queue probe -- when we receive
+     * it, we know we have processed all messages that were in the queue when we first delayed the write.
+     *
+     * <p>
+     * The combination of these mechanisms ensures we use a minimal delay while also ensuring we take advantage of
+     * batching opportunities.
+     */
+    private static final class Delayed extends SegmentedJournalActor {
+        private static final class Flush extends AsyncMessage<Void> {
+            final long batch;
+
+            Flush(final long batch) {
+                this.batch = batch;
+            }
+        }
+
+        private final ArrayDeque<WrittenMessages> unflushedWrites = new ArrayDeque<>();
+        private final Stopwatch unflushedDuration = Stopwatch.createUnstarted();
+        private final long maxUnflushedBytes;
+
+        private long batch = 0;
+        private long unflushedBytes = 0;
+
+        Delayed(final String persistenceId, final File directory, final StorageLevel storage,
+                final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) {
+            super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize);
+            this.maxUnflushedBytes = maxUnflushedBytes;
+        }
+
+        @Override
+        ReceiveBuilder addMessages(final ReceiveBuilder builder) {
+            return super.addMessages(builder).match(Flush.class, this::handleFlush);
+        }
+
+        private void handleFlush(final Flush message) {
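+            // A probe whose batch number no longer matches is stale: the writes it covered have already
+            // been flushed in the meantime, so there is nothing left to do for it.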
+            if (message.batch == batch) {
+                flushWrites();
+            } else {
+                LOG.debug("{}: batch {} not flushed by {}", persistenceId(), batch, message.batch);
+            }
+        }
+
+        @Override
+        void onWrittenMessages(final WrittenMessages message) {
+            boolean first = unflushedWrites.isEmpty();
+            if (first) {
+                unflushedDuration.start();
+            }
+            unflushedWrites.addLast(message);
+            unflushedBytes = unflushedBytes + message.writtenBytes;
+            if (unflushedBytes >= maxUnflushedBytes) {
+                LOG.debug("{}: reached {} unflushed journal bytes", persistenceId(), unflushedBytes);
+                flushWrites();
+            } else if (first) {
+                LOG.debug("{}: deferring journal flush", persistenceId());
+                self().tell(new Flush(++batch), ActorRef.noSender());
+            }
+        }
+
+        @Override
+        void flushWrites() {
+            final var unsyncedSize = unflushedWrites.size();
+            if (unsyncedSize == 0) {
+                // Nothing to flush
+                return;
+            }
+
+            LOG.debug("{}: flushing {} journal writes after {}", persistenceId(), unsyncedSize,
+                unflushedDuration.stop());
+            flushJournal(unflushedBytes, unsyncedSize);
+
+            final var sw = Stopwatch.createStarted();
+            unflushedWrites.forEach(WrittenMessages::complete);
+            unflushedWrites.clear();
+            unflushedBytes = 0;
+            unflushedDuration.reset();
+            LOG.debug("{}: completed {} flushed journal writes in {}", persistenceId(), unsyncedSize, sw);
+        }
+    }
+
+    private static final class Immediate extends SegmentedJournalActor {
+        Immediate(final String persistenceId, final File directory, final StorageLevel storage,
+                final int maxEntrySize, final int maxSegmentSize) {
+            super(persistenceId, directory, storage, maxEntrySize, maxSegmentSize);
+        }
+
+        @Override
+        void onWrittenMessages(final WrittenMessages message) {
+            flushJournal(message.writtenBytes, 1);
+            message.complete();
+        }
+
+        @Override
+        void flushWrites() {
+            // No-op
+        }
+    }
+
     private static final Logger LOG = LoggerFactory.getLogger(SegmentedJournalActor.class);
-    private static final Namespace DELETE_NAMESPACE = Namespace.builder().register(Long.class).build();
+    private static final JournalSerdes DELETE_NAMESPACE = JournalSerdes.builder()
+        .register(LongEntrySerdes.LONG_ENTRY_SERDES, Long.class)
+        .build();
     private static final int DELETE_SEGMENT_SIZE = 64 * 1024;
 
-    // Tracks the time it took us to write a batch of messages
-    private final Timer batchWriteTime = new Timer();
-    // Tracks the number of individual messages written
-    private final Meter messageWriteCount = new Meter();
-    // Tracks the size distribution of messages for last 5 minutes
-    private final Histogram messageSize = new Histogram(new SlidingTimeWindowReservoir(5, TimeUnit.MINUTES));
-
     private final String persistenceId;
     private final StorageLevel storage;
     private final int maxSegmentSize;
     private final int maxEntrySize;
     private final File directory;
 
-    private SegmentedJournal<DataJournalEntry> dataJournal;
+    // Tracks the time it took us to write a batch of messages
+    private Timer batchWriteTime;
+    // Tracks the number of individual messages written
+    private Meter messageWriteCount;
+    // Tracks the size distribution of messages
+    private Histogram messageSize;
+    // Tracks the number of messages completed for each flush
+    private Histogram flushMessages;
+    // Tracks the number of bytes completed for each flush
+    private Histogram flushBytes;
+    // Tracks the duration of flush operations
+    private Timer flushTime;
+
+    private DataJournal dataJournal;
     private SegmentedJournal<Long> deleteJournal;
     private long lastDelete;
 
-    // Tracks largest message size we have observed either during recovery or during write
-    private int largestObservedSize;
-
-    SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage,
+    private SegmentedJournalActor(final String persistenceId, final File directory, final StorageLevel storage,
             final int maxEntrySize, final int maxSegmentSize) {
         this.persistenceId = requireNonNull(persistenceId);
         this.directory = requireNonNull(directory);
@@ -167,26 +314,55 @@ final class SegmentedJournalActor extends AbstractActor {
     }
 
     static Props props(final String persistenceId, final File directory, final StorageLevel storage,
-            final int maxEntrySize, final int maxSegmentSize) {
-        return Props.create(SegmentedJournalActor.class, requireNonNull(persistenceId), directory, storage,
-            maxEntrySize, maxSegmentSize);
+            final int maxEntrySize, final int maxSegmentSize, final int maxUnflushedBytes) {
+        final var pid = requireNonNull(persistenceId);
+        return maxUnflushedBytes > 0
+            ? Props.create(Delayed.class, pid, directory, storage, maxEntrySize, maxSegmentSize, maxUnflushedBytes)
+            : Props.create(Immediate.class, pid, directory, storage, maxEntrySize, maxSegmentSize);
+    }
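+
+    // Illustrative usage only (values below are assumptions, not taken from any deployment): a positive
+    // maxUnflushedBytes selects the Delayed variant, otherwise the Immediate variant is used, e.g.
+    //
+    //   system.actorOf(SegmentedJournalActor.props("sample", new File("target/sample-journal"),
+    //       StorageLevel.DISK, 16 * 1024 * 1024, 128 * 1024 * 1024, 1024 * 1024));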
+
+    final String persistenceId() {
+        return persistenceId;
+    }
+
+    final void flushJournal(final long bytes, final int messages) {
+        final var sw = Stopwatch.createStarted();
+        dataJournal.flush();
+        LOG.debug("{}: journal flush completed in {}", persistenceId, sw.stop());
+        flushBytes.update(bytes);
+        flushMessages.update(messages);
+        flushTime.update(sw.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
     }
 
     @Override
     public Receive createReceive() {
-        return receiveBuilder()
-                .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo)
-                .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr)
-                .match(ReplayMessages.class, this::handleReplayMessages)
-                .match(WriteMessages.class, this::handleWriteMessages)
-                .matchAny(this::handleUnknown)
-                .build();
+        return addMessages(receiveBuilder())
+            .matchAny(this::handleUnknown)
+            .build();
+    }
+
+    ReceiveBuilder addMessages(final ReceiveBuilder builder) {
+        return builder
+            .match(DeleteMessagesTo.class, this::handleDeleteMessagesTo)
+            .match(ReadHighestSequenceNr.class, this::handleReadHighestSequenceNr)
+            .match(ReplayMessages.class, this::handleReplayMessages)
+            .match(WriteMessages.class, this::handleWriteMessages);
     }
 
     @Override
     public void preStart() throws Exception {
         LOG.debug("{}: actor starting", persistenceId);
         super.preStart();
+
+        final var registry = MetricsReporter.getInstance(MeteringBehavior.DOMAIN).getMetricsRegistry();
+        final var actorName = self().path().parent().toStringWithoutAddress() + '/' + directory.getName();
+
+        batchWriteTime = registry.timer(MetricRegistry.name(actorName, "batchWriteTime"));
+        messageWriteCount = registry.meter(MetricRegistry.name(actorName, "messageWriteCount"));
+        messageSize = registry.histogram(MetricRegistry.name(actorName, "messageSize"));
+        flushBytes = registry.histogram(MetricRegistry.name(actorName, "flushBytes"));
+        flushMessages = registry.histogram(MetricRegistry.name(actorName, "flushMessages"));
+        flushTime = registry.timer(MetricRegistry.name(actorName, "flushTime"));
     }
 
     @Override
@@ -223,20 +399,22 @@ final class SegmentedJournalActor extends AbstractActor {
         ensureOpen();
 
         LOG.debug("{}: delete messages {}", persistenceId, message);
-        final long to = Long.min(dataJournal.writer().getLastIndex(), message.toSequenceNr);
+        flushWrites();
+
+        final long to = Long.min(dataJournal.lastWrittenSequenceNr(), message.toSequenceNr);
         LOG.debug("{}: adjusted delete to {}", persistenceId, to);
 
         if (lastDelete < to) {
             LOG.debug("{}: deleting entries up to {}", persistenceId, to);
 
             lastDelete = to;
-            final SegmentedJournalWriter<Long> deleteWriter = deleteJournal.writer();
-            final Indexed<Long> entry = deleteWriter.append(lastDelete);
+            final var deleteWriter = deleteJournal.writer();
+            final var entry = deleteWriter.append(lastDelete);
             deleteWriter.commit(entry.index());
-            dataJournal.writer().commit(lastDelete);
+            dataJournal.deleteTo(lastDelete);
 
             LOG.debug("{}: compaction started", persistenceId);
-            dataJournal.compact(lastDelete + 1);
+            dataJournal.compactTo(lastDelete);
             deleteJournal.compact(entry.index());
             LOG.debug("{}: compaction finished", persistenceId);
         } else {
@@ -246,13 +424,13 @@ final class SegmentedJournalActor extends AbstractActor {
         message.promise.success(null);
     }
 
-    @SuppressWarnings("checkstyle:illegalCatch")
     private void handleReadHighestSequenceNr(final ReadHighestSequenceNr message) {
         LOG.debug("{}: looking for highest sequence on {}", persistenceId, message);
         final Long sequence;
         if (directory.isDirectory()) {
             ensureOpen();
-            sequence = dataJournal.writer().getLastIndex();
+            flushWrites();
+            sequence = dataJournal.lastWrittenSequenceNr();
         } else {
             sequence = 0L;
         }
@@ -261,116 +439,63 @@ final class SegmentedJournalActor extends AbstractActor {
         message.promise.success(sequence);
     }
 
-    @SuppressWarnings("checkstyle:illegalCatch")
     private void handleReplayMessages(final ReplayMessages message) {
         LOG.debug("{}: replaying messages {}", persistenceId, message);
         ensureOpen();
+        flushWrites();
 
         final long from = Long.max(lastDelete + 1, message.fromSequenceNr);
         LOG.debug("{}: adjusted fromSequenceNr to {}", persistenceId, from);
 
-        try (SegmentedJournalReader<DataJournalEntry> reader = dataJournal.openReader(from)) {
-            int count = 0;
-            while (reader.hasNext() && count < message.max) {
-                final Indexed<DataJournalEntry> next = reader.next();
-                if (next.index() > message.toSequenceNr) {
-                    break;
-                }
-
-                LOG.trace("{}: replay {}", persistenceId, next);
-                updateLargestSize(next.size());
-                final DataJournalEntry entry = next.entry();
-                verify(entry instanceof FromPersistence, "Unexpected entry %s", entry);
-
-                final PersistentRepr repr = ((FromPersistence) entry).toRepr(persistenceId, next.index());
-                LOG.debug("{}: replaying {}", persistenceId, repr);
-                message.replayCallback.accept(repr);
-                count++;
-            }
-            LOG.debug("{}: successfully replayed {} entries", persistenceId, count);
-        } catch (Exception e) {
-            LOG.warn("{}: failed to replay messages for {}", persistenceId, message, e);
-            message.promise.failure(e);
-        } finally {
-            message.promise.success(null);
-        }
+        dataJournal.handleReplayMessages(message, from);
     }
 
-    @SuppressWarnings("checkstyle:illegalCatch")
     private void handleWriteMessages(final WriteMessages message) {
         ensureOpen();
 
-        final SegmentedJournalWriter<DataJournalEntry> writer = dataJournal.writer();
-        final long startTicks = System.nanoTime();
-        final int count = message.requests.size();
-        final long start = writer.getLastIndex();
-
-        for (int i = 0; i < count; ++i) {
-            final long mark = writer.getLastIndex();
-            try {
-                writeRequest(writer, message.requests.get(i));
-            } catch (Exception e) {
-                LOG.warn("{}: failed to write out request", persistenceId, e);
-                message.results.get(i).success(Optional.of(e));
-                writer.truncate(mark);
-                continue;
-            }
+        final var sw = Stopwatch.createStarted();
+        final long start = dataJournal.lastWrittenSequenceNr();
+        final var writtenMessages = dataJournal.handleWriteMessages(message);
+        sw.stop();
 
-            message.results.get(i).success(Optional.empty());
-        }
-        writer.flush();
-        batchWriteTime.update(System.nanoTime() - startTicks, TimeUnit.NANOSECONDS);
-        messageWriteCount.mark(writer.getLastIndex() - start);
-    }
+        batchWriteTime.update(sw.elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
+        messageWriteCount.mark(dataJournal.lastWrittenSequenceNr() - start);
 
-    private void writeRequest(final SegmentedJournalWriter<DataJournalEntry> writer, final AtomicWrite request) {
-        // Cast is needed for Eclipse because of https://bugs.eclipse.org/bugs/show_bug.cgi?id=468276
-        final Iterator<PersistentRepr> it = ((SeqLike<PersistentRepr, ?>) request.payload()).iterator();
-        while (it.hasNext()) {
-            final PersistentRepr repr = it.next();
-            final Object payload = repr.payload();
-            if (!(payload instanceof Serializable)) {
-                throw new UnsupportedOperationException("Non-serializable payload encountered " + payload.getClass());
-            }
-
-            final int size = writer.append(new ToPersistence(repr)).size();
-            messageSize.update(size);
-            updateLargestSize(size);
-        }
+        // log message after statistics are updated
+        LOG.debug("{}: write of {} bytes completed in {}", persistenceId, writtenMessages.writtenBytes, sw);
+        onWrittenMessages(writtenMessages);
     }
 
+    /**
+     * Handle the result of a completed write, deciding when to flush the journal and complete the requests.
+     *
+     * @param message Messages which were written
+     */
+    abstract void onWrittenMessages(WrittenMessages message);
+
     private void handleUnknown(final Object message) {
         LOG.error("{}: Received unknown message {}", persistenceId, message);
     }
 
-    private void updateLargestSize(final int size) {
-        if (size > largestObservedSize) {
-            largestObservedSize = size;
-        }
-    }
-
     private void ensureOpen() {
         if (dataJournal != null) {
             verifyNotNull(deleteJournal);
             return;
         }
 
+        final var sw = Stopwatch.createStarted();
         deleteJournal = SegmentedJournal.<Long>builder().withDirectory(directory).withName("delete")
                 .withNamespace(DELETE_NAMESPACE).withMaxSegmentSize(DELETE_SEGMENT_SIZE).build();
-        final Indexed<Long> lastEntry = deleteJournal.writer().getLastEntry();
+        final var lastEntry = deleteJournal.writer().getLastEntry();
         lastDelete = lastEntry == null ? 0 : lastEntry.entry();
 
-        dataJournal = SegmentedJournal.<DataJournalEntry>builder()
-                .withStorageLevel(storage).withDirectory(directory).withName("data")
-                .withNamespace(Namespace.builder()
-                    .register(new DataJournalEntrySerializer(context().system()),
-                        FromPersistence.class, ToPersistence.class)
-                    .build())
-                .withMaxEntrySize(maxEntrySize).withMaxSegmentSize(maxSegmentSize)
-                .build();
-        final SegmentedJournalWriter<DataJournalEntry> writer = dataJournal.writer();
-        writer.commit(lastDelete);
-        LOG.debug("{}: journal open with last index {}, deleted to {}", persistenceId, writer.getLastIndex(),
-            lastDelete);
+        dataJournal = new DataJournalV0(persistenceId, messageSize, context().system(), storage, directory,
+            maxEntrySize, maxSegmentSize);
+        dataJournal.deleteTo(lastDelete);
+        LOG.debug("{}: journal open in {} with last index {}, deleted to {}", persistenceId, sw,
+            dataJournal.lastWrittenSequenceNr(), lastDelete);
     }
+
+    abstract void flushWrites();
+
 }
diff --git a/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/PerformanceTest.java b/opendaylight/md-sal/sal-akka-segmented-journal/src/test/java/org/opendaylight/controller/akka/segjournal/PerformanceTest.java
new file mode 100644 (file)
index 0000000..636a5e1
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.akka.segjournal;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
+import akka.persistence.AtomicWrite;
+import akka.persistence.PersistentRepr;
+import akka.testkit.CallingThreadDispatcher;
+import akka.testkit.javadsl.TestKit;
+import com.codahale.metrics.Histogram;
+import com.codahale.metrics.UniformReservoir;
+import com.google.common.base.Stopwatch;
+import com.google.common.base.Ticker;
+import io.atomix.storage.journal.StorageLevel;
+import java.io.File;
+import java.io.Serializable;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.commons.io.FileUtils;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+class PerformanceTest {
+    private static final class Payload implements Serializable {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        final byte[] bytes;
+
+        Payload(final int size, final ThreadLocalRandom random) {
+            bytes = new byte[size];
+            random.nextBytes(bytes);
+        }
+    }
+
+    private static final class Request {
+        final WriteMessages write = new WriteMessages();
+        final Future<Optional<Exception>> future;
+
+        Request(final AtomicWrite atomicWrite) {
+            future = write.add(atomicWrite);
+        }
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(PerformanceTest.class);
+    private static final File DIRECTORY = new File("target/sfj-perf");
+
+    private static ActorSystem SYSTEM;
+
+    private TestKit kit;
+    private ActorRef actor;
+
+    @BeforeAll
+    static void beforeClass() {
+        SYSTEM = ActorSystem.create("test");
+    }
+
+    @AfterAll
+    static void afterClass() {
+        TestKit.shutdownActorSystem(SYSTEM);
+        SYSTEM = null;
+    }
+
+    @BeforeEach
+    void before() {
+        kit = new TestKit(SYSTEM);
+        FileUtils.deleteQuietly(DIRECTORY);
+    }
+
+    @AfterEach
+    void after() {
+        if (actor != null) {
+            actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        }
+        FileUtils.deleteQuietly(DIRECTORY);
+    }
+
+    @Disabled("Disable due to being an extensive time hog")
+    @ParameterizedTest
+    @MethodSource
+    void writeRequests(final StorageLevel storage, final int maxEntrySize, final int maxSegmentSize,
+            final int payloadSize, final int requestCount) {
+        LOG.info("Test {} entrySize={} segmentSize={} payload={} count={}", storage, maxEntrySize, maxSegmentSize,
+            payloadSize, requestCount);
+
+        actor = kit.childActorOf(
+            SegmentedJournalActor.props("perf", DIRECTORY, storage, maxEntrySize, maxSegmentSize, maxEntrySize)
+            .withDispatcher(CallingThreadDispatcher.Id()));
+
+        final var random = ThreadLocalRandom.current();
+        final var sw = Stopwatch.createStarted();
+        final var payloads = new Payload[1_000];
+        for (int i = 0; i < payloads.length; ++i) {
+            payloads[i] = new Payload(payloadSize, random);
+        }
+        LOG.info("{} payloads created in {}", payloads.length, sw.stop());
+
+        sw.reset().start();
+        final var requests = new Request[requestCount];
+        for (int i = 0; i < requests.length; ++i) {
+            requests[i] = new Request(AtomicWrite.apply(PersistentRepr.apply(payloads[random.nextInt(payloads.length)],
+                i, "foo", null, false, kit.getRef(), "uuid")));
+        }
+        LOG.info("{} requests created in {}", requests.length, sw.stop());
+
+        final var histogram = new Histogram(new UniformReservoir(requests.length));
+        sw.reset().start();
+        long started = System.nanoTime();
+        for (var req : requests) {
+            actor.tell(req.write, ActorRef.noSender());
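+            // The actor runs on the CallingThreadDispatcher, so the message is processed synchronously
+            // and the future is already completed by the time tell() returns.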
+            assertTrue(req.future.isCompleted());
+            assertTrue(req.future.value().get().get().isEmpty());
+
+            final long now = System.nanoTime();
+            histogram.update(now - started);
+            started = now;
+        }
+        sw.stop();
+        final var snap = histogram.getSnapshot();
+
+        LOG.info("{} requests completed in {}", requests.length, sw);
+        LOG.info("Minimum: {}", formatNanos(snap.getMin()));
+        LOG.info("Maximum: {}", formatNanos(snap.getMax()));
+        LOG.info("Mean:    {}", formatNanos(snap.getMean()));
+        LOG.info("StdDev:  {}", formatNanos(snap.getStdDev()));
+        LOG.info("Median:  {}", formatNanos(snap.getMedian()));
+        LOG.info("75th:    {}", formatNanos(snap.get75thPercentile()));
+        LOG.info("95th:    {}", formatNanos(snap.get95thPercentile()));
+        LOG.info("98th:    {}", formatNanos(snap.get98thPercentile()));
+        LOG.info("99th:    {}", formatNanos(snap.get99thPercentile()));
+        LOG.info("99.9th:  {}", formatNanos(snap.get999thPercentile()));
+    }
+
+    private static List<Arguments> writeRequests() {
+        return List.of(
+            // DISK:
+            // 100K requests, 10K each, 16M max, 128M segment
+            Arguments.of(StorageLevel.DISK, 16 * 1024 * 1024, 128 * 1024 * 1024,    10_000,  100_000),
+            // 100K requests, 10K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.DISK,      1024 * 1024,  16 * 1024 * 1024,    10_000,  100_000),
+            // 10K requests, 100K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.DISK,      1024 * 1024,  16 * 1024 * 1024,   100_000,   10_000),
+            // 1K requests, 1M each, 1M max, 16M segment
+            Arguments.of(StorageLevel.DISK,      1024 * 1024,  16 * 1024 * 1024, 1_000_000,    1_000),
+
+            // MAPPED:
+            // 100K requests, 10K each, 16M max, 128M segment
+            Arguments.of(StorageLevel.MAPPED, 16 * 1024 * 1024, 128 * 1024 * 1024,    10_000,  100_000),
+            // 100K requests, 10K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.MAPPED,      1024 * 1024,  16 * 1024 * 1024,    10_000,  100_000),
+            // 10K requests, 100K each, 1M max, 16M segment
+            Arguments.of(StorageLevel.MAPPED,      1024 * 1024,  16 * 1024 * 1024,   100_000,   10_000),
+            // 1K requests, 1M each, 1M max, 16M segment
+            Arguments.of(StorageLevel.MAPPED,      1024 * 1024,  16 * 1024 * 1024, 1_000_000,    1_000));
+    }
+
+    private static String formatNanos(final double nanos) {
+        return formatNanos(Math.round(nanos));
+    }
+
+    private static String formatNanos(final long nanos) {
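+        // Piggy-back on Stopwatch's human-readable formatting: the custom Ticker returns 0 on the first
+        // read (when the stopwatch starts) and 'nanos' on every subsequent read, so toString() renders
+        // the requested duration.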
+        return Stopwatch.createStarted(new Ticker() {
+            boolean started;
+
+            @Override
+            public long read() {
+                if (started) {
+                    return nanos;
+                }
+                started = true;
+                return 0;
+            }
+        }).toString();
+    }
+}
index da1ba454e12c3bccbc8ad99bcd42613eadedd3d2..d488dc6cf2e3a354834cfdb604073bd1ea443f24 100644 (file)
@@ -12,7 +12,7 @@ import com.typesafe.config.ConfigFactory;
 import java.io.File;
 import org.apache.commons.io.FileUtils;
 import org.junit.runner.RunWith;
-import org.scalatest.junit.JUnitRunner;
+import org.scalatestplus.junit.JUnitRunner;
 
 @RunWith(JUnitRunner.class)
 public class SegmentedFileJournalSpecTest extends JavaJournalSpec {
@@ -29,4 +29,10 @@ public class SegmentedFileJournalSpecTest extends JavaJournalSpec {
         FileUtils.deleteQuietly(JOURNAL_DIR);
         super.beforeAll();
     }
+
+    @Override
+    public void afterAll() {
+        super.afterAll();
+        FileUtils.deleteQuietly(JOURNAL_DIR);
+    }
 }
index 7db0d4b87e0da205f4a227385d01ec46e4cae6a6..4d3db7980e2116ef013f62b13ed120ddb43c33f9 100644 (file)
@@ -7,13 +7,12 @@
  */
 package org.opendaylight.controller.akka.segjournal;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
@@ -24,7 +23,7 @@ import akka.persistence.AtomicWrite;
 import akka.persistence.PersistentRepr;
 import akka.testkit.CallingThreadDispatcher;
 import akka.testkit.javadsl.TestKit;
-import io.atomix.storage.StorageLevel;
+import io.atomix.storage.journal.StorageLevel;
 import java.io.File;
 import java.io.IOException;
 import java.io.Serializable;
@@ -36,50 +35,59 @@ import java.util.Optional;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import org.apache.commons.io.FileUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.AsyncMessage;
 import org.opendaylight.controller.akka.segjournal.SegmentedJournalActor.WriteMessages;
 import scala.concurrent.Future;
 
-public class SegmentedFileJournalTest {
+@ExtendWith(MockitoExtension.class)
+class SegmentedFileJournalTest {
     private static final File DIRECTORY = new File("target/sfj-test");
     private static final int SEGMENT_SIZE = 1024 * 1024;
     private static final int MESSAGE_SIZE = 512 * 1024;
+    private static final int FLUSH_SIZE = 16 * 1024;
 
     private static ActorSystem SYSTEM;
 
+    @Mock
+    private Consumer<PersistentRepr> firstCallback;
+
     private TestKit kit;
     private ActorRef actor;
 
-    @BeforeClass
-    public static void beforeClass() {
+    @BeforeAll
+    static void beforeClass() {
         SYSTEM = ActorSystem.create("test");
     }
 
-    @AfterClass
-    public static void afterClass() {
+    @AfterAll
+    static void afterClass() {
         TestKit.shutdownActorSystem(SYSTEM);
         SYSTEM = null;
     }
 
-    @Before
-    public void before() {
+    @BeforeEach
+    void before() {
         kit = new TestKit(SYSTEM);
         FileUtils.deleteQuietly(DIRECTORY);
         actor = actor();
     }
 
-    @After
-    public void after() {
+    @AfterEach
+    void after() {
         actor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        FileUtils.deleteQuietly(DIRECTORY);
     }
 
     @Test
-    public void testDeleteAfterStop() {
+    void testDeleteAfterStop() {
         // Preliminary setup
         final WriteMessages write = new WriteMessages();
         final Future<Optional<Exception>> first = write.add(AtomicWrite.apply(PersistentRepr.apply("first", 1, "foo",
@@ -108,7 +116,7 @@ public class SegmentedFileJournalTest {
     }
 
     @Test
-    public void testSegmentation() throws IOException {
+    void testSegmentation() throws IOException {
         // We want to have roughly three segments
         final LargePayload payload = new LargePayload();
 
@@ -133,7 +141,7 @@ public class SegmentedFileJournalTest {
     }
 
     @Test
-    public void testComplexDeletesAndPartialReplays() throws Exception {
+    void testComplexDeletesAndPartialReplays() throws Exception {
         for (int i = 0; i <= 4; i++) {
             writeBigPaylod();
         }
@@ -203,7 +211,7 @@ public class SegmentedFileJournalTest {
 
     private ActorRef actor() {
         return kit.childActorOf(SegmentedJournalActor.props("foo", DIRECTORY, StorageLevel.DISK, MESSAGE_SIZE,
-            SEGMENT_SIZE).withDispatcher(CallingThreadDispatcher.Id()));
+            SEGMENT_SIZE, FLUSH_SIZE).withDispatcher(CallingThreadDispatcher.Id()));
     }
 
     private void deleteEntries(final long deleteTo) {
@@ -219,8 +227,8 @@ public class SegmentedFileJournalTest {
     }
 
     private void assertReplayCount(final int expected) {
-        Consumer<PersistentRepr> firstCallback = mock(Consumer.class);
-        doNothing().when(firstCallback).accept(any(PersistentRepr.class));
+        // Cast fixes an Eclipse warning 'generic array created'
+        reset((Object) firstCallback);
         AsyncMessage<Void> replay = SegmentedJournalActor.replayMessages(0, Long.MAX_VALUE, Long.MAX_VALUE,
             firstCallback);
         actor.tell(replay, ActorRef.noSender());
@@ -243,10 +251,10 @@ public class SegmentedFileJournalTest {
         return future.value().get().get();
     }
 
-    private static final class LargePayload implements Serializable {
+    static final class LargePayload implements Serializable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         final byte[] bytes = new byte[MESSAGE_SIZE / 2];
-
     }
 }
diff --git a/opendaylight/md-sal/sal-binding-api/.gitignore b/opendaylight/md-sal/sal-binding-api/.gitignore
deleted file mode 100644 (file)
index ea8c4bf..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/target
diff --git a/opendaylight/md-sal/sal-binding-api/pom.xml b/opendaylight/md-sal/sal-binding-api/pom.xml
deleted file mode 100644 (file)
index 4a41829..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-binding-api</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>concepts</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>yang-binding</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
-      <scope>provided</scope>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ActionProviderService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ActionProviderService.java
deleted file mode 100644 (file)
index 9dd05a8..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Bridge to action registration.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.ActionProviderService} instead
- */
-@Deprecated
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface ActionProviderService
-        extends BindingService, org.opendaylight.mdsal.binding.api.ActionProviderService {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ActionService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ActionService.java
deleted file mode 100644 (file)
index a505de4..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Bridge to action invocation.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.ActionService} instead
- */
-@Deprecated
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface ActionService extends BindingService, org.opendaylight.mdsal.binding.api.ActionService {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/BindingService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/BindingService.java
deleted file mode 100644 (file)
index f8a7bff..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.controller.sal.binding.api.BindingAwareService;
-
-/**
- * Marker interface for MD-SAL services which are available for users of MD-SAL.
- *
- * <p>
- * BindingService is marker interface for infrastructure services provided by
- * the SAL. These services may be session-specific, and wrapped by custom
- * delegator patterns in order to introduce additional semantics / checks
- * to the system.
- *
- * <p>
- * This interface extends {@link BindingAwareService}, order to be make
- * new services available via
- * {@link org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext}
- * and via
- * {@link org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext}
- *
- */
-public interface BindingService extends BindingAwareService {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/BindingTransactionChain.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/BindingTransactionChain.java
deleted file mode 100644 (file)
index b28fbe4..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * A chain of transactions.
- *
- * <p>
- * For more information about transaction chaining and transaction chains
- * see {@link TransactionChain}.
- *
- * @see TransactionChain
- *
- */
-public interface BindingTransactionChain extends TransactionFactory,
-        TransactionChain<InstanceIdentifier<?>, DataObject> {
-    @Override
-    ReadOnlyTransaction newReadOnlyTransaction();
-
-    @Override
-    ReadWriteTransaction newReadWriteTransaction();
-
-    @Override
-    WriteTransaction newWriteOnlyTransaction();
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ClusteredDataTreeChangeListener.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ClusteredDataTreeChangeListener.java
deleted file mode 100644 (file)
index aa4d8d3..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-/**
- * ClusteredDataTreeChangeListener is a marker interface to enable data tree change notifications on all
- * instances in a cluster where this listener is registered.
- *
- * <p>
- * Applications should implement ClusteredDataTreeChangeListener instead of {@link DataTreeChangeListener},
- * if they want to listen for data tree change notifications on any node of a clustered data store.
- * {@link DataTreeChangeListener} enables notifications only at the leader of the data store.
- *
- * @author Thomas Pantelis
- *
- * @param <T> the DataObject type
- */
-public interface ClusteredDataTreeChangeListener<T extends DataObject> extends DataTreeChangeListener<T> {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataBroker.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataBroker.java
deleted file mode 100644 (file)
index bc5dd21..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainFactory;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Provides access to a conceptual data tree store and also provides the ability to
- * subscribe for changes to data under a given branch of the tree.
- *
- * <p>
- * For more information on usage, please see the documentation in {@link AsyncDataBroker}.
- *
- * @see AsyncDataBroker
- * @see TransactionChainFactory
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.DataBroker} instead
- */
-@Deprecated
-public interface DataBroker extends  AsyncDataBroker<InstanceIdentifier<?>, DataObject>,
-        TransactionChainFactory<InstanceIdentifier<?>, DataObject>, TransactionFactory, BindingService,
-        DataTreeChangeService {
-    @Override
-    ReadOnlyTransaction newReadOnlyTransaction();
-
-    @Override
-    ReadWriteTransaction newReadWriteTransaction();
-
-    @Override
-    WriteTransaction newWriteOnlyTransaction();
-
-    @Override
-    BindingTransactionChain createTransactionChain(TransactionChainListener listener);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataObjectModification.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataObjectModification.java
deleted file mode 100644 (file)
index 5448f79..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.collect.Collections2;
-import java.util.Collection;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.yangtools.yang.binding.Augmentation;
-import org.opendaylight.yangtools.yang.binding.ChildOf;
-import org.opendaylight.yangtools.yang.binding.ChoiceIn;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.Identifiable;
-import org.opendaylight.yangtools.yang.binding.Identifier;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.IdentifiableItem;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.Item;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
-
-/**
- * Represents a modification of DataObject.
- */
-public interface DataObjectModification<T extends DataObject>
-        extends org.opendaylight.yangtools.concepts.Identifiable<PathArgument> {
-
-    enum ModificationType {
-        /**
-         * Child node (direct or indirect) was modified.
-         *
-         */
-        SUBTREE_MODIFIED,
-
-        /**
-         * Node was explicitly created / overwritten.
-         *
-         */
-
-        WRITE,
-        /**
-         * Node was deleted.
-         *
-         */
-        DELETE
-    }
-
-    @Override
-    PathArgument getIdentifier();
-
-    /**
-     * Returns type of modified object.
-     *
-     * @return type of modified object.
-     */
-    @NonNull Class<T> getDataType();
-
-    /**
-     * Returns type of modification.
-     *
-     * @return type Type of performed modification.
-     */
-    @NonNull ModificationType getModificationType();
-
-    /**
-     * Returns before-state of top level container. Implementations are encouraged,
-     * but not required to provide this state.
-     *
-     * @return State of object before modification. Null if subtree was not present,
-     *         or the implementation cannot provide the state.
-     */
-    @Nullable T getDataBefore();
-
-    /**
-     * Returns after-state of top level container.
-     *
-     * @return State of object after modification. Null if subtree is not present.
-     */
-    @Nullable T getDataAfter();
-
-    /**
-     * Returns unmodifiable collection of modified direct children.
-     *
-     * @return unmodifiable collection of modified direct children.
-     */
-    @NonNull Collection<? extends DataObjectModification<? extends DataObject>> getModifiedChildren();
-
-    /**
-     * Returns child list item modification if {@code child} was modified by this modification. This method should be
-     * used if the child is defined in a grouping brought into a case inside this object.
-     *
-     * @param caseType Case type class
-     * @param childType Type of list item - must be list item with key
-     * @return Modification of {@code child} if {@code child} was modified, null otherwise.
-     * @throws IllegalArgumentException If supplied {@code childType} class is not valid child according
-     *         to generated model.
-     */
-    default <H extends ChoiceIn<? super T> & DataObject, C extends ChildOf<? super H>>
-        Collection<DataObjectModification<C>> getModifiedChildren(final @NonNull Class<H> caseType,
-                final @NonNull Class<C> childType) {
-        final Item<C> item = Item.of(caseType, childType);
-        return (Collection<DataObjectModification<C>>) Collections2.filter(getModifiedChildren(),
-            mod -> item.equals(mod.getIdentifier()));
-    }
-
-    /**
-     * Returns container child modification if {@code child} was modified by this modification. This method should be
-     * used if the child is defined in a grouping brought into a case inside this object.
-     *
-     * <p>
-     * For accessing all modified list items consider iterating over {@link #getModifiedChildren()}.
-     *
-     * @param caseType Case type class
-     * @param child Type of child - must be only container
-     * @return Modification of {@code child} if {@code child} was modified, null otherwise.
-     * @throws IllegalArgumentException If supplied {@code child} class is not valid child according
-     *         to generated model.
-     */
-    default @Nullable <H extends ChoiceIn<? super T> & DataObject, C extends ChildOf<? super H>>
-            DataObjectModification<C> getModifiedChildContainer(final @NonNull Class<H> caseType,
-                    final @NonNull Class<C> child) {
-        return (DataObjectModification<C>) getModifiedChild(Item.of(caseType, child));
-    }
-
-    /**
-     * Returns container child modification if {@code child} was modified by this modification.
-     *
-     * <p>
-     * For accessing all modified list items consider iterating over {@link #getModifiedChildren()}.
-     *
-     * @param child Type of child - must be only container
-     * @return Modification of {@code child} if {@code child} was modified, null otherwise.
-     * @throws IllegalArgumentException If supplied {@code child} class is not valid child according
-     *         to generated model.
-     */
-    @Nullable <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(
-            @NonNull Class<C> child);
-
-    /**
-     * Returns augmentation child modification if {@code augmentation} was modified by this modification.
-     *
-     * <p>
-     * For accessing all modified list items consider iterating over {@link #getModifiedChildren()}.
-     *
-     * @param augmentation Type of augmentation - must be only container
-     * @return Modification of {@code augmentation} if {@code augmentation} was modified, null otherwise.
-     * @throws IllegalArgumentException If supplied {@code augmentation} class is not valid augmentation
-     *         according to generated model.
-     */
-    @Nullable <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(
-            @NonNull Class<C> augmentation);
-
-    /**
-     * Returns child list item modification if {@code child} was modified by this modification.
-     *
-     * @param listItem Type of list item - must be list item with key
-     * @param listKey List item key
-     * @return Modification of {@code child} if {@code child} was modified, null otherwise.
-     * @throws IllegalArgumentException If supplied {@code listItem} class is not valid child according
-     *         to generated model.
-     */
-    <N extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<N>> DataObjectModification<N>
-            getModifiedChildListItem(@NonNull Class<N> listItem, @NonNull K listKey);
-
-    /**
-     * Returns child list item modification if {@code child} was modified by this modification.
-     *
-     * @param caseType Case type class
-     * @param listItem Type of list item - must be list item with key
-     * @param listKey List item key
-     * @return Modification of {@code child} if {@code child} was modified, null otherwise.
-     * @throws IllegalArgumentException If supplied {@code listItem} class is not valid child according
-     *         to generated model.
-     */
-    default <H extends ChoiceIn<? super T> & DataObject, C extends Identifiable<K> & ChildOf<? super H>,
-            K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(
-                    final @NonNull Class<H> caseType, final @NonNull Class<C> listItem, final @NonNull K listKey) {
-        return (DataObjectModification<C>) getModifiedChild(IdentifiableItem.of(caseType, listItem, listKey));
-    }
-
-    /**
-     * Returns a child modification if a node identified by {@code childArgument} was modified by
-     * this modification.
-     *
-     * @param childArgument Path Argument of child node
-     * @return Modification of child identified by {@code childArgument} if {@code childArgument}
-     *         was modified, null otherwise.
-     * @throws IllegalArgumentException If supplied path argument is not valid child according to
-     *         generated model.
-     *
-     */
-    @Nullable DataObjectModification<? extends DataObject> getModifiedChild(PathArgument childArgument);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeListener.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeListener.java
deleted file mode 100644 (file)
index 222242e..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import java.util.Collection;
-import java.util.EventListener;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-/**
- * Interface implemented by classes interested in receiving notifications about
- * data tree changes. This interface provides a cursor-based view of the change, which has potentially
- * lower overhead and allow more flexible consumption of change event.
- *
- * <p>
- * Note: this interface enables notifications only at the leader of the data store, if clustered. If you want
- * notifications on all instances in a cluster, use the {@link ClusteredDataTreeChangeListener}.
- */
-public interface DataTreeChangeListener<T extends DataObject> extends EventListener {
-    /**
-     * Invoked when there was data change for the supplied path, which was used
-     * to register this listener.
-     *
-     * <p>
-     * This method may be also invoked during registration of the listener if
-     * there is any pre-existing data in the conceptual data tree for supplied
-     * path. This initial event will contain all pre-existing data as created.
-     *
-     * <p>
-     * A data change event may be triggered spuriously, e.g. such that data before
-     * and after compare as equal. Implementations of this interface are expected
-     * to recover from such events. Event producers are expected to exert reasonable
-     * effort to suppress such events.
-     *
-     * <p>
-     * In other words, it is completely acceptable to observe
-     * a {@link DataObjectModification}, while the state observed before and
-     * after- data items compare as equal.
-     *
-     * @param changes Collection of change events, may not be null or empty.
-     */
-    void onDataTreeChanged(@NonNull Collection<DataTreeModification<T>> changes);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeService.java
deleted file mode 100644 (file)
index db44e50..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-/**
- * A {@link BindingService} which allows users to register for changes to a
- * subtree.
- */
-public interface DataTreeChangeService extends BindingService {
-    /**
-     * Registers a {@link DataTreeChangeListener} to receive
-     * notifications when data changes under a given path in the conceptual data
-     * tree.
-     *
-     * <p>
-     * You are able to register for notifications  for any node or subtree
-     * which can be represented using {@link DataTreeIdentifier}.
-     *
-     * <p>
-     * You are able to register for data change notifications for a subtree or leaf
-     * even if it does not exist. You will receive notification once that node is
-     * created.
-     *
-     * <p>
-     * If there is any pre-existing data in the data tree for the path for which you are
-     * registering, you will receive an initial data change event, which will
-     * contain all pre-existing data, marked as created.
-     *
-     * <p>
-     * This method returns a {@link ListenerRegistration} object. To
-     * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
-     * method on the returned object.
-     *
-     * <p>
-     * You MUST explicitly unregister your listener when you no longer want to receive
-     * notifications. This is especially true in OSGi environments, where failure to
-     * do so during bundle shutdown can lead to stale listeners still being registered.
-     *
-     * @param treeId
-     *            Data tree identifier of the subtree which should be watched for
-     *            changes.
-     * @param listener
-     *            Listener instance which is being registered
-     * @return Listener registration object, which may be used to unregister
-     *         your listener using {@link ListenerRegistration#close()} to stop
-     *         delivery of change events.
-     */
-    <T extends DataObject, L extends DataTreeChangeListener<T>> @NonNull ListenerRegistration<L>
-            registerDataTreeChangeListener(@NonNull DataTreeIdentifier<T> treeId, @NonNull L listener);
-}
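
A minimal registration sketch against this removed service. Here dataTreeChangeService is assumed to be an available DataTreeChangeService, and ExampleData/ExampleDataListener are the hypothetical class and listener from the sketch above.

    final DataTreeIdentifier<ExampleData> treeId = new DataTreeIdentifier<>(
            LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(ExampleData.class));
    final ListenerRegistration<ExampleDataListener> reg =
            dataTreeChangeService.registerDataTreeChangeListener(treeId, new ExampleDataListener());
    // ... later, when notifications are no longer wanted (e.g. on bundle shutdown):
    reg.close();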
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeIdentifier.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeIdentifier.java
deleted file mode 100644 (file)
index 94e1f8b..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import static java.util.Objects.requireNonNull;
-
-import java.io.Serializable;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.yangtools.concepts.Immutable;
-import org.opendaylight.yangtools.concepts.Path;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * A unique identifier for a particular subtree. It is composed of the logical
- * data store type and the instance identifier of the root node.
- */
-public final class DataTreeIdentifier<T extends DataObject> implements Immutable,
-        Path<DataTreeIdentifier<?>>, Serializable {
-    private static final long serialVersionUID = 1L;
-
-    private final @NonNull InstanceIdentifier<T> rootIdentifier;
-    private final @NonNull LogicalDatastoreType datastoreType;
-
-    public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> rootIdentifier) {
-        this.datastoreType = requireNonNull(datastoreType);
-        this.rootIdentifier = requireNonNull(rootIdentifier);
-    }
-
-    /**
-     * Return the logical data store type.
-     *
-     * @return Logical data store type. Guaranteed to be non-null.
-     */
-    public @NonNull LogicalDatastoreType getDatastoreType() {
-        return datastoreType;
-    }
-
-    /**
-     * Return the {@link InstanceIdentifier} of the root node.
-     *
-     * @return Instance identifier corresponding to the root node.
-     */
-    public @NonNull InstanceIdentifier<T> getRootIdentifier() {
-        return rootIdentifier;
-    }
-
-    @Override
-    public boolean contains(final DataTreeIdentifier<?> other) {
-        return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + datastoreType.hashCode();
-        result = prime * result + rootIdentifier.hashCode();
-        return result;
-    }
-
-    @Override
-    public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof DataTreeIdentifier)) {
-            return false;
-        }
-        final DataTreeIdentifier<?> other = (DataTreeIdentifier<?>) obj;
-        if (datastoreType != other.datastoreType) {
-            return false;
-        }
-        return rootIdentifier.equals(other.rootIdentifier);
-    }
-
-    @Override
-    public String toString() {
-        return getClass().getSimpleName() + "{datastoreType = " + datastoreType + ", rootIdentifier = "
-                + rootIdentifier + "}";
-    }
-}
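
A small usage sketch for this removed identifier class; ExampleData and its child ExampleItem are hypothetical generated classes used only to show containment.

    final DataTreeIdentifier<ExampleData> whole = new DataTreeIdentifier<>(
            LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.create(ExampleData.class));
    final DataTreeIdentifier<ExampleItem> item = new DataTreeIdentifier<>(
            LogicalDatastoreType.CONFIGURATION,
            InstanceIdentifier.create(ExampleData.class).child(ExampleItem.class));
    // true: the datastore types match and the first root identifier contains the second
    final boolean covers = whole.contains(item);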
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeModification.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeModification.java
deleted file mode 100644 (file)
index bd35c63..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-/**
- * Represents the root of a modification.
- *
- * @author Tony Tkacik &lt;ttkacik@cisco.com&gt;
- *
- */
-public interface DataTreeModification<T extends DataObject> {
-
-    /**
-     * Get the modification root path. This is the path of the root node
-     * relative to the root of the InstanceIdentifier namespace.
-     *
-     * @return absolute path of the root node
-     */
-    @NonNull DataTreeIdentifier<T> getRootPath();
-
-    /**
-     * Get the modification root node.
-     *
-     * @return modification root node
-     */
-    @NonNull DataObjectModification<T> getRootNode();
-}
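
A sketch of how a delivered modification was typically inspected; ExampleData is the same hypothetical generated class as above.

    static void describe(final DataTreeModification<ExampleData> change) {
        final InstanceIdentifier<ExampleData> path = change.getRootPath().getRootIdentifier();
        final DataObjectModification<ExampleData> root = change.getRootNode();
        // before/after snapshots come from the DataObjectModification root node
        System.out.println(path + " changed: " + root.getDataBefore() + " -> " + root.getDataAfter());
    }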
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingDataBroker.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingDataBroker.java
deleted file mode 100644 (file)
index 40b8b8c..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.collect.ForwardingObject;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-/**
- * Utility {@link DataBroker} implementation which forwards all interface method
- * invocation to a delegate instance.
- */
-public abstract class ForwardingDataBroker extends ForwardingObject implements DataBroker {
-
-    @Override
-    protected abstract @NonNull DataBroker delegate();
-
-    @Override
-    public ReadOnlyTransaction newReadOnlyTransaction() {
-        return delegate().newReadOnlyTransaction();
-    }
-
-    @Override
-    public ReadWriteTransaction newReadWriteTransaction() {
-        return delegate().newReadWriteTransaction();
-    }
-
-    @Override
-    public WriteTransaction newWriteOnlyTransaction() {
-        return delegate().newWriteOnlyTransaction();
-    }
-
-    @Override
-    public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L>
-            registerDataTreeChangeListener(DataTreeIdentifier<T> treeId, L listener) {
-        return delegate().registerDataTreeChangeListener(treeId, listener);
-    }
-
-    @Override
-    public BindingTransactionChain createTransactionChain(TransactionChainListener listener) {
-        return delegate().createTransactionChain(listener);
-    }
-
-}
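
An illustrative decorator built on this removed base class; the transaction-counting behaviour is invented for the example and is not part of the original API.

    import java.util.concurrent.atomic.AtomicLong;

    public final class CountingDataBroker extends ForwardingDataBroker {
        private final AtomicLong readWriteTransactions = new AtomicLong();
        private final DataBroker delegate;

        public CountingDataBroker(final DataBroker delegate) {
            this.delegate = delegate;
        }

        @Override
        protected DataBroker delegate() {
            return delegate;
        }

        @Override
        public ReadWriteTransaction newReadWriteTransaction() {
            // count allocations, then forward to the delegate via the base class
            readWriteTransactions.incrementAndGet();
            return super.newReadWriteTransaction();
        }

        public long readWriteTransactionCount() {
            return readWriteTransactions.get();
        }
    }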
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingReadOnlyTransaction.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingReadOnlyTransaction.java
deleted file mode 100644 (file)
index a71c08a..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Utility {@link ReadOnlyTransaction} implementation which forwards all interface method
- * invocation to a delegate instance.
- */
-@SuppressWarnings("deprecation") // due to CheckedFuture
-public class ForwardingReadOnlyTransaction extends ForwardingObject implements ReadOnlyTransaction {
-
-    private final ReadOnlyTransaction delegate;
-
-    protected ForwardingReadOnlyTransaction(final ReadOnlyTransaction delegate) {
-        this.delegate = delegate;
-    }
-
-    @Override
-    protected ReadTransaction delegate() {
-        return delegate;
-    }
-
-    @Override
-    public <T extends DataObject> CheckedFuture<Optional<T>, ReadFailedException> read(final LogicalDatastoreType store,
-            final InstanceIdentifier<T> path) {
-        return delegate.read(store, path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path) {
-        return delegate.exists(store, path);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingReadWriteTransaction.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingReadWriteTransaction.java
deleted file mode 100644 (file)
index 3e8dfe7..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Utility {@link ReadWriteTransaction} implementation which forwards all interface method
- * invocation to a delegate instance.
- */
-@SuppressWarnings("deprecation") // due to CheckedFuture & TransactionStatus
-public class ForwardingReadWriteTransaction extends ForwardingObject implements ReadWriteTransaction {
-
-    private final ReadWriteTransaction delegate;
-
-    protected ForwardingReadWriteTransaction(final ReadWriteTransaction delegate) {
-        this.delegate = delegate;
-    }
-
-    @Override
-    protected ReadWriteTransaction delegate() {
-        return delegate;
-    }
-
-    @Override
-    public <T extends DataObject> void put(final LogicalDatastoreType store, final InstanceIdentifier<T> path,
-            final T data) {
-        delegate.put(store, path, data);
-    }
-
-    @Override
-    public <T extends DataObject> void put(final LogicalDatastoreType store, final InstanceIdentifier<T> path,
-            final T data, final boolean createMissingParents) {
-        delegate.put(store, path, data, createMissingParents);
-    }
-
-    @Override
-    public <T extends DataObject> CheckedFuture<Optional<T>, ReadFailedException> read(final LogicalDatastoreType store,
-            final InstanceIdentifier<T> path) {
-        return delegate.read(store, path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path) {
-        return delegate.exists(store, path);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    @Override
-    public boolean cancel() {
-        return delegate.cancel();
-    }
-
-    @Override
-    public <T extends DataObject> void merge(final LogicalDatastoreType store, final InstanceIdentifier<T> path,
-            final T data) {
-        delegate.merge(store, path, data);
-    }
-
-    @Override
-    public <T extends DataObject> void merge(final LogicalDatastoreType store, final InstanceIdentifier<T> path,
-            final T data, final boolean createMissingParents) {
-        delegate.merge(store, path, data, createMissingParents);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return delegate.commit();
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final InstanceIdentifier<?> path) {
-        delegate.delete(store, path);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingWriteTransaction.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ForwardingWriteTransaction.java
deleted file mode 100644 (file)
index 99dbe36..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.FluentFuture;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Utility {@link WriteTransaction} implementation which forwards all interface method
- * invocation to a delegate instance.
- */
-@SuppressWarnings("deprecation") // due to CheckedFuture & TransactionStatus
-public class ForwardingWriteTransaction extends ForwardingObject implements WriteTransaction {
-
-    private final WriteTransaction delegate;
-
-    protected ForwardingWriteTransaction(WriteTransaction delegate) {
-        this.delegate = delegate;
-    }
-
-    @Override
-    protected WriteTransaction delegate() {
-        return delegate;
-    }
-
-    @Override
-    public <T extends DataObject> void put(LogicalDatastoreType store, InstanceIdentifier<T> path, T data) {
-        delegate.put(store, path, data);
-    }
-
-    @Override
-    public <T extends DataObject> void put(LogicalDatastoreType store, InstanceIdentifier<T> path, T data,
-            boolean createMissingParents) {
-        delegate.put(store, path, data, createMissingParents);
-    }
-
-    @Override
-    public <T extends DataObject> void merge(LogicalDatastoreType store, InstanceIdentifier<T> path, T data) {
-        delegate.merge(store, path, data);
-    }
-
-    @Override
-    public <T extends DataObject> void merge(LogicalDatastoreType store, InstanceIdentifier<T> path, T data,
-            boolean createMissingParents) {
-        delegate.merge(store, path, data, createMissingParents);
-    }
-
-    @Override
-    public void delete(LogicalDatastoreType store, InstanceIdentifier<?> path) {
-        delegate.delete(store, path);
-    }
-
-    @Override
-    public boolean cancel() {
-        return delegate.cancel();
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return delegate.commit();
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPoint.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPoint.java
deleted file mode 100644 (file)
index 0c676e0..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.base.Optional;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public interface MountPoint extends Identifiable<InstanceIdentifier<?>> {
-    <T extends BindingService> Optional<T> getService(Class<T> service);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPointService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/MountPointService.java
deleted file mode 100644 (file)
index dec4b8c..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.base.Optional;
-import java.util.EventListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Deprecated.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.MountPointService} instead
- */
-@Deprecated
-public interface MountPointService extends BindingService {
-
-    Optional<MountPoint> getMountPoint(InstanceIdentifier<?> mountPoint);
-
-    <T extends MountPointListener> ListenerRegistration<T> registerListener(InstanceIdentifier<?> path, T listener);
-
-
-    interface MountPointListener extends EventListener {
-
-        void onMountPointCreated(InstanceIdentifier<?> path);
-
-        void onMountPointRemoved(InstanceIdentifier<?> path);
-
-    }
-
-}
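
A lookup sketch against this removed service; mountService and nodePath are assumed to be supplied by the caller, and the Optional type is Guava's, as imported above.

    final Optional<MountPoint> mountPoint = mountService.getMountPoint(nodePath);
    if (mountPoint.isPresent()) {
        // services behind a mount point are resolved individually and may be absent
        final Optional<DataBroker> mountedBroker = mountPoint.get().getService(DataBroker.class);
        if (mountedBroker.isPresent()) {
            // read or write data behind the mount point using mountedBroker.get()
        }
    }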
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationPublishService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationPublishService.java
deleted file mode 100644 (file)
index 7a8b09a..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * A {@link NotificationService} which also allows its users to
- * submit YANG-modeled notifications for delivery. There are three
- * methods of submission, following the patterns from {@link java.util.concurrent.BlockingQueue}:
- * - {@link #putNotification(Notification)}, which may block indefinitely
- *   if the implementation cannot allocate resources to accept the notification,
- * - {@link #offerNotification(Notification)}, which does not block in the face
- *   of resource starvation,
- * - {@link #offerNotification(Notification, int, TimeUnit)}, which may block
- *   for a specified time if resources are thin.
- *
- * <p>
- * The actual delivery to listeners is asynchronous and implementation-specific.
- * Users of this interface should not make any assumptions as to whether the
- * notification has or has not been seen.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.NotificationPublishService} instead
- */
-@Deprecated
-public interface NotificationPublishService extends BindingService {
-
-    /**
-     * Well-known value indicating that the binding-aware implementation is currently not
-     * able to accept a notification.
-     */
-    ListenableFuture<Object> REJECTED = Futures.immediateFailedFuture(new NotificationRejectedException(
-            "Rejected due to resource constraints."));
-
-    /**
-     * Publishes a notification to subscribed listeners. This initiates
-     * the process of sending the notification, but delivery to the
-     * listeners can happen asynchronously, potentially after a call to
-     * this method returns.
-     *
-     * <b>Note:</b> This call will block when the notification queue is full.
-     *
-     * @param notification
-     *            the notification to publish.
-     * @throws InterruptedException if interrupted while waiting
-     * @throws NullPointerException if the notification is null
-     */
-    void putNotification(Notification notification) throws InterruptedException;
-
-    /**
-     * Publishes a notification to subscribed listeners. This initiates
-     * the process of sending the notification, but delivery to the
-     * listeners can happen asynchronously, potentially after a call to
-     * this method returns.
-     *
-     * <p>
-     * This method is guaranteed not to block. The returned future completes once the
-     * service has finished propagating the notification to its immediate registrants.
-     *
-     * @param notification
-     *            the notification to publish.
-     * @return A listenable future which will report completion when the service has finished
-     *     propagating the notification to its immediate registrants, or {@link #REJECTED} if resource
-     *     constraints prevent delivery.
-     * @throws NullPointerException if the notification is null
-     */
-    ListenableFuture<?> offerNotification(Notification notification);
-
-    /**
-     * Publishes a notification to subscribed listeners. This initiates
-     * the process of sending the notification, but delivery to the
-     * listeners can happen asynchronously, potentially after a call to
-     * this method returns. This method is guaranteed not to block more
-     * than the specified timeout.
-     *
-     * @param notification
-     *            the notification to publish.
-     * @param timeout how long to wait before giving up, in units of unit
-     * @param unit a TimeUnit determining how to interpret the
-     *             timeout parameter
-     * @return A listenable future which will report completion when the service has finished
-     *     propagating the notification to its immediate registrants, or {@link #REJECTED} if resource
-     *     constraints prevent delivery.
-     * @throws InterruptedException if interrupted while waiting
-     * @throws NullPointerException if the notification or unit is null
-     * @throws IllegalArgumentException if timeout is negative.
-     */
-    ListenableFuture<?> offerNotification(Notification notification, int timeout, TimeUnit unit)
-            throws InterruptedException;
-
-}
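
A publication sketch combining the non-blocking and bounded-wait variants described above; ExampleNotification is a hypothetical generated notification class and the 500 ms timeout an arbitrary choice.

    ListenableFuture<?> publishBestEffort(final NotificationPublishService publishService,
            final ExampleNotification notification) throws InterruptedException {
        // try the non-blocking variant first
        ListenableFuture<?> result = publishService.offerNotification(notification);
        if (NotificationPublishService.REJECTED.equals(result)) {
            // resource constraints: retry, waiting at most 500 milliseconds
            result = publishService.offerNotification(notification, 500, TimeUnit.MILLISECONDS);
        }
        return result;
    }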
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationRejectedException.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationRejectedException.java
deleted file mode 100644 (file)
index 41c3a40..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-/**
- * <p>
- * This exception indicates that the given notification cannot be processed by the corresponding mechanism.
- * More information can be provided in the message.
- * </p>
- * <p>
- * Expected use: {@link NotificationPublishService}
- * </p>
- */
-public class NotificationRejectedException extends Exception {
-    private static final long serialVersionUID = 1L;
-
-    public NotificationRejectedException(final String message) {
-        super(message);
-    }
-
-    public NotificationRejectedException(final String message, final Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/NotificationService.java
deleted file mode 100644 (file)
index 8492296..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-/**
- * Notification broker which allows clients to subscribe for and publish YANG-modeled notifications.
- *
- * <p>
- * Each YANG module which defines notifications results in a generated interface <code>{ModuleName}Listener</code>
- * which handles all the notifications defined in the YANG model. Each notification type translates to
- * a specific method of the form <code>on{NotificationType}</code> on the generated interface.
- * The generated interface also extends the
- * {@link org.opendaylight.yangtools.yang.binding.NotificationListener} interface and implementations
- * are registered using
- *  {@link #registerNotificationListener(org.opendaylight.yangtools.yang.binding.NotificationListener)} method.
- *
- * <h3>Dispatch Listener Example</h3>
- *
- * <p>
- * Let's assume we have the following YANG model:
- *
- * <pre>
- * module example {
- *      ...
- *
- *      notification start {
- *          ...
- *      }
- *
- *      notification stop {
- *           ...
- *      }
- * }
- * </pre>
- *
- * <p>
- * The generated interface will be:
- * {@code
- *  public interface ExampleListener extends NotificationListener {
- *      void onStart(Start notification);
- *      void onStop(Stop notification);
- *  }
- * }
- * The following defines an implementation of the generated interface:
- * {@code
- *  public class MyExampleListener implements ExampleListener {
- *      public void onStart(Start notification) {
- *          // do something
- *      }
- *
- *      public void onStop(Stop notification) {
- *          // do something
- *      }
- *  }
- * }
- * The implementation is registered as follows:
- * {@code
- *  MyExampleListener listener = new MyExampleListener();
- *  ListenerRegistration<NotificationListener> reg = service.registerNotificationListener( listener );
- * }
- * The <code>onStart</code> method will be invoked when someone publishes a <code>Start</code> notification and
- * the <code>onStop</code> method will be invoked when someone publishes a <code>Stop</code> notification.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.NotificationService} instead
- */
-@Deprecated
-public interface NotificationService extends BindingService {
-    /**
-     * Registers a listener which implements a YANG-generated notification interface derived from
-     * {@link NotificationListener}. The listener is registered for all notifications present in
-     * the implemented interface.
-     *
-     * @param listener the listener implementation that will receive notifications.
-     * @return a {@link ListenerRegistration} instance that should be used to unregister the listener
-     *         by invoking the {@link ListenerRegistration#close()} method when no longer needed.
-     */
-    <T extends NotificationListener> ListenerRegistration<T> registerNotificationListener(T listener);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadOnlyTransaction.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadOnlyTransaction.java
deleted file mode 100644 (file)
index 5e76e21..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncReadOnlyTransaction;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * A transaction that provides a stateful read-only view of the data tree.
- *
- * <p>
- * For more information on usage and examples, please see the documentation in
- *  {@link org.opendaylight.controller.md.sal.common.api.data.AsyncReadTransaction}.
- */
-public interface ReadOnlyTransaction extends ReadTransaction, AsyncReadOnlyTransaction<InstanceIdentifier<?>,
-        DataObject> {
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadTransaction.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadTransaction.java
deleted file mode 100644 (file)
index 01f5254..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.controller.md.sal.common.api.MappingCheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncReadTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * A transaction that provides read access to a logical data store.
- *
- * <p>
- * For more information on usage and examples, please see the documentation in {@link AsyncReadTransaction}.
- */
-public interface ReadTransaction extends AsyncReadTransaction<InstanceIdentifier<?>, DataObject> {
-
-    /**
-     * Reads data from the provided logical data store located at the provided path.
-     * <p>
-     * If the target is a subtree, then the whole subtree is read (and will be
-     * accessible from the returned data object).
-     *
-     * @param store
-     *            Logical data store from which read should occur.
-     * @param path
-     *            Path which uniquely identifies the subtree which the client wants to
-     *            read
-     * @return a CheckedFuture containing the result of the read. The Future blocks until the
-     *         read operation is complete. Once complete:
-     *         <ul>
-     *         <li>If the data at the supplied path exists, the Future returns an Optional object
-     *         containing the data.</li>
-     *         <li>If the data at the supplied path does not exist, the Future returns
-     *         Optional#absent().</li>
-     *         <li>If the read of the data fails, the Future will fail with a
-     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
-     *         </ul>
-     */
-    <T extends DataObject> CheckedFuture<Optional<T>, ReadFailedException> read(
-            LogicalDatastoreType store, InstanceIdentifier<T> path);
-
-    /**
-     * Checks if data is available in the logical data store located at provided path.
-     *
-     * <p>
-     * Note: a successful result from this method makes no guarantee that a subsequent call to {@link #read}
-     * will succeed. It is possible that the data resides in a data store on a remote node and, if that
-     * node goes down or a network failure occurs, a subsequent read would fail. Another scenario is if
-     * the data is deleted in between the calls to <code>exists</code> and <code>read</code>.
-     *
-     * <p>
-     * Default implementation delegates to {@link #read(LogicalDatastoreType, InstanceIdentifier)}, implementations
-     * are advised to provide a more efficient override.
-     *
-     * @param store
-     *            Logical data store from which read should occur.
-     * @param path
-     *            Path which uniquely identifies the subtree whose existence the
-     *            client wants to check
-     * @return a CheckedFuture containing the result of the check.
-     *         <ul>
-     *         <li>If the data at the supplied path exists, the Future returns a Boolean
-     *         whose value is true, false otherwise</li>
-     *         <li>If checking for the data fails, the Future will fail with a
-     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
-     *         </ul>
-     */
-    default CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path) {
-        return MappingCheckedFuture.create(Futures.transform(read(store, path), Optional::isPresent,
-            MoreExecutors.directExecutor()), ReadFailedException.MAPPER);
-    }
-}
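
A read sketch using the deprecated CheckedFuture-based contract above; broker, ExampleData and dataPath are assumptions supplied by the caller, and Optional is Guava's.

    Optional<ExampleData> readExample(final DataBroker broker,
            final InstanceIdentifier<ExampleData> dataPath) throws ReadFailedException {
        final ReadOnlyTransaction tx = broker.newReadOnlyTransaction();
        try {
            // checkedGet() reports failures as ReadFailedException instead of ExecutionException
            return tx.read(LogicalDatastoreType.OPERATIONAL, dataPath).checkedGet();
        } finally {
            tx.close();
        }
    }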
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadWriteTransaction.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/ReadWriteTransaction.java
deleted file mode 100644 (file)
index 99da3b6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncReadWriteTransaction;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * A transaction that enables combined read/write capabilities.
- *
- * <p>
- * For more information on usage and examples, please see the documentation in {@link AsyncReadWriteTransaction}.
- */
-public interface ReadWriteTransaction extends ReadTransaction, WriteTransaction,
-        AsyncReadWriteTransaction<InstanceIdentifier<?>, DataObject> {
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/TransactionFactory.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/TransactionFactory.java
deleted file mode 100644 (file)
index fee1b7c..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataTransactionFactory;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public interface TransactionFactory extends AsyncDataTransactionFactory<InstanceIdentifier<?>, DataObject> {
-    @Override
-    ReadOnlyTransaction newReadOnlyTransaction();
-
-    @Override
-    ReadWriteTransaction newReadWriteTransaction();
-
-    @Override
-    WriteTransaction newWriteOnlyTransaction();
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/WriteTransaction.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/WriteTransaction.java
deleted file mode 100644 (file)
index 7a11e8c..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncWriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * A transaction that provides mutation capabilities on a data tree.
- *
- * <p>
- * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
- */
-public interface WriteTransaction extends AsyncWriteTransaction<InstanceIdentifier<?>, DataObject> {
-
-    /**
-     * Stores a piece of data at the specified path. This acts as an add / replace
-     * operation, which is to say that the whole subtree will be replaced by the specified data.
-     * <p>
-     * This method does not automatically create missing parent nodes. It is equivalent to invoking
-     * {@link #put(LogicalDatastoreType, InstanceIdentifier, DataObject, boolean)}
-     * with <code>createMissingParents</code> set to false.
-     * <p>
-     * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
-     * <p>
-     * If you need to make sure that a parent object exists but you do not want to modify
-     * its pre-existing state by using put, consider using {@link #merge} instead.
-     *
-     * @param store
-     *            the logical data store which should be modified
-     * @param path
-     *            the data object path
-     * @param data
-     *            the data object to be written to the specified path
-     * @throws IllegalStateException
-     *             if the transaction has already been submitted
-     */
-    <T extends DataObject> void put(LogicalDatastoreType store, InstanceIdentifier<T> path, T data);
-
-
-    /**
-     * Stores a piece of data at the specified path. This acts as an add /
-     * replace operation, which is to say that the whole subtree will be replaced by
-     * the specified data.
-     * <p>
-     * For more information on usage and examples, please see the documentation
-     * in {@link AsyncWriteTransaction}.
-     * <p>
-     * If you need to make sure that a parent object exists but you do not want
-     * to modify its pre-existing state by using put, consider using {@link #merge}
-     * instead.
-     *
-     * Note: Using <code>createMissingParents</code> with the value true may
-     * introduce garbage into the data store, or recreate nodes which were deleted
-     * by a previous transaction.
-     *
-     * @param store
-     *            the logical data store which should be modified
-     * @param path
-     *            the data object path
-     * @param data
-     *            the data object to be written to the specified path
-     * @param createMissingParents
-     *            if {@link #CREATE_MISSING_PARENTS} ({@code true}), any missing
-     *            parent nodes will be automatically created using a merge
-     *            operation.
-     * @throws IllegalStateException
-     *             if the transaction has already been submitted
-     */
-    <T extends DataObject> void put(LogicalDatastoreType store, InstanceIdentifier<T> path, T data,
-            boolean createMissingParents);
-
-    /**
-     * Merges a piece of data with the existing data at a specified path. Any pre-existing data
-     * which is not explicitly overwritten will be preserved. This means that if you store a container,
-     * its child lists will be merged.
-     * <p>
-     * This method does not automatically create missing parent nodes. It is equivalent to invoking
-     * {@link #merge(LogicalDatastoreType, InstanceIdentifier, DataObject, boolean)}
-     * with <code>createMissingParents</code> set to false.
-     * <p>
-     * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
-     * <p>
-     * If you require an explicit replace operation, use {@link #put} instead.
-     * @param store
-     *            the logical data store which should be modified
-     * @param path
-     *            the data object path
-     * @param data
-     *            the data object to be merged to the specified path
-     * @throws IllegalStateException
-     *             if the transaction has already been submitted
-     */
-    <T extends DataObject> void merge(LogicalDatastoreType store, InstanceIdentifier<T> path, T data);
-
-    /**
-     * Merges a piece of data with the existing data at a specified path. Any
-     * pre-existing data which is not explicitly overwritten will be preserved.
-     * This means that if you store a container, its child lists will be merged.
-     * <p>
-     * For more information on usage and examples, please see the documentation
-     * in {@link AsyncWriteTransaction}.
-     * <p>
-     * If you require an explicit replace operation, use {@link #put} instead.
-     *
-     * @param store
-     *            the logical data store which should be modified
-     * @param path
-     *            the data object path
-     * @param data
-     *            the data object to be merged to the specified path
-     * @param createMissingParents
-     *            if {@link #CREATE_MISSING_PARENTS} ({@code true}), any missing
-     *            parent nodes will be automatically created using a merge
-     *            operation.
-     * @throws IllegalStateException
-     *             if the transaction has already been submitted
-     */
-    <T extends DataObject> void merge(LogicalDatastoreType store, InstanceIdentifier<T> path, T data,
-            boolean createMissingParents);
-
-    @Override
-    void delete(LogicalDatastoreType store, InstanceIdentifier<?> path);
-
-    /**
-     * Flag value indicating that missing parents should be created.
-     */
-    boolean CREATE_MISSING_PARENTS = true;
-
-    /**
-     * Flag value indicating that missing parents should cause an error.
-     */
-    boolean FAIL_ON_MISSING_PARENTS = false;
-}
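
A write sketch exercising put, merge and delete together with the flags defined above; broker and the ExampleData paths/objects are assumptions.

    final WriteTransaction tx = broker.newWriteOnlyTransaction();
    // replace the whole subtree at dataPath, creating missing parents with merge semantics
    tx.put(LogicalDatastoreType.CONFIGURATION, dataPath, data, WriteTransaction.CREATE_MISSING_PARENTS);
    // merge preserves pre-existing children that are not explicitly overwritten
    tx.merge(LogicalDatastoreType.CONFIGURATION, otherPath, otherData);
    tx.delete(LogicalDatastoreType.CONFIGURATION, obsoletePath);
    tx.commit().addCallback(new FutureCallback<CommitInfo>() {
        @Override
        public void onSuccess(final CommitInfo result) {
            // the changes are now committed to the data store
        }

        @Override
        public void onFailure(final Throwable cause) {
            // the transaction failed; the data store is left untouched
        }
    }, MoreExecutors.directExecutor());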
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBindingAwareConsumer.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBindingAwareConsumer.java
deleted file mode 100644 (file)
index 031c157..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.osgi.framework.BundleContext;
-
-@Deprecated
-public abstract class AbstractBindingAwareConsumer extends AbstractBrokerAwareActivator
-        implements BindingAwareConsumer {
-    @Override
-    protected final void onBrokerAvailable(BindingAwareBroker broker, BundleContext context) {
-        broker.registerConsumer(this, context);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBindingAwareProvider.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBindingAwareProvider.java
deleted file mode 100644 (file)
index 967bbbf..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.osgi.framework.BundleContext;
-
-@Deprecated
-public abstract class AbstractBindingAwareProvider extends AbstractBrokerAwareActivator
-        implements BindingAwareProvider {
-    @Override
-    protected final void onBrokerAvailable(BindingAwareBroker broker, BundleContext context) {
-        broker.registerProvider(this, context);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBrokerAwareActivator.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/AbstractBrokerAwareActivator.java
deleted file mode 100644 (file)
index 64e777d..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
-import org.osgi.framework.BundleActivator;
-import org.osgi.framework.BundleContext;
-import org.osgi.framework.ServiceReference;
-import org.osgi.util.tracker.ServiceTracker;
-import org.osgi.util.tracker.ServiceTrackerCustomizer;
-
-public abstract class AbstractBrokerAwareActivator implements BundleActivator {
-
-    private static final ExecutorService MD_ACTIVATION_POOL = Executors.newCachedThreadPool();
-    private BundleContext context;
-    private ServiceTracker<BindingAwareBroker, BindingAwareBroker> tracker;
-    private BindingAwareBroker broker;
-    private final ServiceTrackerCustomizer<BindingAwareBroker, BindingAwareBroker> customizer =
-            new ServiceTrackerCustomizer<BindingAwareBroker, BindingAwareBroker>() {
-
-        @Override
-        public BindingAwareBroker addingService(ServiceReference<BindingAwareBroker> reference) {
-            broker = context.getService(reference);
-            MD_ACTIVATION_POOL.execute(() -> onBrokerAvailable(broker, context));
-            return broker;
-        }
-
-        @Override
-        public void modifiedService(ServiceReference<BindingAwareBroker> reference, BindingAwareBroker service) {
-            removedService(reference, service);
-            addingService(reference);
-        }
-
-        @Override
-        public void removedService(ServiceReference<BindingAwareBroker> reference, BindingAwareBroker service) {
-            broker = context.getService(reference);
-            MD_ACTIVATION_POOL.execute(() -> onBrokerRemoved(broker, context));
-        }
-
-    };
-
-
-    @Override
-    public final void start(BundleContext bundleContext) {
-        this.context = bundleContext;
-        startImpl(bundleContext);
-        tracker = new ServiceTracker<>(bundleContext, BindingAwareBroker.class, customizer);
-        tracker.open();
-
-    }
-
-
-
-    @Override
-    public final  void stop(BundleContext bundleContext) {
-        if (tracker != null) {
-            tracker.close();
-        }
-        stopImpl(bundleContext);
-    }
-
-    /**
-     * Called when this bundle is started (before
-     * {@link BindingAwareProvider#onSessionInitiated(ProviderContext)}) so the Framework can perform
-     * the bundle-specific activities necessary to start this bundle. This
-     * method can be used to register services or to allocate any resources that
-     * this bundle needs.
-     *
-     * <p>
-     * This method must complete and return to its caller in a timely manner.
-     *
-     * @param bundleContext
-     *            The execution context of the bundle being started.
-     * @throws RuntimeException
-     *             If this method throws an exception, this bundle is marked as
-     *             stopped and the Framework will remove this bundle's
-     *             listeners, unregister all services registered by this bundle,
-     *             and release all services used by this bundle.
-     */
-    protected void startImpl(BundleContext bundleContext) {
-        // NOOP
-    }
-
-    /**
-     * Called when this bundle is stopped so the Framework can perform the
-     * bundle-specific activities necessary to stop the bundle. In general, this
-     * method should undo the work that the {@code BundleActivator.start} method
-     * started. There should be no active threads that were started by this
-     * bundle when this bundle returns. A stopped bundle must not call any
-     * Framework objects.
-     *
-     * <p>
-     * This method must complete and return to its caller in a timely manner.
-     *
-     * @param bundleContext The execution context of the bundle being stopped.
-     * @throws RuntimeException If this method throws an exception, the bundle is still
-     *         marked as stopped, and the Framework will remove the bundle's
-     *         listeners, unregister all services registered by the bundle, and
-     *         release all services used by the bundle.
-     */
-    protected void stopImpl(BundleContext bundleContext) {
-        // NOOP
-    }
-
-    protected abstract void onBrokerAvailable(BindingAwareBroker bindingBroker, BundleContext bundleContext);
-
-    protected void onBrokerRemoved(BindingAwareBroker bindingBroker, BundleContext bundleContext) {
-        stopImpl(bundleContext);
-    }
-}
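
A bundle-activator sketch against this removed base class; the body of each hook describes typical usage and is an assumption, not behaviour mandated by the API.

    public final class ExampleActivator extends AbstractBrokerAwareActivator {
        @Override
        protected void startImpl(final BundleContext bundleContext) {
            // allocate bundle-local resources; this must return to the caller promptly
        }

        @Override
        protected void onBrokerAvailable(final BindingAwareBroker broker, final BundleContext bundleContext) {
            // invoked from the activation pool once a BindingAwareBroker service is tracked;
            // typically the place to call broker.registerConsumer(...) or registerProvider(...)
        }

        @Override
        protected void stopImpl(final BundleContext bundleContext) {
            // undo whatever startImpl() set up
        }
    }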
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareBroker.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareBroker.java
deleted file mode 100644 (file)
index 385754f..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.routing.RoutedRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.framework.BundleContext;
-
-/**
- * Binding-aware core of the SAL layer responsible for wiring the SAL consumers.
- *
- * <p>
- * The responsibility of the broker is to maintain the registration of SAL
- * functionality Consumers and Providers, to store provider- and
- * consumer-specific context and functionality registration via
- * {@link ConsumerContext}, and to provide access to infrastructure services, which
- * removes direct dependencies between providers and consumers.
- *
- * <p>
- * The Binding-aware broker is also responsible for translation from Java
- * classes modeling the functionality and data to the binding-independent form
- * which is used in the SAL Core.
- *
- * <h3>Infrastructure services</h3> Some examples of infrastructure services:
- *
- * <ul>
- * <li>YANG Module service - see {@link ConsumerContext#getRpcService(Class)},
- * {@link ProviderContext}
- * <li>Notification Service - see {@link NotificationService} and
- * {@link NotificationProviderService}
- * </ul>
- *
- * <p>
- * The services are exposed via session.
- *
- * <h3>Session-based access</h3>
- *
- * <p>
- * The providers and consumers need to register in order to use the
- * binding-independent SAL layer and to expose functionality via the SAL layer.
- *
- * <p>
- * For more information about session-based access see {@link ConsumerContext}
- * and {@link ProviderContext}
- */
-public interface BindingAwareBroker {
-    @Deprecated
-    ConsumerContext registerConsumer(BindingAwareConsumer consumer, BundleContext ctx);
-
-    /**
-     * Registers the {@link BindingAwareConsumer}, which will use the SAL layer.
-     *
-     * <p>
-     * Note that the consumer can register additional functionality at a later point
-     * by using service- and functionality-specific APIs.
-     *
-     * <p>
-     * The consumer is required to use the returned session for all communication
-     * with the broker or one of the broker services. The session is announced to
-     * the consumer by invoking
-     * {@link BindingAwareConsumer#onSessionInitialized(ConsumerContext)}.
-     *
-     * @param consumer
-     *            Consumer to be registered.
-     * @return a session specific to consumer registration
-     * @throws IllegalArgumentException
-     *             If the consumer is <code>null</code>.
-     * @throws IllegalStateException
-     *             If the consumer is already registered.
-     */
-    @Deprecated
-    ConsumerContext registerConsumer(BindingAwareConsumer consumer);
-
-    /*
-     * @deprecated Use registerProvider(BindingAwareProvider prov) instead (BundleContext is no longer used)
-     */
-    @Deprecated
-    ProviderContext registerProvider(BindingAwareProvider provider, BundleContext ctx);
-
-    /**
-     * Registers the {@link BindingAwareProvider}, which will use the SAL layer.
-     *
-     * <p>
-     * During the registration, the broker obtains the initial functionality
-     * from the provider, using
-     * BindingAwareProvider#getImplementations(), and registers that
-     * functionality with the system and concrete infrastructure services.
-     *
-     * <p>
-     * Note that the provider can register additional functionality at a later point
-     * by using service- and functionality-specific APIs.
-     *
-     * <p>
-     * The provider is <b>required to use</b> the returned session for all
-     * communication with the broker or one of the broker services. The session is
-     * announced to the provider by invoking
-     * {@link BindingAwareProvider#onSessionInitiated(ProviderContext)}.
-     *
-     *
-     * @param provider
-     *            Provider to be registered.
-     * @return a session unique to the provider registration.
-     * @throws IllegalArgumentException
-     *             If the provider is <code>null</code>.
-     * @throws IllegalStateException
-     *             If the provider is already registered.
-     */
-    @Deprecated
-    ProviderContext registerProvider(BindingAwareProvider provider);
-
-    /**
-     * {@link BindingAwareConsumer} specific access to the SAL functionality.
-     *
-     * <p>
-     * ConsumerSession is {@link BindingAwareConsumer}-specific access to the
-     * SAL functionality and infrastructure services.
-     *
-     * <p>
-     * The session serves to store SAL context (e.g. registration of
-     * functionality) for the consumer and provides access to the SAL
-     * infrastructure services and other functionality provided by
-     * {@link BindingAwareProvider}s.
-     */
-    @Deprecated
-    interface ConsumerContext extends RpcConsumerRegistry {
-
-        /**
-         * Returns a session-specific instance (implementation) of the requested binding-aware infrastructure service.
-         *
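-         * <p>
-         * For example, given a hypothetical {@code session} of this type ({@link NotificationService}
-         * being one such infrastructure service):
-         * {@code
-         * NotificationService notificationService = session.getSALService(NotificationService.class);
-         * }
-         *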
-         * @param service
-         *            Broker service
-         * @return Session specific implementation of service
-         */
-        <T extends BindingAwareService> T getSALService(Class<T> service);
-    }
-
-    /**
-     * {@link BindingAwareProvider} specific access to the SAL functionality.
-     *
-     * <p>
-     * ProviderSession is {@link BindingAwareProvider}-specific access to the
-     * SAL functionality and infrastructure services, which also allows for
-     * exposing the provider's functionality to the other
-     * {@link BindingAwareConsumer}s.
-     *
-     * <p>
-     * The session serves to store SAL context (e.g. registration of
-     * functionality) for the providers and exposes access to the SAL
-     * infrastructure services, dynamic functionality registration and any other
-     * functionality provided by other {@link BindingAwareProvider}s.
-     *
-     */
-    @Deprecated
-    interface ProviderContext extends ConsumerContext, RpcProviderRegistry {
-
-    }
-
-    /**
-     * Represents an RPC implementation registration. Users should call the
-     * {@link ObjectRegistration#close close} method when the registration is no longer needed.
-     *
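-     * <p>
-     * A minimal usage sketch; the {@code rpcRegistry} and {@code myImpl} instances are hypothetical:
-     * {@code
-     * RpcRegistration<MyService> reg = rpcRegistry.addRpcImplementation(MyService.class, myImpl);
-     * // ... later, when the implementation should no longer be invoked:
-     * reg.close();
-     * }
-     *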
-     * @param <T> the implemented RPC service interface
-     */
-    interface RpcRegistration<T extends RpcService> extends ObjectRegistration<T> {
-
-        /**
-         * Returns the implemented RPC service interface.
-         */
-        Class<T> getServiceType();
-
-        @Override
-        void close();
-    }
-
-    /**
-     * Represents a routed RPC implementation registration. Users should call the
-     * {@link RoutedRegistration#close close} method when the registration is no longer needed.
-     *
-     * @param <T> the implemented RPC service interface
-     */
-    interface RoutedRpcRegistration<T extends RpcService> extends RpcRegistration<T>,
-            RoutedRegistration<Class<? extends BaseIdentity>, InstanceIdentifier<?>, T> {
-
-        /**
-         * Registers a particular instance identifier to be processed by this RpcService.
-         *
-         * @deprecated in favor of RoutedRegistration#registerPath(Object, Object).
-         */
-        @Deprecated
-        void registerInstance(Class<? extends BaseIdentity> context, InstanceIdentifier<?> instance);
-
-        /**
-         * Unregisters a particular instance identifier so it is no longer processed by this RpcService.
-         *
-         * @deprecated in favor of RoutedRegistration#unregisterPath(Class, InstanceIdentifier).
-         */
-        @Deprecated
-        void unregisterInstance(Class<? extends BaseIdentity> context, InstanceIdentifier<?> instance);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareConsumer.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareConsumer.java
deleted file mode 100644 (file)
index 5cbdc92..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext;
-
-/**
- * A developer implemented component that gets registered with the Broker.
- *
- * <p>
- * Semantically, a consumer may:
- *
- * <ol>
- *   <li>Subscribe for Notifications </li>
- *   <li>Invoke RPCs</li>
- *   <li>Read from either the operational or config data tree</li>
- *   <li>Write to the config data tree</li>
- * </ol>
- * If you need to:
- * <ol>
- *   <li> Emit Notifications</li>
- *   <li> Provide the implementation of RPCs </li>
- *   <li> Write to the operational data tree </li>
- * </ol>
- *
- * <p>
- * Consider using a BindingAwareProvider
- *
- * <p>
- * Examples:
- *
- * <p>
- * To get a NotificationService:
- *
- * <p>
- * {@code
- * public void onSessionInitiated(ProviderContext session) {
- *      NotificationProviderService notificationService = session.getSALService(NotificationProviderService.class);
- *      notificationService.publish(notification);
- * }
- * }
- * where notification is an instance of a modeled Notification.
- * For more information on sending notifications via the NotificationProviderService
- * see org.opendaylight.controller.sal.binding.api.NotificationProviderService.
- *
- * <p>
- * A consumer can *invoke* an RPC (i.e., call foo(fooArgs)) but it cannot register an RPC
- * implementation with the MD-SAL that others can invoke (call).
- * To get an invokable RPC:
- *
- * <p>
- * {@code
- * public void onSessionInitiated(ProviderContext session) {
- *    MyService rpcFlowSalService = session.getRpcService(MyService.class);
- * }
- * }
- *
- * <p>
- * Where MyService.class is a Service interface generated from a yang model with RPCs modeled in it.  The returned
- * rpcFlowSalService can be used like any other object by invoking its methods.  Note, nothing special needs to be done
- * for RoutedRPCs.  They just work.
- *
- * <p>
- * To get a DataBroker to allow access to the data tree:
- *
- * <p>
- * {@code
- * public void onSessionInitiated(final ProviderContext session) {
- *      DataBroker databroker = session.getSALService(BindingDataBroker.class);
- * }
- * }
-*/
-@Deprecated
-public interface BindingAwareConsumer {
-
-    /**
-     * Callback signaling initialization of the consumer session to the SAL.
-     *
-     * <p>
-     * The consumer MUST use the session for all communication with the SAL or
-     * for retrieving SAL infrastructure services.
-     *
-     * <p>
-     * This method is invoked by {@link BindingAwareBroker#registerConsumer(BindingAwareConsumer)}
-     *
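-     * <p>
-     * A minimal sketch of an implementation; {@code MyService} is a hypothetical RPC interface
-     * generated from a YANG model:
-     * {@code
-     * public void onSessionInitialized(ConsumerContext session) {
-     *     MyService myService = session.getRpcService(MyService.class);
-     * }
-     * }
-     *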
-     * @param session
-     *            Unique session between consumer and SAL.
-     */
-    void onSessionInitialized(ConsumerContext session);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareProvider.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareProvider.java
deleted file mode 100644 (file)
index 8b3bc62..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
-
-/**
- * A developer implemented component that gets registered with the Broker.
- *
- * <p>
- * Semantically, a provider may:
- *
- * <ol>
- *   <li> Emit Notifications</li>
- *   <li> Provide the implementation of RPCs </li>
- *   <li> Write to the operational data tree </li>
- * </ol>
- *
- * <p>
- * If a class is not doing at least one of those three, consider using
- * a BindingAwareConsumer instead:
- * see {@link org.opendaylight.controller.sal.binding.api.BindingAwareConsumer}
- *
- * <p>
- * In addition, a BindingAwareProvider can in pursuit of its goals:
- * <ol>
- *   <li>Subscribe for Notifications </li>
- *   <li>Invoke RPCs</li>
- *   <li>Read from either the operational or config data tree</li>
- *   <li>Write to the config data tree</li>
- * </ol>
- * (All of the above are things a Consumer can also do).
- *
- * <p>
- * Examples:
- *
- *<p>
- * To get a NotificationService:
- *
- * {@code
- * public void onSessionInitiated(ProviderContext session) {
- *      NotificationProviderService notificationService = session.getSALService(NotificationProviderService.class);
- * }
- * }
- * For more information on sending notifications via the NotificationProviderService
- * see {@link org.opendaylight.controller.sal.binding.api.NotificationProviderService}
- *
- * <p>
- * To register an RPC implementation:
- *
- * {@code
- * public void onSessionInitiated(ProviderContext session) {
- *    RpcRegistration<MyService> registration = session.addRpcImplementation(MyService.class, myImplementationInstance);
- * }
- * }
- *
- * <p>
- * Where MyService.class is a Service interface generated from a yang model with RPCs modeled in it and
- * myImplementationInstance is an instance of a class that implements MyService.
- *
- * <p>
- * To register a Routed RPC Implementation:
- * {@code
- * public void onSessionInitiated(ProviderContext session) {
- *   RoutedRpcRegistration<SalFlowService> flowRegistration = session.addRoutedRpcImplementation(SalFlowService.class,
- *       salFlowServiceImplementationInstance);
- *   flowRegistration.registerPath(NodeContext.class, nodeInstanceId);
- * }
- * }
- *
- * <p>
- * Where SalFlowService.class is a Service interface generated from a yang model with RPCs modeled in it and
- * salFlowServiceImplementationInstance is an instance of a class that implements SalFlowService.
- *
- * <p>
- * The line:
- * {@code
- * flowRegistration.registerPath(NodeContext.class, nodeInstanceId);
- * }
- * indicates that the RPC implementation is registered to handle RPC invocations that have their NodeContext
- * pointing to the node with instance id nodeInstanceId. This bears a bit of further explanation.  RoutedRPCs can be
- * 'routed' to an implementation based upon 'context'.  'context' is a pointer (instanceId) to some place in the data
- * tree.  In this example, the 'context' is a pointer to a Node.  In this way, a provider can register its ability to
- * provide a service for a particular Node, but not *all* Nodes.  The Broker routes the RPC by 'context' to the correct
- * implementation, without the caller having to do extra work.  Because of this, when a RoutedRPC is registered, it
- * also needs to indicate for which 'contexts' it provides an implementation.
- *
- * <p>
- * An example of a Routed RPC would be an updateFlow(node, flow) that would be routed based on node to the provider
- * which had registered to provide it *for that node*.
- *
- *<p>
- * To get a DataBroker to allow access to the data tree:
- *
- * {@code
- * public void onSessionInitiated(final ProviderContext session) {
- *      DataBroker databroker = session.getSALService(BindingDataBroker.class);
- * }
- * }
- */
-@Deprecated
-public interface BindingAwareProvider {
-
-    /**
-     * Callback signaling initialization of the provider session to the SAL.
-     *
-     * <p>
-     * The provider MUST use the session for all communication with the SAL or
-     * for retrieving SAL infrastructure services.
-     *
-     * <p>
-     * This method is invoked by
-     * {@link BindingAwareBroker#registerProvider(BindingAwareProvider)}
-     *
-     * @param session Unique session between provider and SAL.
-     */
-    void onSessionInitiated(ProviderContext session);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/BindingAwareService.java
deleted file mode 100644 (file)
index 61714b1..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-/**
- * Session-specific instance of the broker functionality.
- *
- * <p>
- * BindingAwareService is a marker interface for infrastructure services provided
- * by the SAL. These services are session-specific: each
- * {@link BindingAwareConsumer} and {@link BindingAwareProvider} usually has its own
- * instance of the service with its own context.
- *
- * <p>
- * The consumer's (or provider's) instance of a specific service can be obtained by invoking
- * {@link org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext#getSALService(Class)}
- * method on session assigned to the consumer.
- *
- * <p>
- * {@link org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext}
- * and {@link BindingAwareProvider} may seem similar: the consumer merely uses YANG model-based
- * functionality, whereas the {@link BindingAwareProvider} exposes the supporting functionality
- * needed to implement specific YANG-modeled functionality and to make it reusable in the
- * development of {@link BindingAwareConsumer}s and {@link BindingAwareProvider}s.
- */
-public interface BindingAwareService {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationListener.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationListener.java
deleted file mode 100644 (file)
index dbe6937..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import java.util.EventListener;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * Interface for a generic listener that is interested in receiving YANG modeled notifications.
- * This interface acts as a base interface for specific listeners which usually are a type
- * capture of this interface.
- *
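- * <p>
- * For example, a listener interested only in a hypothetical {@code LinkUp} notification could be
- * declared as:
- * {@code
- * public class LinkUpListener implements NotificationListener<LinkUp> {
- *     public void onNotification(LinkUp notification) {
- *         // react to the notification
- *     }
- * }
- * }
- *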
- * @param <T> the interested notification type
- * @deprecated Deprecated unused API.
- */
-@Deprecated
-public interface NotificationListener<T extends Notification> extends EventListener {
-    /**
-     * Invoked to deliver a notification.
-     *
-     * <p>
-     * Note that this method may be invoked from a shared thread pool, so implementations SHOULD NOT
-     * perform CPU-intensive operations and MUST NOT invoke any potentially blocking operations.
-     *
-     * @param notification the notification.
-     */
-    void onNotification(T notification);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationProviderService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationProviderService.java
deleted file mode 100644 (file)
index 9d3efc2..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import java.util.EventListener;
-import java.util.concurrent.ExecutorService;
-import org.opendaylight.controller.md.sal.common.api.notify.NotificationPublishService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * Interface for a notification service that provides publish/subscribe capabilities for YANG
- * modeled notifications. This interface is a combination of the {@link NotificationService} and
- * {@link NotificationPublishService} interfaces.
- *
- * @deprecated Please use {@link org.opendaylight.mdsal.binding.api.NotificationPublishService}.
- */
-@Deprecated
-public interface NotificationProviderService extends NotificationService, NotificationPublishService<Notification> {
-
-    @Override
-    void publish(Notification notification);
-
-    @Override
-    void publish(Notification notification, ExecutorService executor);
-
-    /**
-     * Registers a listener to be notified about notification subscriptions. This
-     * enables a component to know when there is a notification listener subscribed
-     * for a particular notification type.
-     *
-     * <p>
-     * On registration of this listener, the
-     * {@link NotificationInterestListener#onNotificationSubscribtion(Class)} method
-     * will be invoked for every notification type that currently has a notification listener
-     * subscribed.
-     *
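-     * <p>
-     * A minimal sketch; the {@code notificationProviderService} instance is hypothetical:
-     * {@code
-     * ListenerRegistration<NotificationInterestListener> reg =
-     *     notificationProviderService.registerInterestListener(
-     *         type -> System.out.println("first subscriber for " + type));
-     * }
-     *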
-     * @param interestListener the listener that will be notified when subscriptions
-     *                         for new notification types occur.
-     * @return a {@link ListenerRegistration} instance that should be used to unregister the listener
-     *         by invoking the {@link ListenerRegistration#close()} method when no longer needed.
-     */
-    ListenerRegistration<NotificationInterestListener> registerInterestListener(
-            NotificationInterestListener interestListener);
-
-    /**
-     * Interface for a listener interested in being notified about notification subscriptions.
-     */
-    interface NotificationInterestListener extends EventListener {
-
-        /**
-         * Callback that is invoked when a notification listener subscribes for a particular notification type.
-         *
-         * <p>
-         * This method is only called for the first subscription that occurs for a
-         * particular notification type. Subsequent subscriptions for the same
-         * notification type do not trigger invocation of this method.
-         *
-         * <p>
-         * <b>Note:</b> This callback is delivered from a thread not owned by this listener;
-         * all processing should be as fast as possible and implementations should
-         * not make any blocking calls or otherwise block this thread.
-         *
-         * @param notificationType the notification type for the subscription that occurred.
-         */
-        void onNotificationSubscribtion(Class<? extends Notification> notificationType);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationService.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/NotificationService.java
deleted file mode 100644 (file)
index d8678d3..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * Notification broker which allows clients to subscribe for and publish YANG-modeled notifications.
- *
- *<p>
- * Two styles of listeners are supported:
- * <ul>
- * <li>Generic listener</li>
- * <li>Dispatch listener - a listener which implements the <code>{ModuleName}Listener</code> interface,
- * which has a dispatch method for each defined notification. Methods are invoked based on notification type (class).
- * </li>
- * </ul>
- *
- * <h3>Generic Listener</h3>
- *
- * <p>
- * A generic listener implements the {@link NotificationListener} interface which has one callback method
- * <code>onNotification</code> that is invoked for any notification type the listener is subscribed to.
- *
- * <p>
- * A generic listener is subscribed using the {@link #registerNotificationListener(Class, NotificationListener)}
- * method by which you specify the type of notification to receive. A generic listener may be registered for
- * multiple notification types via multiple subscriptions.
- *
- * <p>
- * Generic listeners allow for a more flexible approach, allowing you to subscribe for just
- * one type of notification from a YANG model. You could also have a general subscription
- * for all notifications in the system via
- * <pre>
- *   service.registerNotificationListener(Notification.class, listener);
- * </pre>
- *
- * <h3>Dispatch Listener</h3>
- *
- * <p>
- * A dispatch listener implements a YANG-generated module interface <code>{ModuleName}Listener</code>
- * which handles all the notifications defined in the YANG model. Each notification type translates to
- * a specific method of the form <code>on{NotificationType}</code> on the generated interface.
- * The generated interface also extends the
- * {@link org.opendaylight.yangtools.yang.binding.NotificationListener} interface and implementations
- * are registered using
- * {@link #registerNotificationListener(org.opendaylight.yangtools.yang.binding.NotificationListener)} method.
- *
- * <h4>Dispatch Listener Example</h4>
- *
- * <p>
- * Let's assume we have the following YANG model:
- *
- * {@code
- * module example {
- *      ...
- *
- *      notification start {
- *          ...
- *      }
- *
- *      notification stop {
- *           ...
- *      }
- * }
- * }
- *
- * <p>
- * The generated interface will be:
- * {@code
- *  public interface ExampleListener extends NotificationListener {
- *      void onStart(Start notification);
- *      void onStop(Stop notification);
- *  }
- * }
- * The following defines an implementation of the generated interface:
- * {@code
- *  public class MyExampleListener implements ExampleListener {
- *      public void onStart(Start notification) {
- *          // do something
- *      }
- *
- *      public void onStop(Stop notification) {
- *          // do something
- *      }
- *  }
- * }
- * The implementation is registered as follows:
- * {@code
- *  MyExampleListener listener = new MyExampleListener();
- *  ListenerRegistration<NotificationListener> reg = service.registerNotificationListener( listener );
- * }
- * The <code>onStart</code> method will be invoked when someone publishes a <code>Start</code> notification and
- * the <code>onStop</code> method will be invoked when someone publishes a <code>Stop</code> notification.
- *
- * @deprecated Please use {@link org.opendaylight.mdsal.binding.api.NotificationService} instead.
- */
-@Deprecated
-public interface NotificationService extends BindingAwareService {
-    /**
-     * Registers a generic listener implementation for a specified notification type.
-     *
-     * @param notificationType the YANG-generated interface of the notification type.
-     * @param listener the listener implementation that will receive notifications.
-     * @return a {@link ListenerRegistration} instance that should be used to unregister the listener
-     *         by invoking the {@link ListenerRegistration#close()} method when no longer needed.
-     */
-    <T extends Notification> ListenerRegistration<NotificationListener<T>> registerNotificationListener(
-            Class<T> notificationType, NotificationListener<T> listener);
-
-    /**
-     * Registers a listener which implements a YANG-generated notification interface derived from
-     * {@link org.opendaylight.yangtools.yang.binding.NotificationListener}.
-     * The listener is registered for all notifications present in the implemented interface.
-     *
-     * @param listener the listener implementation that will receive notifications.
-     * @return a {@link ListenerRegistration} instance that should be used to unregister the listener
-     *         by invoking the {@link ListenerRegistration#close()} method when no longer needed.
-     */
-    ListenerRegistration<org.opendaylight.yangtools.yang.binding.NotificationListener> registerNotificationListener(
-            org.opendaylight.yangtools.yang.binding.NotificationListener listener);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcAvailabilityListener.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcAvailabilityListener.java
deleted file mode 100644 (file)
index 3cf6e8d..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-public interface RpcAvailabilityListener {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcConsumerRegistry.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcConsumerRegistry.java
deleted file mode 100644 (file)
index d6978d7..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.binding.api.BindingService;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-/**
- * Provides access to registered Remote Procedure Call (RPC) service implementations. The RPCs are
- * defined in YANG models.
- *
- * <p>
- * RPC implementations are registered using the {@link RpcProviderRegistry}.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.RpcConsumerRegistry} instead
- */
-@Deprecated
-public interface RpcConsumerRegistry extends BindingAwareService, BindingService {
-    /**
-     * Returns an implementation of a requested RPC service.
-     *
-     * <p>
-     * The returned instance is not an actual implementation of the RPC service
-     * interface, but a proxy implementation of the interface that forwards to
-     * an actual implementation, if any.
-     *
-     * <p>
-     * The following describes the behavior of the proxy when invoking RPC methods:
-     * <ul>
-     * <li>If an actual implementation is registered with the MD-SAL, all invocations are
-     * forwarded to the registered implementation.</li>
-     * <li>If no actual implementation is registered, all invocations will fail by
-     * throwing {@link IllegalStateException}.</li>
-     * <li>Prior to invoking the actual implementation, the method arguments are validated.
-     * If any are invalid, an {@link IllegalArgumentException} is thrown.
-     * </ul>
-     *
-     * The returned proxy is automatically updated with the most recent
-     * registered implementation.
-     *
-     * {@code
-     *   final Future<RpcResult<SomeRpcOutput>> future = someRpcService.someRpc( ... );
-     *   Futures.addCallback(future,
-     *       new FutureCallback<RpcResult<SomeRpcOutput>>() {
-     *           public void onSuccess(RpcResult<SomeRpcOutput> result) {
-     *               // process result ...
-     *           }
-     *
-     *           public void onFailure(Throwable t) {
-     *              // RPC failed
-     *           }
-     *       });
-     *  }
-     *
-     * @param serviceInterface the interface of the RPC Service. Typically this is an interface generated
-     *                         from a YANG model.
-     * @return the proxy for the requested RPC service. This method never returns null.
-     */
-    <T extends RpcService> T getRpcService(Class<T> serviceInterface);
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcProviderRegistry.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/RpcProviderRegistry.java
deleted file mode 100644 (file)
index 367e55d..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangePublisher;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-/**
- * Provides a registry for Remote Procedure Call (RPC) service implementations. The RPCs are
- * defined in YANG models.
- *
- * <p>
- * There are 2 types of RPCs:
- * <ul>
- * <li>Global</li>
- * <li>Routed</li>
- * </ul>
- *
- * <h2>Global RPC</h2>
- *
- * <p>
- * An RPC is global if it is intended to have only one registered implementation. A global RPC is not
- * explicitly declared as such; essentially, any RPC that is not defined to be routed is considered global.
- *
- * <p>
- * Global RPCs are registered using the
- * {@link #addRpcImplementation(Class, RpcService)} method.
- *
- * <h2>Routed RPC</h2>
- *
- * <p>
- * MD-SAL supports routing of RPCs between multiple implementations, where the appropriate
- * implementation is selected at run time based on the content of the RPC message, as described in
- * the YANG model.
- *
- * <p>
- * RPC routing is based on:
- * <ul>
- * <li><b>Route identifier</b> -
- * An {@link org.opendaylight.yangtools.yang.binding.InstanceIdentifier InstanceIdentifier} value
- * which is part of the RPC input. This value is used to select the correct
- * implementation at run time.</li>
- * <li><b>Context Type</b> - A YANG-defined construct which constrains the subset of
- * valid route identifiers for a particular RPC.</li>
- * </ul>
- *
- * <h3>Context type</h3>
- *
- * <p>
- * A context type is modeled in YANG using a combination of a YANG <code>identity</code>
- * and OpenDaylight-specific extensions from the <code>yang-ext</code> module. These extensions are:
- * <ul>
- * <li><b>context-instance</b> - This is used in the data tree part of a YANG model to
- * define a context type that associates nodes with a specified context <code>identity</code>.
- * Instance identifiers that reference these nodes are valid route identifiers for RPCs that
- * reference this context type.</li>
- * <li><b>context-reference</b> - This is used in RPC input to mark a leaf of type
- * <code>instance-identifier</code> as a reference to the particular context type defined by the
- * specified context <code>identity</code>. The value of this
- * leaf is used by the RPC broker at run time to route the RPC request to the correct implementation.
- * Note that <code>context-reference</code> may only be used on leaf elements of type
- * <code>instance-identifier</code> or a type derived from <code>instance-identifier</code>.</li>
- * </ul>
- *
- * <p><br>
- * <h4>1. Defining a Context Type</h4>
- *
- * <p>
- * The following snippet declares a simple YANG <code>identity</code> named <code>example-context</code>:
- *
- * {@code
- * module example {
- *     ...
- *     identity example-context {
- *          description "Identity used to define an example-context type";
- *     }
- *     ...
- * }
- * }
- *
- * <p>
- * We then use the declared identity to define a context type by using it in combination
- * with the <code>context-instance</code> YANG extension. We'll associate the context type
- * with a list element in the data tree. This defines the set of nodes whose instance
- * identifiers are valid for the <code>example-context</code> context type.
- *
- * <p>
- * The following YANG snippet imports the <code>yang-ext</code> module and defines the list
- * element named <code>item</code> inside a container named <code>foo</code>:
- *
- * {@code
- * module foo {
- *     ...
- *     import yang-ext {prefix ext;}
- *     ...
- *     container foo {
- *          list item {
- *              key "id";
- *              leaf id {type string;}
- *              ext:context-instance "example-context";
- *          }
- *     }
- *     ...
- * }
- * }
- *
- * <p>
- * The statement <code>ext:context-instance "example-context";</code> inside the list element
- * declares that any instance identifier referencing <code>item</code> in the data
- * tree is valid for <code>example-context</code>. For example, the following instance
- * identifier:
- * <pre>
- *     InstanceIdentifier.create(Foo.class).child(Item.class,new ItemKey("Foo"))
- * </pre>
- * is valid for <code>example-context</code>. However the following:
- * <pre>
- *     InstanceIdentifier.create(Example.class)
- * </pre>
- * is not valid.
- *
- * <p>
- * So using an <code>identity</code> in combination with <code>context-instance</code> we
- * have effectively defined a context type that can be referenced in a YANG RPC input.
- *
- * <p>
- * <h4>2. Defining an RPC to use the Context Type</h4>
- *
- * <p>
- * To define an RPC to be routed based on the context type we need to add an input leaf element
- * that references the context type which will hold an instance identifier value to be
- * used to route the RPC.
- *
- * <p>
- * The following snippet defines an RPC named <code>show-item</code> with 2 leaf elements
- * as input: <code>item</code> of type <code>instance-identifier</code> and <code>description</code>:
- *
- * <pre>
- * module foo {
- *      ...
- *      import yang-ext {prefix ext;}
- *      ...
- *      rpc show-item {
- *          input {
- *              leaf item {
- *                  type instance-identifier;
- *                  ext:context-reference example-context;
- *              }
- *              leaf description {
- *                  type "string";
- *              }
- *          }
- *      }
- * }
- * </pre>
- *
- * <p>
- * We mark the <code>item</code> leaf with a <code>context-reference</code> statement that
- * references the <code>example-context</code> context type. RPC calls will then be routed
- * based on the instance identifier value contained in <code>item</code>. Only instance
- * identifiers that point to a <code>foo/item</code> node are valid as input.
- *
- * <p>
- * The generated RPC Service interface for the module is:
- *
- * <pre>
- * interface FooService extends RpcService {
- *      Future&lt;RpcResult&lt;Void&gt;&gt; showItem(ShowItemInput input);
- * }
- * </pre>
- *
- * <p>
- * For constructing the RPC input, there are generated classes ShowItemInput and ShowItemInputBuilder.
- *
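- * <p>
- * A sketch of building the input (setter names assumed from the leaves defined above):
- * <pre>
- * ShowItemInput input = new ShowItemInputBuilder()
- *     .setItem(InstanceIdentifier.create(Foo.class).child(Item.class, new ItemKey(&quot;foo&quot;)))
- *     .setDescription(&quot;example&quot;)
- *     .build();
- * </pre>
- *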
- * <h4>3. Registering a routed RPC implementation</h4>
- *
- * <p>
- * To register a routed implementation for the <code>show-item</code> RPC, we must use the
- * {@link #addRoutedRpcImplementation(Class, RpcService)} method. This
- * will return a {@link RoutedRpcRegistration} instance which can then be used to register /
- * unregister routed paths associated with the registered implementation.
- *
- * <p>
- * The following snippet registers <code>myImpl</code> as the RPC implementation for an
- * <code>item</code> with key <code>"foo"</code>:
- * <pre>
- * // Create the instance identifier path for item "foo"
- * InstanceIdentifier path = InstanceIdentifier.create(Foo.class).child(Item.class, new ItemKey(&quot;foo&quot;));
- *
- * // Register myImpl as the implementation for the FooService RPC interface
- * RoutedRpcRegistration reg = rpcRegistry.addRoutedRpcImplementation(FooService.class, myImpl);
- *
- * // Now register for the context type and specific path ID. The context type is specified by the
- * // YANG-generated class for the example-context identity.
- * reg.registerPath(ExampleContext.class, path);
- * </pre>
- *
- * <p>
- * It is also possible to register the same implementation for multiple paths:
- *
- * <pre>
- * InstanceIdentifier one = InstanceIdentifier.create(Foo.class).child(Item.class, new ItemKey(&quot;One&quot;));
- * InstanceIdentifier two = InstanceIdentifier.create(Foo.class).child(Item.class, new ItemKey(&quot;Two&quot;));
- *
- * RoutedRpcRegistration reg = rpcRegistry.addRoutedRpcImplementation(FooService.class, myImpl);
- * reg.registerPath(ExampleContext.class, one);
- * reg.registerPath(ExampleContext.class, two);
- * </pre>
- *
- * <p>
- * When another client invokes the <code>showItem(ShowItemInput)</code> method on the proxy instance
- * retrieved via {@link RpcConsumerRegistry#getRpcService(Class)}, the proxy will inspect the
- * arguments in ShowItemInput, extract the InstanceIdentifier value of the <code>item</code> leaf and select
- * the implementation whose registered path matches the InstanceIdentifier value of the <code>item</code> leaf.
- *
- * <p><br>
- * <h2>Notes for RPC Implementations</h2>
- *
- * <p>
- * <h3>RpcResult</h3>
- *
- * <p>
- * The generated interfaces require implementors to return
- *  {@link java.util.concurrent.Future Future}&lt;{@link org.opendaylight.yangtools.yang.common.RpcResult RpcResult}
- *  &lt;{RpcName}Output&gt;&gt; instances.
- *
- * <p>
- * Implementations should do processing of RPC calls asynchronously and update the
- * returned {@link java.util.concurrent.Future Future} instance when processing is complete.
- * However using {@link com.google.common.util.concurrent.Futures#immediateFuture(Object) Futures.immediateFuture}
- * is valid only if the result is immediately available and asynchronous processing is unnecessary and
- * would only introduce additional complexity.
- *
- * <p>
- * The {@link org.opendaylight.yangtools.yang.common.RpcResult RpcResult} is a generic
- * wrapper for the RPC output payload, if any, and also allows for attaching error or
- * warning information (possibly along with the payload) should the RPC processing partially
- * or completely fail. This is intended to provide additional human readable information
- * for users of the API and to transfer warning / error information across the system
- * so it may be visible via other external APIs such as Restconf.
- *
- * <p>
- * It is recommended to use the {@link org.opendaylight.yangtools.yang.common.RpcResult RpcResult}
- * for conveying appropriate error information
- * on failure rather than purposely throwing unchecked exceptions if at all possible.
- * While unchecked exceptions will fail the returned {@link java.util.concurrent.Future Future},
- * using the intended RpcResult to convey the error information is more user-friendly.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.binding.api.RpcProviderService} instead
- */
-@Deprecated
-public interface RpcProviderRegistry extends //
-        RpcConsumerRegistry, //
-        RouteChangePublisher<RpcContextIdentifier, InstanceIdentifier<?>> {
-    /**
-     * Registers a global implementation of the provided RPC service interface.
-     * All methods of the interface are required to be implemented.
-     *
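-     * <p>
-     * A minimal sketch; the {@code rpcRegistry} and {@code fooServiceImpl} instances are hypothetical:
-     * {@code
-     * RpcRegistration<FooService> reg = rpcRegistry.addRpcImplementation(FooService.class, fooServiceImpl);
-     * }
-     *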
-     * @param serviceInterface the YANG-generated interface of the RPC Service for which to register.
-     * @param implementation "the implementation of the RPC service interface.
-     * @return an RpcRegistration instance that should be used to unregister the RPC implementation
-     *         when no longer needed by calling {@link RpcRegistration#close()}.
-     *
-     * @throws IllegalStateException
-     *             if the supplied RPC interface is a routed RPC type.
-     */
-    <T extends RpcService> RpcRegistration<T> addRpcImplementation(Class<T> serviceInterface, T implementation)
-            throws IllegalStateException;
-
-    /**
-     * Registers an implementation of the given routed RPC service interface.
-     *
-     * <p>
-     * See the {@link RpcProviderRegistry class} documentation for information and an example on
-     * how to use routed RPCs.
-     *
-     * @param serviceInterface the YANG-generated interface of the RPC Service for which to register.
-     * @param implementation the implementation instance to register.
-     * @return a RoutedRpcRegistration instance which can be used to register paths for the RPC
-     *         implementation via invoking RoutedRpcRegistration#registerPath(Class, InstanceIdentifier).
-     *         {@link RoutedRpcRegistration#close()} should be called to unregister the implementation
-     *         and all previously registered paths when no longer needed.
-     *
-     * @throws IllegalStateException
-     *            if the supplied RPC interface is not a routed RPC type.
-     */
-    <T extends RpcService> RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> serviceInterface,
-                                                                               T implementation)
-            throws IllegalStateException;
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/package-info.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/package-info.java
deleted file mode 100644 (file)
index 31eec6b..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api;
-
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/rpc/RpcContextIdentifier.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/rpc/RpcContextIdentifier.java
deleted file mode 100644 (file)
index 3bdb432..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api.rpc;
-
-import org.opendaylight.yangtools.concepts.Immutable;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
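-/**
- * Identifier of an RPC routing context, pairing an {@link RpcService} type with an optional
- * {@link BaseIdentity} routing context. A usage sketch, assuming hypothetical generated classes
- * {@code FooService} and {@code ExampleContext}:
- * {@code
- * RpcContextIdentifier global = RpcContextIdentifier.contextForGlobalRpc(FooService.class);
- * RpcContextIdentifier routed = RpcContextIdentifier.contextFor(FooService.class, ExampleContext.class);
- * }
- */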
-public final class RpcContextIdentifier implements Immutable {
-
-    public final Class<? extends RpcService> rpcService;
-    public final Class<? extends BaseIdentity> routingContext;
-
-    private RpcContextIdentifier(Class<? extends RpcService> rpcService, Class<? extends BaseIdentity> routingContext) {
-        this.rpcService = rpcService;
-        this.routingContext = routingContext;
-    }
-
-    public Class<? extends RpcService> getRpcService() {
-        return rpcService;
-    }
-
-    public Class<? extends BaseIdentity> getRoutingContext() {
-        return routingContext;
-    }
-
-    public static RpcContextIdentifier contextForGlobalRpc(Class<? extends RpcService> serviceType) {
-        return new RpcContextIdentifier(serviceType, null);
-    }
-
-    public static RpcContextIdentifier contextFor(Class<? extends RpcService> serviceType,
-            Class<? extends BaseIdentity> routingContext) {
-        return new RpcContextIdentifier(serviceType, routingContext);
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + (routingContext == null ? 0 : routingContext.hashCode());
-        result = prime * result + (rpcService == null ? 0 : rpcService.hashCode());
-        return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (obj == null) {
-            return false;
-        }
-        if (getClass() != obj.getClass()) {
-            return false;
-        }
-        RpcContextIdentifier other = (RpcContextIdentifier) obj;
-        if (routingContext == null) {
-            if (other.routingContext != null) {
-                return false;
-            }
-        } else if (!routingContext.equals(other.routingContext)) {
-            return false;
-        }
-        if (rpcService == null) {
-            if (other.rpcService != null) {
-                return false;
-            }
-        } else if (!rpcService.equals(other.rpcService)) {
-            return false;
-        }
-        return true;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/rpc/RpcRoutingContext.java b/opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/sal/binding/api/rpc/RpcRoutingContext.java
deleted file mode 100644 (file)
index 4d42ff2..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.api.rpc;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
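-/**
- * Pairing of an RPC routing context type and an RPC service type, exposed as the two class
- * literals which together identify a routed RPC registration.
- *
- * @param <C> the routing context identity type
- * @param <S> the RPC service type
- */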
-public interface RpcRoutingContext<C extends BaseIdentity, S extends RpcService> {
-
-    Class<C> getContextType();
-
-    Class<S> getServiceType();
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/pom.xml b/opendaylight/md-sal/sal-binding-broker/pom.xml
deleted file mode 100644 (file)
index f488631..0000000
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-binding-broker-impl</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.javassist</groupId>
-      <artifactId>javassist</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-broker-impl</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-generator-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-codec</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-adapter</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-model-util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
-      <scope>provided</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.truth</groupId>
-      <artifactId>truth</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-test-model</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-
-      <plugin>
-        <groupId>org.apache.felix</groupId>
-        <artifactId>maven-bundle-plugin</artifactId>
-        <extensions>true</extensions>
-        <configuration>
-          <instructions>
-            <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
-            <Export-Package>
-              org.opendaylight.controller.sal.binding.impl,
-              org.opendaylight.controller.md.sal.binding.impl,
-              org.opendaylight.controller.md.sal.binding.compat,
-              org.opendaylight.controller.md.sal.binding.spi,
-              org.opendaylight.controller.sal.binding.codegen.impl,
-            </Export-Package>
-          </instructions>
-        </configuration>
-      </plugin>
-
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-source-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>attach-sources</id>
-            <phase>verify</phase>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/.gitignore b/opendaylight/md-sal/sal-binding-broker/src/main/.gitignore
deleted file mode 100644 (file)
index 04b73cb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/xtend-gen
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/AbstractNotificationListenerRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/AbstractNotificationListenerRegistration.java
deleted file mode 100644 (file)
index e327e6f..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * Abstract implementation of {@link NotificationListenerRegistration}.
- *
- * @param <T> Notification type
- */
-abstract class AbstractNotificationListenerRegistration<T extends Notification>
-        extends AbstractListenerRegistration<NotificationListener<T>> implements NotificationListenerRegistration<T> {
-    private final Class<? extends Notification> type;
-
-    protected AbstractNotificationListenerRegistration(final Class<? extends Notification> type,
-            final NotificationListener<T> listener) {
-        super(listener);
-        this.type = Preconditions.checkNotNull(type);
-    }
-
-    @Override
-    public Class<? extends Notification> getType() {
-        return type;
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public void notify(final Notification notification) {
-        if (!isClosed()) {
-            getInstance().onNotification((T)notification);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/AggregatedNotificationListenerRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/AggregatedNotificationListenerRegistration.java
deleted file mode 100644 (file)
index 339ad68..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * An aggregated listener registration. This is a result of registering an invoker which can handle multiple
- * interfaces at the same time. In order to support correct delivery, we need to maintain per-type registrations
- * which get squashed if a notification which implements multiple interfaces is encountered.
- *
- * <p>
- * We take care of that by implementing alternate {@link #hashCode()}/{@link #equals(Object)}, which resolve
- * to the backing aggregator.
- *
- * @param <N> Notification type
- * @param <A> Aggregator type
- */
-abstract class AggregatedNotificationListenerRegistration<N extends Notification, A>
-        extends AbstractNotificationListenerRegistration<N> {
-    private final A aggregator;
-
-    protected AggregatedNotificationListenerRegistration(final Class<? extends Notification> type,
-            final NotificationListener<N> listener, final A aggregator) {
-        super(type, listener);
-        this.aggregator = Preconditions.checkNotNull(aggregator);
-    }
-
-    protected A getAggregator() {
-        return aggregator;
-    }
-
-    @Override
-    public int hashCode() {
-        return aggregator.hashCode();
-    }
-
-    @Override
-    public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (obj == null) {
-            return false;
-        }
-        if (!this.getClass().equals(obj.getClass())) {
-            return false;
-        }
-
-        return aggregator.equals(((AggregatedNotificationListenerRegistration<?, ?>)obj).aggregator);
-    }
-}
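
The registration class removed above squashes per-type registrations by delegating hashCode()/equals() to the backing aggregator. As a rough, JDK-only sketch of why that works (all names hypothetical, not part of this change), two registrations sharing one aggregator collapse into a single entry of a hash-based set assembled at dispatch time:

    import java.util.HashSet;
    import java.util.Objects;
    import java.util.Set;

    // Hypothetical illustration only. The real registration also records the
    // notification type; here only the aggregator-based identity is shown.
    final class AggregatedRegistrationSketch {
        private final Object aggregator;

        AggregatedRegistrationSketch(final Object aggregator) {
            this.aggregator = Objects.requireNonNull(aggregator);
        }

        @Override
        public int hashCode() {
            return aggregator.hashCode();
        }

        @Override
        public boolean equals(final Object obj) {
            return this == obj || obj instanceof AggregatedRegistrationSketch
                    && aggregator.equals(((AggregatedRegistrationSketch) obj).aggregator);
        }

        public static void main(final String[] args) {
            final Object sharedAggregator = new Object();
            final Set<AggregatedRegistrationSketch> toNotify = new HashSet<>();
            toNotify.add(new AggregatedRegistrationSketch(sharedAggregator)); // e.g. registration for type A
            toNotify.add(new AggregatedRegistrationSketch(sharedAggregator)); // e.g. registration for type B
            // Prints 1: both per-type registrations squash to the single backing aggregator.
            System.out.println(toNotify.size());
        }
    }
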
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/CompositeRoutedRpcRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/CompositeRoutedRpcRegistration.java
deleted file mode 100644 (file)
index 1a405e0..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.HashMap;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcProviderServiceAdapter;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-final class CompositeRoutedRpcRegistration<T extends RpcService> implements RoutedRpcRegistration<T> {
-
-    private final Class<T> type;
-    private final T instance;
-    private final BindingDOMRpcProviderServiceAdapter adapter;
-    private final Map<InstanceIdentifier<?>, ObjectRegistration<T>> registrations = new HashMap<>(2);
-
-    CompositeRoutedRpcRegistration(final Class<T> type, final T impl,
-            final BindingDOMRpcProviderServiceAdapter providerAdapter) {
-        this.type = type;
-        this.instance = impl;
-        this.adapter = providerAdapter;
-    }
-
-    @Override
-    public Class<T> getServiceType() {
-        return type;
-    }
-
-    @Override
-    public T getInstance() {
-        return instance;
-    }
-
-    @Deprecated
-    @Override
-    public void registerInstance(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
-        registerPath(context, path);
-    }
-
-    @Override
-    public synchronized void registerPath(final Class<? extends BaseIdentity> context,
-            final InstanceIdentifier<?> path) {
-        if (!registrations.containsKey(path)) {
-            registrations.put(path,
-                    adapter.registerRpcImplementation(type, instance, ImmutableSet.<InstanceIdentifier<?>>of(path)));
-        }
-    }
-
-    @Override
-    @Deprecated
-    public void unregisterInstance(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
-        unregisterPath(context, path);
-    }
-
-    @Override
-    public synchronized void unregisterPath(final Class<? extends BaseIdentity> context,
-            final InstanceIdentifier<?> path) {
-        final ObjectRegistration<T> reg = registrations.remove(path);
-        if (reg != null) {
-            reg.close();
-        }
-    }
-
-    @Override
-    public synchronized void close() {
-        for (final ObjectRegistration<T> reg : registrations.values()) {
-            reg.close();
-        }
-    }
-}
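
The routed-RPC registration removed above keeps at most one delegate registration per path, treats duplicate registerPath() calls as no-ops, and releases everything on close(). A minimal JDK-only sketch of that bookkeeping, with hypothetical names and plain AutoCloseable values standing in for the adapter-backed registrations:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch: register the same path twice is a no-op, unregister
    // closes just that path, close() releases everything still registered.
    public final class PerPathRegistrations implements AutoCloseable {
        private final Map<String, AutoCloseable> registrations = new HashMap<>();

        public synchronized void registerPath(final String path) {
            registrations.computeIfAbsent(path,
                key -> () -> System.out.println("closed registration for " + key));
        }

        public synchronized void unregisterPath(final String path) throws Exception {
            final AutoCloseable reg = registrations.remove(path);
            if (reg != null) {
                reg.close();
            }
        }

        @Override
        public synchronized void close() throws Exception {
            for (final AutoCloseable reg : registrations.values()) {
                reg.close();
            }
            registrations.clear();
        }

        public static void main(final String[] args) throws Exception {
            try (PerPathRegistrations regs = new PerPathRegistrations()) {
                regs.registerPath("/nodes/node[1]");
                regs.registerPath("/nodes/node[1]"); // no-op, already registered
                regs.registerPath("/nodes/node[2]");
                regs.unregisterPath("/nodes/node[1]");
            } // close() releases the remaining registration for node[2]
        }
    }
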
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/DelegatedRootRpcRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/DelegatedRootRpcRegistration.java
deleted file mode 100644 (file)
index aa94e21..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-final class DelegatedRootRpcRegistration<T extends RpcService> implements RpcRegistration<T> {
-
-    private final ObjectRegistration<T> delegate;
-    private final Class<T> type;
-
-    DelegatedRootRpcRegistration(final Class<T> type, final ObjectRegistration<T> impl) {
-        this.delegate = impl;
-        this.type = type;
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-    }
-
-    @Override
-    public T getInstance() {
-        return delegate.getInstance();
-    }
-
-    @Override
-    public Class<T> getServiceType() {
-        return type;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/FunctionalNotificationListenerAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/FunctionalNotificationListenerAdapter.java
deleted file mode 100644 (file)
index 712d025..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import org.opendaylight.controller.md.sal.binding.impl.LazySerializedDOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-final class FunctionalNotificationListenerAdapter<N extends Notification> implements DOMNotificationListener {
-
-    private final BindingNormalizedNodeSerializer codec;
-    private final NotificationListener<N> delegate;
-    private final Class<N> type;
-
-    FunctionalNotificationListenerAdapter(final BindingNormalizedNodeSerializer codec, final Class<N> type,
-            final NotificationListener<N> delegate) {
-        this.codec = codec;
-        this.type = type;
-        this.delegate = delegate;
-    }
-
-    @Override
-    public void onNotification(final DOMNotification notification) {
-        delegate.onNotification(type.cast(deserialize(notification)));
-    }
-
-    private Notification deserialize(final DOMNotification notification) {
-        if (notification instanceof LazySerializedDOMNotification) {
-            return ((LazySerializedDOMNotification) notification).getBindingData();
-        }
-        return codec.fromNormalizedNodeNotification(notification.getType(), notification.getBody());
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceAdapter.java
deleted file mode 100644 (file)
index ac85f65..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import java.util.concurrent.ExecutorService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class HeliumNotificationProviderServiceAdapter extends HeliumNotificationServiceAdapter
-        implements NotificationProviderService {
-    private static final Logger LOG = LoggerFactory.getLogger(HeliumNotificationProviderServiceAdapter.class);
-
-    private final NotificationPublishService notificationPublishService;
-
-    public HeliumNotificationProviderServiceAdapter(final NotificationPublishService notificationPublishService,
-            final NotificationService notificationService) {
-        super(notificationService);
-        this.notificationPublishService = notificationPublishService;
-    }
-
-    @Override
-    public void publish(final Notification notification) {
-        try {
-            notificationPublishService.putNotification(notification);
-        } catch (InterruptedException e) {
-            LOG.error("Notification publication was interupted", e);
-        }
-    }
-
-    @Override
-    public void publish(final Notification notification, final ExecutorService executor) {
-        try {
-            notificationPublishService.putNotification(notification);
-        } catch (InterruptedException e) {
-            LOG.error("Notification publication was interupted", e);
-        }
-    }
-
-    @Override
-    public ListenerRegistration<NotificationInterestListener> registerInterestListener(
-            final NotificationInterestListener interestListener) {
-        throw new UnsupportedOperationException("InterestListener is not supported.");
-    }
-
-    @Override
-    public void close() {
-
-    }
-}
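
publish() above logs and swallows InterruptedException. As a side note, a frequently used alternative (not what the removed code did) is to also restore the thread's interrupt status; a small sketch with a plain BlockingQueue standing in for the notification publish service:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    // Hypothetical stand-in: a BlockingQueue plays the role of the publish service.
    public final class PublishSketch {
        private final BlockingQueue<Object> queue = new ArrayBlockingQueue<>(16);

        public void publish(final Object notification) {
            try {
                queue.put(notification);
            } catch (InterruptedException e) {
                // Log, then preserve the interrupt status for callers further up the stack.
                System.err.println("Notification publication was interrupted: " + e);
                Thread.currentThread().interrupt();
            }
        }

        public static void main(final String[] args) {
            new PublishSketch().publish("hello");
        }
    }
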
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceWithInterestListeners.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationProviderServiceWithInterestListeners.java
deleted file mode 100644 (file)
index 2a07b03..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import com.google.common.collect.Sets;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationPublishServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListener;
-import org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListenerRegistry;
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class HeliumNotificationProviderServiceWithInterestListeners extends HeliumNotificationProviderServiceAdapter {
-
-    private static final Logger LOG = LoggerFactory.getLogger(
-            HeliumNotificationProviderServiceWithInterestListeners.class);
-
-    private final ListenerRegistry<NotificationInterestListener> interestListeners = ListenerRegistry.create();
-    private final ListenerRegistration<Listener> domListener;
-    private final DOMNotificationService domService;
-    private final BindingToNormalizedNodeCodec codec;
-
-    public HeliumNotificationProviderServiceWithInterestListeners(
-            final BindingDOMNotificationPublishServiceAdapter publishService,
-            final BindingDOMNotificationServiceAdapter listenService,
-            final DOMNotificationSubscriptionListenerRegistry registry) {
-        super(publishService, listenService);
-        this.codec = publishService.getCodecRegistry();
-        this.domListener = registry.registerSubscriptionListener(new Listener());
-        this.domService = listenService.getDomService();
-    }
-
-    @Override
-    public ListenerRegistration<NotificationInterestListener> registerInterestListener(
-            final NotificationInterestListener listener) {
-        notifyListener(listener, translate(domListener.getInstance().getAllObserved()));
-        return interestListeners.register(listener);
-    }
-
-    private Set<Class<? extends Notification>> translate(final Set<SchemaPath> added) {
-        return codec.getNotificationClasses(added);
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void notifyAllListeners(final Set<SchemaPath> added) {
-        final Iterator<ListenerRegistration<NotificationInterestListener>> listeners = interestListeners.iterator();
-        if (listeners.hasNext()) {
-            final Set<Class<? extends Notification>> baEvent = translate(added);
-            while (listeners.hasNext()) {
-                final NotificationInterestListener listenerRef = listeners.next().getInstance();
-                try {
-                    notifyListener(listenerRef, baEvent);
-                } catch (RuntimeException  e) {
-                    LOG.warn("Unhandled exception during invoking listener {}", listenerRef, e);
-                }
-            }
-        }
-    }
-
-    @Override
-    public <T extends Notification> ListenerRegistration<NotificationListener<T>> registerNotificationListener(
-            final Class<T> type, final NotificationListener<T> listener) {
-
-        final FunctionalNotificationListenerAdapter<T> adapter =
-                new FunctionalNotificationListenerAdapter<>(codec, type, listener);
-        final SchemaPath domType = SchemaPath.create(true, BindingReflections.findQName(type));
-        final ListenerRegistration<?> domReg = domService.registerNotificationListener(adapter, domType);
-        return new AbstractListenerRegistration<NotificationListener<T>>(listener) {
-            @Override
-            protected void removeRegistration() {
-                domReg.close();
-            }
-        };
-    }
-
-    private static void notifyListener(final NotificationInterestListener listener,
-            final Set<Class<? extends Notification>> baEvent) {
-        for (final Class<? extends Notification> event: baEvent) {
-            listener.onNotificationSubscribtion(event);
-        }
-    }
-
-    private final class Listener implements DOMNotificationSubscriptionListener {
-
-        private volatile Set<SchemaPath> allObserved = Collections.emptySet();
-
-        @Override
-        public void onSubscriptionChanged(final Set<SchemaPath> currentTypes) {
-            final Set<SchemaPath> added = Sets.difference(currentTypes, allObserved).immutableCopy();
-            notifyAllListeners(added);
-            allObserved = Sets.union(allObserved, added).immutableCopy();
-        }
-
-        Set<SchemaPath> getAllObserved() {
-            return allObserved;
-        }
-    }
-
-    @Override
-    public void close() {
-        super.close();
-        domListener.close();
-    }
-}
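
The inner Listener above tracks which notification types are newly observed by combining Guava set views. The same difference/union bookkeeping in isolation, with plain strings standing in for schema paths:

    import com.google.common.collect.ImmutableSet;
    import com.google.common.collect.Sets;
    import java.util.Set;

    // Hypothetical, trimmed-down illustration: only newly observed types are
    // reported, and the accumulated view grows monotonically.
    public final class SubscriptionDiffExample {
        public static void main(final String[] args) {
            Set<String> allObserved = ImmutableSet.of("typeA");
            final Set<String> currentTypes = ImmutableSet.of("typeA", "typeB");

            // Newly added types since the last callback.
            final Set<String> added = Sets.difference(currentTypes, allObserved).immutableCopy();
            System.out.println(added); // [typeB]

            // Fold the additions into the accumulated set.
            allObserved = Sets.union(allObserved, added).immutableCopy();
            System.out.println(allObserved); // [typeA, typeB]
        }
    }
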
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumNotificationServiceAdapter.java
deleted file mode 100644 (file)
index f4c1020..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-public class HeliumNotificationServiceAdapter
-        implements org.opendaylight.controller.sal.binding.api.NotificationService, AutoCloseable {
-
-    private final NotificationService notificationService;
-
-    public HeliumNotificationServiceAdapter(NotificationService notificationService) {
-        this.notificationService = notificationService;
-    }
-
-    @Override
-    public <T extends Notification> ListenerRegistration<
-            org.opendaylight.controller.sal.binding.api.NotificationListener<T>> registerNotificationListener(
-            final Class<T> notificationType,
-            final org.opendaylight.controller.sal.binding.api.NotificationListener<T> listener) {
-        throw new UnsupportedOperationException("Not supported type of listener.");
-    }
-
-    @Override
-    public ListenerRegistration<NotificationListener> registerNotificationListener(
-            final NotificationListener listener) {
-        return notificationService.registerNotificationListener(listener);
-    }
-
-    @Override
-    public void close() throws Exception {
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumRpcProviderRegistry.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/HeliumRpcProviderRegistry.java
deleted file mode 100644 (file)
index 34f71e9..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcProviderServiceAdapter;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public class HeliumRpcProviderRegistry implements RpcProviderRegistry {
-
-    private final RpcConsumerRegistry consumerRegistry;
-    private final BindingDOMRpcProviderServiceAdapter providerAdapter;
-
-    public HeliumRpcProviderRegistry(final RpcConsumerRegistry consumerRegistry,
-            final BindingDOMRpcProviderServiceAdapter providerAdapter) {
-        this.consumerRegistry = consumerRegistry;
-        this.providerAdapter = providerAdapter;
-    }
-
-    @Override
-    public <T extends RpcService> RoutedRpcRegistration<T> addRoutedRpcImplementation(final Class<T> type, final T impl)
-            throws IllegalStateException {
-        return new CompositeRoutedRpcRegistration<>(type,impl,providerAdapter);
-    }
-
-    @Override
-    public <T extends RpcService> RpcRegistration<T> addRpcImplementation(final Class<T> type, final T impl)
-            throws IllegalStateException {
-        final ObjectRegistration<T> reg = providerAdapter.registerRpcImplementation(type, impl);
-        return new DelegatedRootRpcRegistration<>(type,reg);
-    }
-
-    @Override
-    public <T extends RpcService> T getRpcService(final Class<T> type) {
-        return consumerRegistry.getRpcService(type);
-    }
-
-    @Override
-    public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L>
-            registerRouteChangeListener(final L listener) {
-        // FIXME: Implement this only if necessary
-        return null;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/ListenerMapGeneration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/ListenerMapGeneration.java
deleted file mode 100644 (file)
index e2cf3d7..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.ImmutableMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.stream.Collectors;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * An immutable view of the current generation of listeners.
- */
-final class ListenerMapGeneration {
-    private static final int CACHE_MAX_ENTRIES = 1000;
-
-    /**
-     * Constant map of notification type to subscribed listeners.
-     */
-    private final Multimap<Class<? extends Notification>, NotificationListenerRegistration<?>> typeToListeners;
-
-    /**
-     * Dynamic cache of notification implementation to matching listeners. This cache loads entries based on
-     * the contents of the {@link #typeToListeners} map.
-     */
-    private final LoadingCache<Class<?>, Iterable<NotificationListenerRegistration<?>>> implementationToListeners =
-            CacheBuilder.newBuilder()
-            .weakKeys()
-            .maximumSize(CACHE_MAX_ENTRIES)
-            .build(new CacheLoader<Class<?>, Iterable<NotificationListenerRegistration<?>>>() {
-                @Override
-                public Iterable<NotificationListenerRegistration<?>> load(final Class<?> key) {
-                    final Set<NotificationListenerRegistration<?>> regs = new HashSet<>();
-
-                    for (final Class<?> type : getNotificationTypes(key)) {
-                        @SuppressWarnings("unchecked")
-                        final Collection<NotificationListenerRegistration<?>> l =
-                                typeToListeners.get((Class<? extends Notification>) type);
-                        if (l != null) {
-                            regs.addAll(l);
-                        }
-                    }
-
-                    return ImmutableSet.copyOf(regs);
-                }
-            });
-
-    ListenerMapGeneration() {
-        typeToListeners = ImmutableMultimap.of();
-    }
-
-    ListenerMapGeneration(final Multimap<Class<? extends Notification>,
-            NotificationListenerRegistration<?>> listeners) {
-        this.typeToListeners = ImmutableMultimap.copyOf(listeners);
-    }
-
-    /**
-     * Current listeners. Exposed for creating the next generation.
-     *
-     * @return Current type-to-listener map.
-     */
-    Multimap<Class<? extends Notification>, NotificationListenerRegistration<?>> getListeners() {
-        return typeToListeners;
-    }
-
-    /**
-     * Look up the listeners which need to see this notification delivered.
-     *
-     * @param notification Notification object
-     * @return Iterable of listeners, guaranteed to be nonnull.
-     */
-    public Iterable<NotificationListenerRegistration<?>> listenersFor(final Notification notification) {
-        // Safe to use, as our loader does not throw checked exceptions
-        return implementationToListeners.getUnchecked(notification.getClass());
-    }
-
-    public Iterable<Class<? extends Notification>> getKnownTypes() {
-        return typeToListeners.keySet();
-    }
-
-    private static Iterable<Class<?>> getNotificationTypes(final Class<?> cls) {
-        final Class<?>[] ifaces = cls.getInterfaces();
-        return Arrays.stream(ifaces)
-                .filter(input -> !Notification.class.equals(input) && Notification.class.isAssignableFrom(input))
-                .collect(Collectors.toList());
-    }
-}
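
ListenerMapGeneration above resolves listeners per concrete notification class through a bounded, weak-keyed Guava LoadingCache whose loader never throws checked exceptions. A standalone sketch of that caching pattern, with a simplified loader standing in for the real type-to-listener lookup:

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;
    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    // Hypothetical sketch: the per-class lookup is computed once and then served
    // from a bounded, weak-keyed cache.
    public final class TypeLookupCacheExample {
        private static final LoadingCache<Class<?>, List<String>> CACHE = CacheBuilder.newBuilder()
                .weakKeys()
                .maximumSize(1000)
                .build(new CacheLoader<Class<?>, List<String>>() {
                    @Override
                    public List<String> load(final Class<?> key) {
                        // Stand-in for "find all matching listener registrations".
                        return Arrays.stream(key.getInterfaces())
                                .map(Class::getSimpleName)
                                .collect(Collectors.toList());
                    }
                });

        public static void main(final String[] args) {
            // Safe to use getUnchecked(): the loader does not throw checked exceptions.
            System.out.println(CACHE.getUnchecked(java.util.ArrayList.class));
        }
    }
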
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/NotificationInvoker.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/NotificationInvoker.java
deleted file mode 100644 (file)
index c2fef92..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.reflect.TypeToken;
-import java.lang.reflect.Method;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import org.opendaylight.mdsal.binding.dom.adapter.invoke.NotificationListenerInvoker;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.common.QName;
-
-final class NotificationInvoker
-        implements org.opendaylight.controller.sal.binding.api.NotificationListener<Notification> {
-
-    private final NotificationListener delegate;
-    private final Map<Class<? extends Notification>,InvokerContext> invokers;
-
-    private NotificationInvoker(final NotificationListener listener) {
-        delegate = listener;
-        final Map<Class<? extends Notification>, InvokerContext> builder = new HashMap<>();
-        for (final TypeToken<?> ifaceToken : TypeToken.of(listener.getClass()).getTypes().interfaces()) {
-            final Class<?> iface = ifaceToken.getRawType();
-            if (NotificationListener.class.isAssignableFrom(iface) && BindingReflections.isBindingClass(iface)) {
-                @SuppressWarnings("unchecked")
-                final Class<? extends NotificationListener> listenerType =
-                        (Class<? extends NotificationListener>) iface;
-                final NotificationListenerInvoker invoker = NotificationListenerInvoker.from(listenerType);
-                for (final Class<? extends Notification> type : getNotificationTypes(listenerType)) {
-                    builder.put(type, new InvokerContext(BindingReflections.findQName(type), invoker));
-                }
-            }
-        }
-        invokers = ImmutableMap.copyOf(builder);
-    }
-
-    public static NotificationInvoker invokerFor(final NotificationListener listener) {
-        return new NotificationInvoker(listener);
-    }
-
-    public Set<Class<? extends Notification>> getSupportedNotifications() {
-        return invokers.keySet();
-    }
-
-    @Override
-    public void onNotification(final Notification notification) {
-        getContext(notification.getImplementedInterface()).invoke(notification);
-    }
-
-    private InvokerContext getContext(final Class<?> type) {
-        return invokers.get(type);
-    }
-
-    @SuppressWarnings("unchecked")
-    private static Set<Class<? extends Notification>> getNotificationTypes(
-            final Class<? extends org.opendaylight.yangtools.yang.binding.NotificationListener> type) {
-        // TODO: Investigate possibility and performance impact if we cache this or expose
-        // it from NotificationListenerInvoker
-        final Set<Class<? extends Notification>> ret = new HashSet<>();
-        for (final Method method : type.getMethods()) {
-            if (BindingReflections.isNotificationCallback(method)) {
-                final Class<? extends Notification> notification =
-                        (Class<? extends Notification>) method.getParameterTypes()[0];
-                ret.add(notification);
-            }
-        }
-        return ret;
-    }
-
-    private final class InvokerContext {
-
-        private final QName name;
-        private final NotificationListenerInvoker invoker;
-
-        private InvokerContext(final QName name, final NotificationListenerInvoker invoker) {
-            this.name = name;
-            this.invoker = invoker;
-        }
-
-        public void invoke(final Notification notification) {
-            invoker.invokeNotification(delegate, name, notification);
-        }
-    }
-}
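
NotificationInvoker above reflects over the listener's interfaces to build a map from notification type to the callback that handles it. A reflection-only sketch of that dispatch-map idea, with a hypothetical SampleListener in place of generated YANG listener interfaces:

    import java.lang.reflect.Method;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical sketch: for every single-argument callback on a listener
    // interface, remember which parameter type routes to which method.
    public final class CallbackMapSketch {
        interface SampleListener {
            void onStarted(String event);

            void onStopped(Integer event);
        }

        public static void main(final String[] args) {
            final Map<Class<?>, Method> byEventType = new HashMap<>();
            for (final Method method : SampleListener.class.getMethods()) {
                if (method.getParameterCount() == 1) {
                    byEventType.put(method.getParameterTypes()[0], method);
                }
            }
            // At dispatch time the runtime type of the event picks the callback.
            System.out.println(byEventType.get(String.class).getName()); // onStarted
        }
    }
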
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/NotificationListenerRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/compat/NotificationListenerRegistration.java
deleted file mode 100644 (file)
index c6af237..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.compat;
-
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-/**
- * A registration of a {@link NotificationListener}. Allows query of the type
- * of the notification and dispatching the notification atomically with regard
- * to unregistration.
- *
- * @param <T> Type of notification
- */
-interface NotificationListenerRegistration<T extends Notification>
-        extends ListenerRegistration<NotificationListener<T>> {
-    /**
-     * Return the interface class of the notification type.
-     *
-     * @return Notification type.
-     */
-    Class<? extends Notification> getType();
-
-    /**
-     * Dispatch a notification to the listener.
-     *
-     * @param notification Notification to be dispatched
-     */
-    void notify(Notification notification);
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractForwardedDataBroker.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractForwardedDataBroker.java
deleted file mode 100644 (file)
index c34c737..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.concepts.Delegator;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractForwardedDataBroker implements Delegator<DOMDataBroker>, AutoCloseable {
-
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractForwardedDataBroker.class);
-    // The Broker to whom we do all forwarding
-    private final DOMDataBroker domDataBroker;
-
-    private final BindingToNormalizedNodeCodec codec;
-
-    protected AbstractForwardedDataBroker(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec,
-            final DOMSchemaService schemaService) {
-        this.domDataBroker = domDataBroker;
-        this.codec = codec;
-    }
-
-    protected AbstractForwardedDataBroker(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec) {
-        this.domDataBroker = domDataBroker;
-        this.codec = codec;
-    }
-
-    protected BindingToNormalizedNodeCodec getCodec() {
-        return codec;
-    }
-
-    @Override
-    public DOMDataBroker getDelegate() {
-        return domDataBroker;
-    }
-
-    protected Map<InstanceIdentifier<?>, DataObject> toBinding(final InstanceIdentifier<?> path,
-            final Map<YangInstanceIdentifier, ? extends NormalizedNode<?, ?>> normalized) {
-        final Map<InstanceIdentifier<?>, DataObject> newMap = new HashMap<>();
-
-        for (final Map.Entry<YangInstanceIdentifier, ? extends NormalizedNode<?, ?>> entry : normalized.entrySet()) {
-            try {
-                final Optional<Entry<InstanceIdentifier<? extends DataObject>, DataObject>> potential =
-                        getCodec().toBinding(entry);
-                if (potential.isPresent()) {
-                    final Entry<InstanceIdentifier<? extends DataObject>, DataObject> binding = potential.get();
-                    newMap.put(binding.getKey(), binding.getValue());
-                }
-            } catch (final DeserializationException e) {
-                LOG.warn("Failed to transform {}, omitting it", entry, e);
-            }
-        }
-        return newMap;
-    }
-
-    protected Set<InstanceIdentifier<?>> toBinding(final InstanceIdentifier<?> path,
-            final Set<YangInstanceIdentifier> normalized) {
-        final Set<InstanceIdentifier<?>> hashSet = new HashSet<>();
-        for (final YangInstanceIdentifier normalizedPath : normalized) {
-            try {
-                final Optional<InstanceIdentifier<? extends DataObject>> potential =
-                        getCodec().toBinding(normalizedPath);
-                if (potential.isPresent()) {
-                    final InstanceIdentifier<? extends DataObject> binding = potential.get();
-                    hashSet.add(binding);
-                } else if (normalizedPath.getLastPathArgument()
-                        instanceof YangInstanceIdentifier.AugmentationIdentifier) {
-                    hashSet.add(path);
-                }
-            } catch (final DeserializationException e) {
-                LOG.warn("Failed to transform {}, omitting it", normalizedPath, e);
-            }
-        }
-        return hashSet;
-    }
-
-    protected Optional<DataObject> toBindingData(final InstanceIdentifier<?> path, final NormalizedNode<?, ?> data) {
-        if (path.isWildcarded()) {
-            return Optional.empty();
-        }
-        return (Optional<DataObject>) getCodec().deserializeFunction(path)
-                .apply(Optional.<NormalizedNode<?, ?>>of(data));
-    }
-
-    @Override
-    public void close() {
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractForwardedTransaction.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractForwardedTransaction.java
deleted file mode 100644 (file)
index 24b7ff0..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.controller.md.sal.common.api.MappingCheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
-import org.opendaylight.yangtools.concepts.Delegator;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-abstract class AbstractForwardedTransaction<T extends AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>>>
-        implements Delegator<T>, Identifiable<Object> {
-
-    private final T delegate;
-    private final BindingToNormalizedNodeCodec codec;
-
-    AbstractForwardedTransaction(final T delegateTx, final BindingToNormalizedNodeCodec codec) {
-        this.delegate = Preconditions.checkNotNull(delegateTx, "Delegate must not be null");
-        this.codec = Preconditions.checkNotNull(codec, "Codec must not be null");
-    }
-
-    @Override
-    public final Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    @Override
-    public final T getDelegate() {
-        return delegate;
-    }
-
-    @SuppressWarnings("unchecked")
-    protected final <S extends AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>>>
-            S getDelegateChecked(final Class<S> txType) {
-        Preconditions.checkState(txType.isInstance(delegate));
-        return (S) delegate;
-    }
-
-    protected final BindingToNormalizedNodeCodec getCodec() {
-        return codec;
-    }
-
-    protected final <D extends DataObject> CheckedFuture<Optional<D>, ReadFailedException> doRead(
-            final DOMDataReadTransaction readTx, final LogicalDatastoreType store,
-            final InstanceIdentifier<D> path) {
-        Preconditions.checkArgument(!path.isWildcarded(), "Invalid read of wildcarded path %s", path);
-
-        return MappingCheckedFuture.create(
-            Futures.transform(readTx.read(store, codec.toYangInstanceIdentifierBlocking(path)),
-                result -> Optional.fromJavaUtil(codec.deserializeFunction(path).apply(result.toJavaUtil())),
-                MoreExecutors.directExecutor()),
-            ReadFailedException.MAPPER);
-    }
-
-    protected final CheckedFuture<Boolean, ReadFailedException> doExists(
-            final DOMDataReadTransaction readTx, final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path) {
-        Preconditions.checkArgument(!path.isWildcarded(), "Invalid read of wildcarded path %s", path);
-
-        return MappingCheckedFuture.create(readTx.exists(store, codec.toYangInstanceIdentifierBlocking(path)),
-            ReadFailedException.MAPPER);
-    }
-}
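
doRead() above turns the DOM-level read future into the binding-level result by applying a transformation on a direct executor. The transform pattern on its own, using Guava futures and plain strings as stand-ins for normalized and binding data:

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.MoreExecutors;
    import java.util.Optional;
    import java.util.concurrent.ExecutionException;

    // Hypothetical sketch: the DOM-level future is transformed on the calling
    // thread (directExecutor) into the binding-level representation.
    public final class ReadTransformSketch {
        public static void main(final String[] args) throws InterruptedException, ExecutionException {
            final ListenableFuture<String> domRead = Futures.immediateFuture("normalized-node");
            final ListenableFuture<Optional<String>> bindingRead = Futures.transform(domRead,
                result -> Optional.ofNullable(result).map(r -> "binding(" + r + ")"),
                MoreExecutors.directExecutor());
            System.out.println(bindingRead.get()); // Optional[binding(normalized-node)]
        }
    }
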
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractReadWriteTransaction.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractReadWriteTransaction.java
deleted file mode 100644 (file)
index c4ec8e4..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class AbstractReadWriteTransaction extends AbstractWriteTransaction<DOMDataReadWriteTransaction> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractReadWriteTransaction.class);
-
-    public AbstractReadWriteTransaction(final DOMDataReadWriteTransaction delegate,
-            final BindingToNormalizedNodeCodec codec) {
-        super(delegate, codec);
-    }
-
-    @Override
-    protected final void ensureParentsByMerge(final LogicalDatastoreType store,
-            final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier normalizedPath,
-            final InstanceIdentifier<?> path) {
-        List<PathArgument> currentArguments = new ArrayList<>();
-        DataNormalizationOperation<?> currentOp = getCodec().getDataNormalizer().getRootOperation();
-        Iterator<PathArgument> iterator = normalizedPath.getPathArguments().iterator();
-        while (iterator.hasNext()) {
-            PathArgument currentArg = iterator.next();
-            try {
-                currentOp = currentOp.getChild(currentArg);
-            } catch (DataNormalizationException e) {
-                throw new IllegalArgumentException(String.format("Invalid child encountered in path %s", path), e);
-            }
-            currentArguments.add(currentArg);
-            org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier currentPath =
-                    org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.create(currentArguments);
-
-            final Boolean exists;
-            try {
-                exists = getDelegate().exists(store, currentPath).checkedGet();
-            } catch (ReadFailedException e) {
-                LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
-                throw new IllegalStateException("Failed to read pre-existing data", e);
-            }
-
-            if (!exists && iterator.hasNext()) {
-                getDelegate().merge(store, currentPath, currentOp.createDefault(currentArg));
-            }
-        }
-    }
-}
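
ensureParentsByMerge() above walks every prefix of the target path and merges a default node for any missing intermediate parent. A JDK-only sketch of that walk, with string segments standing in for YANG path arguments and a set standing in for the underlying store:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Hypothetical sketch: each prefix of the target path is checked and, if
    // absent (and not the final element), created so the eventual write has a
    // parent to attach to.
    public final class EnsureParentsSketch {
        public static void main(final String[] args) {
            final List<String> targetPath = List.of("nodes", "node", "node[key]");
            final Set<List<String>> existing = new HashSet<>();
            existing.add(List.of("nodes"));

            final List<String> current = new ArrayList<>();
            for (int i = 0; i < targetPath.size(); i++) {
                current.add(targetPath.get(i));
                final boolean isLast = i == targetPath.size() - 1;
                if (!isLast && !existing.contains(current)) {
                    // Stand-in for delegate.merge(store, currentPath, defaultNode).
                    existing.add(new ArrayList<>(current));
                    System.out.println("merged default parent at " + current);
                }
            }
        }
    }
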
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractWriteTransaction.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractWriteTransaction.java
deleted file mode 100644 (file)
index bb9c98e..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map.Entry;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.Identifiable;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Abstract Base Transaction for transactions which are backed by {@link DOMDataWriteTransaction}.
- */
-public abstract class AbstractWriteTransaction<T extends DOMDataWriteTransaction> extends
-        AbstractForwardedTransaction<T> {
-
-    protected AbstractWriteTransaction(final T delegate, final BindingToNormalizedNodeCodec codec) {
-        super(delegate, codec);
-    }
-
-    public final <U extends DataObject> void put(final LogicalDatastoreType store,
-            final InstanceIdentifier<U> path, final U data, final boolean createParents) {
-        Preconditions.checkArgument(!path.isWildcarded(), "Cannot put data into wildcarded path %s", path);
-
-        final Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> normalized = getCodec().toNormalizedNode(path, data);
-        if (createParents) {
-            ensureParentsByMerge(store, normalized.getKey(), path);
-        } else {
-            ensureListParentIfNeeded(store,path,normalized);
-        }
-
-        getDelegate().put(store, normalized.getKey(), normalized.getValue());
-    }
-
-    public final <U extends DataObject> void merge(final LogicalDatastoreType store,
-            final InstanceIdentifier<U> path, final U data,final boolean createParents) {
-        Preconditions.checkArgument(!path.isWildcarded(), "Cannot merge data into wildcarded path %s", path);
-
-        final Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> normalized = getCodec().toNormalizedNode(path, data);
-        if (createParents) {
-            ensureParentsByMerge(store, normalized.getKey(), path);
-        } else {
-            ensureListParentIfNeeded(store,path,normalized);
-        }
-
-        getDelegate().merge(store, normalized.getKey(), normalized.getValue());
-    }
-
-    /**
-     * Ensures the list parent exists if the item is a list entry; otherwise this is a no-op.
-     *
-     * <p>
-     * One of the properties of the binding specification is that it is
-     * impossible to represent a list as a whole, and thus it is impossible to
-     * write an empty variation of a MapNode without creating its parent node
-     * with an empty list.
-     *
-     * <p>
-     * This actually causes writes such as
-     * <pre>
-     * put("Nodes", new NodesBuilder().build());
-     * put("Nodes/Node[key]", new NodeBuilder().setKey("key").build());
-     * </pre>
-     * to result in three DOM operations:
-     * <pre>
-     * put("/nodes",domNodes);
-     * merge("/nodes/node",domNodeList);
-     * put("/nodes/node/node[key]",domNode);
-     * </pre>
-     *
-     * <p>
-     * In order to allow such an entry to be inserted when necessary, if we
-     * know the item is a list item, we try to merge an empty MapNode or
-     * OrderedNodeMap to ensure the list exists.
-     *
-     * @param store Data Store type
-     * @param path Path to data (Binding Aware)
-     * @param normalized Normalized version of data to be written
-     */
-    private void ensureListParentIfNeeded(final LogicalDatastoreType store, final InstanceIdentifier<?> path,
-            final Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> normalized) {
-        if (Identifiable.class.isAssignableFrom(path.getTargetType())) {
-            YangInstanceIdentifier parentMapPath = normalized.getKey().getParent();
-            Preconditions.checkArgument(parentMapPath != null, "Map path %s does not have a parent", path);
-
-            NormalizedNode<?, ?> emptyParent = getCodec().getDefaultNodeFor(parentMapPath);
-            getDelegate().merge(store, parentMapPath, emptyParent);
-        }
-    }
-
-    /**
-     * Deprecated.
-     *
-     * @deprecated Use {@link YangInstanceIdentifier#getParent()} instead.
-     */
-    @Deprecated
-    protected static Optional<YangInstanceIdentifier> getParent(final YangInstanceIdentifier child) {
-        return Optional.fromNullable(child.getParent());
-    }
-
-    /**
-     * Subclasses of this class are required to implement creation of parent
-     * nodes based on behaviour of their underlying transaction.
-     */
-    protected abstract void ensureParentsByMerge(LogicalDatastoreType store,
-            YangInstanceIdentifier key, InstanceIdentifier<?> path);
-
-    protected final void doDelete(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path) {
-        Preconditions.checkArgument(!path.isWildcarded(), "Cannot delete wildcarded path %s", path);
-
-        final YangInstanceIdentifier normalized = getCodec().toYangInstanceIdentifierBlocking(path);
-        getDelegate().delete(store, normalized);
-    }
-
-    protected final FluentFuture<? extends CommitInfo> doCommit() {
-        return getDelegate().commit();
-    }
-
-    protected final boolean doCancel() {
-        return getDelegate().cancel();
-    }
-}
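
The Javadoc of ensureListParentIfNeeded() above explains that putting a keyed list entry is preceded by merging an empty parent list node. A toy sketch of that ordering, with a plain map standing in for the DOM store (a merge of an empty node never overwrites existing data):

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Hypothetical illustration: before putting a keyed list entry, an empty
    // parent list is merged so the entry has a node to attach to. Paths and
    // nodes are plain strings here.
    public final class ListParentSketch {
        private final Map<String, String> store = new LinkedHashMap<>();

        void mergeIfAbsent(final String path, final String node) {
            store.putIfAbsent(path, node); // merging an empty node keeps existing data
        }

        void put(final String path, final String node) {
            store.put(path, node);
        }

        public static void main(final String[] args) {
            final ListParentSketch tx = new ListParentSketch();
            // A binding put("Nodes/Node[key]", ...) becomes two DOM operations:
            tx.mergeIfAbsent("/nodes/node", "emptyMapNode");
            tx.put("/nodes/node/node[key]", "domNode");
            System.out.println(tx.store); // {/nodes/node=emptyMapNode, /nodes/node/node[key]=domNode}
        }
    }
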
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingBrokerWiring.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingBrokerWiring.java
deleted file mode 100644 (file)
index 782a018..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2018 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import javassist.ClassPool;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.MountPointService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceWithInterestListeners;
-import org.opendaylight.controller.md.sal.binding.compat.HeliumRpcProviderRegistry;
-import org.opendaylight.controller.md.sal.binding.spi.AdapterFactory;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListenerRegistry;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.StreamWriterGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.api.ClassLoadingStrategy;
-import org.opendaylight.mdsal.binding.generator.util.JavassistUtils;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-
-/**
- * Provides the implementations of the APIs.
- *
- * <p>Intended to be usable in a standalone environment (non-OSGi/Karaf). Also
- * internally used by the Blueprint XML to expose the same as OSGi services.
- * This class does not require (depend on) the Guice dependency injection
- * framework, but can be used with it.
- *
- * @author Michael Vorburger.ch, partially based on refactored code originally by Thomas Pantelis
- */
-public class BindingBrokerWiring implements AutoCloseable {
-
-    private static final JavassistUtils JAVASSIST = JavassistUtils.forClassPool(ClassPool.getDefault());
-
-    private final BindingToNormalizedNodeCodec bindingToNormalizedNodeCodec;
-    private final ListenerRegistration<SchemaContextListener> mappingCodecListenerReg;
-    private final RpcProviderRegistry rpcProviderRegistry;
-    private final MountPointService mountPointService;
-    private final NotificationService notificationService;
-    private final NotificationPublishService notificationPublishService;
-    private final HeliumNotificationProviderServiceWithInterestListeners notificationAndProviderService;
-    private final AdapterFactory adapterFactory;
-    private final DataBroker dataBroker;
-    private final DataBroker pingPongDataBroker;
-
-    public BindingBrokerWiring(ClassLoadingStrategy classLoadingStrategy, DOMSchemaService schemaService,
-            DOMRpcService domRpcService, DOMRpcProviderService domRpcProviderService,
-            DOMMountPointService domMountPointService, DOMNotificationService domNotificationService,
-            DOMNotificationPublishService domNotificationPublishService,
-            DOMNotificationSubscriptionListenerRegistry domNotificationListenerRegistry, DOMDataBroker domDataBroker,
-            DOMDataBroker domPingPongDataBroker) {
-        // Runtime binding/normalized mapping service
-        BindingNormalizedNodeCodecRegistry codecRegistry
-            = new BindingNormalizedNodeCodecRegistry(StreamWriterGenerator.create(JAVASSIST));
-        bindingToNormalizedNodeCodec = new BindingToNormalizedNodeCodec(classLoadingStrategy, codecRegistry, true);
-
-        // Register the BindingToNormalizedNodeCodec with the SchemaService as a SchemaContextListener
-        mappingCodecListenerReg = schemaService.registerSchemaContextListener(bindingToNormalizedNodeCodec);
-
-        // Binding RPC Registry Service
-        BindingDOMRpcServiceAdapter bindingDOMRpcServiceAdapter
-            = new BindingDOMRpcServiceAdapter(domRpcService, bindingToNormalizedNodeCodec);
-        BindingDOMRpcProviderServiceAdapter bindingDOMRpcProviderServiceAdapter
-            = new BindingDOMRpcProviderServiceAdapter(domRpcProviderService, bindingToNormalizedNodeCodec);
-        rpcProviderRegistry
-            = new HeliumRpcProviderRegistry(bindingDOMRpcServiceAdapter, bindingDOMRpcProviderServiceAdapter);
-
-        // Binding MountPoint Service
-        mountPointService = new BindingDOMMountPointServiceAdapter(domMountPointService, bindingToNormalizedNodeCodec);
-
-        // Binding Notification Service
-        BindingDOMNotificationServiceAdapter notificationServiceImpl = new BindingDOMNotificationServiceAdapter(
-                bindingToNormalizedNodeCodec.getCodecRegistry(), domNotificationService);
-        notificationService = notificationServiceImpl;
-        BindingDOMNotificationPublishServiceAdapter notificationPublishServiceImpl =
-                new BindingDOMNotificationPublishServiceAdapter(
-                        bindingToNormalizedNodeCodec, domNotificationPublishService);
-        notificationPublishService = notificationPublishServiceImpl;
-        notificationAndProviderService = new HeliumNotificationProviderServiceWithInterestListeners(
-                notificationPublishServiceImpl, notificationServiceImpl, domNotificationListenerRegistry);
-
-        adapterFactory = new BindingToDOMAdapterFactory(bindingToNormalizedNodeCodec);
-
-        // Binding DataBroker
-        dataBroker = adapterFactory.createDataBroker(domDataBroker);
-
-        // Binding PingPong DataBroker
-        pingPongDataBroker = adapterFactory.createDataBroker(domPingPongDataBroker);
-    }
-
-    @Override
-    public void close() throws Exception {
-        mappingCodecListenerReg.close();
-    }
-
-    public BindingToNormalizedNodeCodec getBindingToNormalizedNodeCodec() {
-        return bindingToNormalizedNodeCodec;
-    }
-
-    public AdapterFactory getAdapterFactory() {
-        return adapterFactory;
-    }
-
-    public RpcProviderRegistry getRpcProviderRegistry() {
-        return rpcProviderRegistry;
-    }
-
-    public MountPointService getMountPointService() {
-        return mountPointService;
-    }
-
-    public NotificationService getNotificationService() {
-        return notificationService;
-    }
-
-    public NotificationPublishService getNotificationPublishService() {
-        return notificationPublishService;
-    }
-
-    @Deprecated
-    public NotificationProviderService getNotificationProviderService() {
-        return notificationAndProviderService;
-    }
-
-    @Deprecated
-    public org.opendaylight.controller.sal.binding.api.NotificationService getDeprecatedNotificationService() {
-        return notificationAndProviderService;
-    }
-
-    public DataBroker getDataBroker() {
-        return dataBroker;
-    }
-
-    public DataBroker getPingPongDataBroker() {
-        return pingPongDataBroker;
-    }
-
-}
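A minimal standalone-wiring sketch for the class removed above; it assumes the DOM-layer services passed to the constructor are created elsewhere by the hosting application, and the helper method name is purely illustrative:

    // Hypothetical helper: builds the Binding-level DataBroker on top of existing DOM services.
    static DataBroker createBindingDataBroker(final ClassLoadingStrategy strategy,
            final DOMSchemaService schemaService, final DOMRpcService rpcService,
            final DOMRpcProviderService rpcProviderService, final DOMMountPointService mountPointService,
            final DOMNotificationService notificationService,
            final DOMNotificationPublishService notificationPublishService,
            final DOMNotificationSubscriptionListenerRegistry listenerRegistry,
            final DOMDataBroker dataBroker, final DOMDataBroker pingPongDataBroker) {
        final BindingBrokerWiring wiring = new BindingBrokerWiring(strategy, schemaService, rpcService,
                rpcProviderService, mountPointService, notificationService, notificationPublishService,
                listenerRegistry, dataBroker, pingPongDataBroker);
        // The remaining Binding services (RPCs, notifications, mount points) are exposed via getters.
        return wiring.getDataBroker();
    }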
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingClusteredDOMDataTreeChangeListenerAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingClusteredDOMDataTreeChangeListenerAdapter.java
deleted file mode 100644 (file)
index 406af3a..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import org.opendaylight.controller.md.sal.binding.api.ClusteredDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-/**
- * Adapter wrapping a Binding {@link ClusteredDataTreeChangeListener}, exposing
- * it as a {@link ClusteredDOMDataTreeChangeListener} and translating DOM events
- * to their Binding equivalents.
- *
- * @author Thomas Pantelis
- */
-final class BindingClusteredDOMDataTreeChangeListenerAdapter<T extends DataObject>
-        extends BindingDOMDataTreeChangeListenerAdapter<T> implements ClusteredDOMDataTreeChangeListener {
-    BindingClusteredDOMDataTreeChangeListenerAdapter(BindingToNormalizedNodeCodec codec,
-            ClusteredDataTreeChangeListener<T> listener, LogicalDatastoreType store) {
-        super(codec, listener, store);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMAdapterBuilder.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMAdapterBuilder.java
deleted file mode 100644 (file)
index d460c48..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ClassToInstanceMap;
-import org.opendaylight.controller.md.sal.binding.api.BindingService;
-import org.opendaylight.controller.md.sal.binding.spi.AdapterBuilder;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-
-abstract class BindingDOMAdapterBuilder<T extends BindingService> extends AdapterBuilder<T, DOMService> {
-
-    interface Factory<T extends BindingService> {
-
-        BindingDOMAdapterBuilder<T> newBuilder();
-
-    }
-
-    private BindingToNormalizedNodeCodec codec;
-
-    public void setCodec(final BindingToNormalizedNodeCodec codec) {
-        this.codec = codec;
-    }
-
-    @Override
-    protected final T createInstance(final ClassToInstanceMap<DOMService> delegates) {
-        Preconditions.checkState(codec != null);
-        return createInstance(codec, delegates);
-    }
-
-    protected abstract T createInstance(BindingToNormalizedNodeCodec codec2, ClassToInstanceMap<DOMService> delegates);
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMAdapterLoader.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMAdapterLoader.java
deleted file mode 100644 (file)
index 5f2b6fd..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.binding.api.ActionProviderService;
-import org.opendaylight.controller.md.sal.binding.api.ActionService;
-import org.opendaylight.controller.md.sal.binding.api.BindingService;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.binding.spi.AdapterBuilder;
-import org.opendaylight.controller.md.sal.binding.spi.AdapterLoader;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-
-public abstract class BindingDOMAdapterLoader extends AdapterLoader<BindingService, DOMService> {
-    private static final Map<Class<?>, BindingDOMAdapterBuilder.Factory<?>> FACTORIES =
-        ImmutableMap.<Class<?>, BindingDOMAdapterBuilder.Factory<?>>builder()
-            .put(NotificationService.class, BindingDOMNotificationServiceAdapter.BUILDER_FACTORY)
-            .put(NotificationPublishService.class, BindingDOMNotificationPublishServiceAdapter.BUILDER_FACTORY)
-            .put(DataBroker.class, BindingDOMDataBrokerAdapter.BUILDER_FACTORY)
-            .put(RpcConsumerRegistry.class, BindingDOMRpcServiceAdapter.BUILDER_FACTORY)
-            .put(ActionProviderService.class, ControllerActionProviderServiceAdapter.BUILDER_FACTORY)
-            .put(ActionService.class, ControllerActionServiceAdapter.BUILDER_FACTORY)
-            .build();
-
-    private final BindingToNormalizedNodeCodec codec;
-
-    public BindingDOMAdapterLoader(final BindingToNormalizedNodeCodec codec) {
-        this.codec = codec;
-    }
-
-    @Override
-    protected final AdapterBuilder<? extends BindingService, DOMService> createBuilder(
-            final Class<? extends BindingService> key) {
-        final Factory<?> factory = FACTORIES.get(key);
-        Preconditions.checkArgument(factory != null, "Unsupported service type %s", key);
-        final BindingDOMAdapterBuilder<?> builder = factory.newBuilder();
-        builder.setCodec(codec);
-        return builder;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataBrokerAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataBrokerAdapter.java
deleted file mode 100644 (file)
index 7b4c549..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-/**
- * This adapter simply defers to the DOMDataBroker for all of its operations.
- * All transactions and listener registrations are wrapped by the adapter
- * to allow binding-aware components to use the DataBroker transparently.
- *
- * <p>
- * Besides this, the adapter and its collaborators also cache data that
- * has already been transformed from the binding-independent to the binding-aware format.
- */
-public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker
-        implements DataBroker, DataTreeChangeService {
-    static final Factory<DataBroker> BUILDER_FACTORY = Builder::new;
-    private final DataTreeChangeService treeChangeService;
-
-    public BindingDOMDataBrokerAdapter(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec) {
-        super(domDataBroker, codec);
-        final DOMDataTreeChangeService domTreeChange = (DOMDataTreeChangeService) domDataBroker
-                .getSupportedExtensions().get(DOMDataTreeChangeService.class);
-        if (domTreeChange != null) {
-            treeChangeService = BindingDOMDataTreeChangeServiceAdapter.create(codec, domTreeChange);
-        } else {
-            treeChangeService = null;
-        }
-    }
-
-    @Override
-    public ReadOnlyTransaction newReadOnlyTransaction() {
-        return new BindingDOMReadTransactionAdapter(getDelegate().newReadOnlyTransaction(), getCodec());
-    }
-
-    @Override
-    public ReadWriteTransaction newReadWriteTransaction() {
-        return new BindingDOMReadWriteTransactionAdapter(getDelegate().newReadWriteTransaction(), getCodec());
-    }
-
-    @Override
-    public WriteTransaction newWriteOnlyTransaction() {
-        return new BindingDOMWriteTransactionAdapter<>(getDelegate().newWriteOnlyTransaction(), getCodec());
-    }
-
-    @Override
-    public BindingTransactionChain createTransactionChain(final TransactionChainListener listener) {
-        return new BindingDOMTransactionChainAdapter(getDelegate(), getCodec(), listener);
-    }
-
-    @Override
-    public String toString() {
-        return "BindingDOMDataBrokerAdapter for " + getDelegate();
-    }
-
-    private static class Builder extends BindingDOMAdapterBuilder<DataBroker> {
-
-        @Override
-        public Set<? extends Class<? extends DOMService>> getRequiredDelegates() {
-            return ImmutableSet.of(DOMDataBroker.class);
-        }
-
-        @Override
-        protected DataBroker createInstance(final BindingToNormalizedNodeCodec codec,
-                final ClassToInstanceMap<DOMService> delegates) {
-            final DOMDataBroker domDataBroker = delegates.getInstance(DOMDataBroker.class);
-            return new BindingDOMDataBrokerAdapter(domDataBroker, codec);
-        }
-
-    }
-
-    @Override
-    public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L>
-            registerDataTreeChangeListener(final DataTreeIdentifier<T> treeId, final L listener) {
-        if (treeChangeService == null) {
-            throw new UnsupportedOperationException("Underlying data broker does not expose DOMDataTreeChangeService.");
-        }
-        return treeChangeService.registerDataTreeChangeListener(treeId, listener);
-    }
-}
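A usage sketch for the deleted adapter, assuming a DOMDataBroker and a BindingToNormalizedNodeCodec are available; "Nodes" stands in for any generated top-level binding container and is not part of this change (the Optional is Guava's, as used by this API):

    // Read a top-level container through the Binding adapter; sketch only.
    static Optional<Nodes> readOperationalNodes(final DOMDataBroker domDataBroker,
            final BindingToNormalizedNodeCodec codec) throws ReadFailedException {
        final DataBroker dataBroker = new BindingDOMDataBrokerAdapter(domDataBroker, codec);
        final ReadOnlyTransaction rtx = dataBroker.newReadOnlyTransaction();
        try {
            return rtx.read(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Nodes.class)).checkedGet();
        } finally {
            rtx.close();
        }
    }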
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeListenerAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeListenerAdapter.java
deleted file mode 100644 (file)
index fa748f9..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-
-/**
- * Adapter wrapping a Binding {@link DataTreeChangeListener}, exposing
- * it as a {@link DOMDataTreeChangeListener} and translating DOM events
- * to their Binding equivalents.
- */
-class BindingDOMDataTreeChangeListenerAdapter<T extends DataObject> implements DOMDataTreeChangeListener {
-
-    private final BindingToNormalizedNodeCodec codec;
-    private final DataTreeChangeListener<T> listener;
-    private final LogicalDatastoreType store;
-
-    BindingDOMDataTreeChangeListenerAdapter(final BindingToNormalizedNodeCodec codec,
-            final DataTreeChangeListener<T> listener, final LogicalDatastoreType store) {
-        this.codec = Preconditions.checkNotNull(codec);
-        this.listener = Preconditions.checkNotNull(listener);
-        this.store = Preconditions.checkNotNull(store);
-    }
-
-    @Override
-    public void onDataTreeChanged(final Collection<DataTreeCandidate> domChanges) {
-        final Collection<DataTreeModification<T>> bindingChanges =
-                LazyDataTreeModification.from(codec, domChanges, store);
-        listener.onDataTreeChanged(bindingChanges);
-    }
-
-    @Override
-    public String toString() {
-        return listener.toString();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapter.java
deleted file mode 100644 (file)
index 13b09d1..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.md.sal.binding.api.ClusteredDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Adapter exposing the Binding {@link DataTreeChangeService} on top of a wrapped
- * {@link DOMDataTreeChangeService}, responsible for translating identifiers and for
- * instantiating {@link BindingDOMDataTreeChangeListenerAdapter} adapters.
- *
- * <p>
- * Each registered {@link DataTreeChangeListener} is wrapped in such an adapter
- * and registered directly with the DOM service.
- */
-final class BindingDOMDataTreeChangeServiceAdapter implements DataTreeChangeService {
-
-    private final BindingToNormalizedNodeCodec codec;
-    private final DOMDataTreeChangeService dataTreeChangeService;
-
-    private BindingDOMDataTreeChangeServiceAdapter(final BindingToNormalizedNodeCodec codec,
-            final DOMDataTreeChangeService dataTreeChangeService) {
-        this.codec = Preconditions.checkNotNull(codec);
-        this.dataTreeChangeService = Preconditions.checkNotNull(dataTreeChangeService);
-    }
-
-    static DataTreeChangeService create(final BindingToNormalizedNodeCodec codec,
-            final DOMDataTreeChangeService dataTreeChangeService) {
-        return new BindingDOMDataTreeChangeServiceAdapter(codec, dataTreeChangeService);
-    }
-
-    @Override
-    public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L>
-            registerDataTreeChangeListener(final DataTreeIdentifier<T> treeId, final L listener) {
-        final DOMDataTreeIdentifier domIdentifier = toDomTreeIdentifier(treeId);
-
-        @SuppressWarnings({ "rawtypes", "unchecked" })
-        final BindingDOMDataTreeChangeListenerAdapter<T> domListener =
-                listener instanceof ClusteredDataTreeChangeListener
-                        ? new BindingClusteredDOMDataTreeChangeListenerAdapter<>(
-                                codec, (ClusteredDataTreeChangeListener) listener, treeId.getDatastoreType())
-                        : new BindingDOMDataTreeChangeListenerAdapter<>(codec, listener, treeId.getDatastoreType());
-
-        final ListenerRegistration<BindingDOMDataTreeChangeListenerAdapter<T>> domReg =
-                dataTreeChangeService.registerDataTreeChangeListener(domIdentifier, domListener);
-        return new BindingDataTreeChangeListenerRegistration<>(listener, domReg);
-    }
-
-    private DOMDataTreeIdentifier toDomTreeIdentifier(final DataTreeIdentifier<?> treeId) {
-        final YangInstanceIdentifier domPath = codec.toYangInstanceIdentifierBlocking(treeId.getRootIdentifier());
-        return new DOMDataTreeIdentifier(treeId.getDatastoreType(), domPath);
-    }
-}
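A registration sketch for the deleted service adapter (the class is package-private, so such code would live in the same package); the codec, DOM service, tree identifier and listener are assumed to be supplied by the caller:

    // Wrap the DOM change service and register a Binding listener; sketch only.
    static <T extends DataObject> ListenerRegistration<DataTreeChangeListener<T>> registerListener(
            final BindingToNormalizedNodeCodec codec, final DOMDataTreeChangeService domService,
            final DataTreeIdentifier<T> treeId, final DataTreeChangeListener<T> listener) {
        final DataTreeChangeService bindingService =
                BindingDOMDataTreeChangeServiceAdapter.create(codec, domService);
        return bindingService.registerDataTreeChangeListener(treeId, listener);
    }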
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointListenerAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointListenerAdapter.java
deleted file mode 100644 (file)
index ef80c78..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import java.util.Optional;
-import org.opendaylight.controller.md.sal.binding.api.MountPointService.MountPointListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.mdsal.dom.api.DOMMountPointListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class BindingDOMMountPointListenerAdapter<T extends MountPointListener>
-        implements ListenerRegistration<T>, DOMMountPointListener {
-    private static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointListenerAdapter.class);
-
-    private final T listener;
-    private final ListenerRegistration<DOMMountPointListener> registration;
-    private final BindingToNormalizedNodeCodec codec;
-
-    BindingDOMMountPointListenerAdapter(final T listener, final BindingToNormalizedNodeCodec codec,
-            final DOMMountPointService mountService) {
-        this.listener = listener;
-        this.codec = codec;
-        this.registration = mountService.registerProvisionListener(this);
-    }
-
-    @Override
-    public T getInstance() {
-        return listener;
-    }
-
-    @Override
-    public void close() {
-        registration.close();
-    }
-
-    @Override
-    public void onMountPointCreated(final YangInstanceIdentifier path) {
-        try {
-            final InstanceIdentifier<? extends DataObject> bindingPath = toBinding(path);
-            listener.onMountPointCreated(bindingPath);
-        } catch (final DeserializationException e) {
-            LOG.error("Unable to translate mountPoint path {}. Omitting event.", path, e);
-        }
-    }
-
-    private InstanceIdentifier<? extends DataObject> toBinding(final YangInstanceIdentifier path)
-            throws DeserializationException {
-        final Optional<InstanceIdentifier<? extends DataObject>> instanceIdentifierOptional = codec.toBinding(path);
-        if (instanceIdentifierOptional.isPresent()) {
-            return instanceIdentifierOptional.get();
-        } else {
-            throw new DeserializationException("Deserialization unsuccessful, " + instanceIdentifierOptional);
-        }
-    }
-
-    @Override
-    public void onMountPointRemoved(final YangInstanceIdentifier path) {
-        try {
-            final InstanceIdentifier<? extends DataObject> bindingPath = toBinding(path);
-            listener.onMountPointRemoved(bindingPath);
-        } catch (final DeserializationException e) {
-            LOG.error("Unable to translate mountPoint path {}. Omitting event.", path, e);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMMountPointServiceAdapter.java
deleted file mode 100644 (file)
index 244ab51..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import org.opendaylight.controller.md.sal.binding.api.MountPoint;
-import org.opendaylight.controller.md.sal.binding.api.MountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class BindingDOMMountPointServiceAdapter implements MountPointService {
-    private static final Logger LOG = LoggerFactory.getLogger(BindingDOMMountPointServiceAdapter.class);
-
-    private final BindingToNormalizedNodeCodec codec;
-    private final DOMMountPointService mountService;
-    private final LoadingCache<DOMMountPoint, BindingMountPointAdapter> bindingMountpoints = CacheBuilder.newBuilder()
-            .weakKeys().build(new CacheLoader<DOMMountPoint, BindingMountPointAdapter>() {
-                @Override
-                public BindingMountPointAdapter load(final DOMMountPoint key) {
-                    return new BindingMountPointAdapter(codec, key);
-                }
-            });
-
-    public BindingDOMMountPointServiceAdapter(final DOMMountPointService mountService,
-            final BindingToNormalizedNodeCodec codec) {
-        this.codec = codec;
-        this.mountService = mountService;
-    }
-
-    @Override
-    public Optional<MountPoint> getMountPoint(final InstanceIdentifier<?> mountPoint) {
-        YangInstanceIdentifier domPath = codec.toYangInstanceIdentifierBlocking(mountPoint);
-        Optional<DOMMountPoint> domMount = mountService.getMountPoint(domPath);
-        if (domMount.isPresent()) {
-            return Optional.<MountPoint>fromNullable(bindingMountpoints.getUnchecked(domMount.get()));
-        }
-        return Optional.absent();
-    }
-
-    @Override
-    public <T extends MountPointListener> ListenerRegistration<T> registerListener(final InstanceIdentifier<?> path,
-            final T listener) {
-        return new BindingDOMMountPointListenerAdapter<>(listener, codec, mountService);
-    }
-}
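A lookup sketch for the deleted mount point adapter; "nodePath" is an illustrative InstanceIdentifier and the Optional is Guava's, as used by this API:

    // Check whether a mount point exists for the given Binding path; sketch only.
    static boolean hasMountPoint(final DOMMountPointService domMountPointService,
            final BindingToNormalizedNodeCodec codec, final InstanceIdentifier<?> nodePath) {
        final MountPointService mountPoints = new BindingDOMMountPointServiceAdapter(domMountPointService, codec);
        final Optional<MountPoint> mountPoint = mountPoints.getMountPoint(nodePath);
        return mountPoint.isPresent();
    }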
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationListenerAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationListenerAdapter.java
deleted file mode 100644 (file)
index f749cb2..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.reflect.TypeToken;
-import java.lang.reflect.Method;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.mdsal.binding.dom.adapter.invoke.NotificationListenerInvoker;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-class BindingDOMNotificationListenerAdapter implements DOMNotificationListener {
-
-    private final BindingNormalizedNodeSerializer codec;
-    private final NotificationListener delegate;
-    private final Map<SchemaPath, NotificationListenerInvoker> invokers;
-
-    BindingDOMNotificationListenerAdapter(final BindingNormalizedNodeSerializer codec,
-            final NotificationListener delegate) {
-        this.codec = codec;
-        this.delegate = delegate;
-        this.invokers = createInvokerMapFor(delegate.getClass());
-    }
-
-    @Override
-    public void onNotification(final DOMNotification notification) {
-        final Notification baNotification = deserialize(notification);
-        final QName notificationQName = notification.getType().getLastComponent();
-        getInvoker(notification.getType()).invokeNotification(delegate, notificationQName, baNotification);
-    }
-
-    private Notification deserialize(final DOMNotification notification) {
-        if (notification instanceof LazySerializedDOMNotification) {
-            return ((LazySerializedDOMNotification) notification).getBindingData();
-        }
-        return codec.fromNormalizedNodeNotification(notification.getType(), notification.getBody());
-    }
-
-    private NotificationListenerInvoker getInvoker(final SchemaPath type) {
-        return invokers.get(type);
-    }
-
-    protected Set<SchemaPath> getSupportedNotifications() {
-        return invokers.keySet();
-    }
-
-    public static Map<SchemaPath, NotificationListenerInvoker> createInvokerMapFor(
-            final Class<? extends NotificationListener> implClz) {
-        final Map<SchemaPath, NotificationListenerInvoker> builder = new HashMap<>();
-        for (final TypeToken<?> ifaceToken : TypeToken.of(implClz).getTypes().interfaces()) {
-            Class<?> iface = ifaceToken.getRawType();
-            if (NotificationListener.class.isAssignableFrom(iface) && BindingReflections.isBindingClass(iface)) {
-                @SuppressWarnings("unchecked")
-                final Class<? extends NotificationListener> listenerType =
-                        (Class<? extends NotificationListener>) iface;
-                final NotificationListenerInvoker invoker = NotificationListenerInvoker.from(listenerType);
-                for (final SchemaPath path : getNotificationTypes(listenerType)) {
-                    builder.put(path, invoker);
-                }
-            }
-        }
-        return ImmutableMap.copyOf(builder);
-    }
-
-    private static Set<SchemaPath> getNotificationTypes(final Class<? extends NotificationListener> type) {
-        // TODO: Investigate possibility and performance impact if we cache this or expose
-        // it from NotificationListenerInvoker
-        final Set<SchemaPath> ret = new HashSet<>();
-        for (final Method method : type.getMethods()) {
-            if (BindingReflections.isNotificationCallback(method)) {
-                final Class<?> notification = method.getParameterTypes()[0];
-                final QName name = BindingReflections.findQName(notification);
-                ret.add(SchemaPath.create(true, name));
-            }
-        }
-        return ret;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationPublishServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationPublishServiceAdapter.java
deleted file mode 100644 (file)
index c2558c3..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-public class BindingDOMNotificationPublishServiceAdapter implements NotificationPublishService, AutoCloseable {
-
-    static final Factory<NotificationPublishService> BUILDER_FACTORY = Builder::new;
-
-    private final BindingToNormalizedNodeCodec codecRegistry;
-    private final DOMNotificationPublishService domPublishService;
-
-    public BindingDOMNotificationPublishServiceAdapter(final BindingToNormalizedNodeCodec codec,
-            final DOMNotificationPublishService domPublishService) {
-        this.codecRegistry = codec;
-        this.domPublishService = domPublishService;
-    }
-
-    public BindingToNormalizedNodeCodec getCodecRegistry() {
-        return codecRegistry;
-    }
-
-    public DOMNotificationPublishService getDomPublishService() {
-        return domPublishService;
-    }
-
-    @Override
-    public void putNotification(final Notification notification) throws InterruptedException {
-        domPublishService.putNotification(toDomNotification(notification));
-    }
-
-    @Override
-    public ListenableFuture<?> offerNotification(final Notification notification) {
-        ListenableFuture<?> offerResult = domPublishService.offerNotification(toDomNotification(notification));
-        return DOMNotificationPublishService.REJECTED.equals(offerResult)
-                ? NotificationPublishService.REJECTED
-                : offerResult;
-    }
-
-    @Override
-    public ListenableFuture<?> offerNotification(final Notification notification, final int timeout,
-            final TimeUnit unit) throws InterruptedException {
-        ListenableFuture<?> offerResult = domPublishService.offerNotification(toDomNotification(notification),
-                timeout, unit);
-        return DOMNotificationPublishService.REJECTED.equals(offerResult)
-                ? NotificationPublishService.REJECTED
-                : offerResult;
-    }
-
-    private DOMNotification toDomNotification(final Notification notification) {
-        return LazySerializedDOMNotification.create(codecRegistry, notification);
-    }
-
-    @Override
-    public void close() {
-    }
-
-    protected static class Builder extends BindingDOMAdapterBuilder<NotificationPublishService> {
-
-        @Override
-        public Set<Class<? extends DOMService>> getRequiredDelegates() {
-            return ImmutableSet.<Class<? extends DOMService>>of(DOMNotificationPublishService.class);
-        }
-
-        @Override
-        protected NotificationPublishService createInstance(final BindingToNormalizedNodeCodec codec,
-                final ClassToInstanceMap<DOMService> delegates) {
-            final DOMNotificationPublishService domPublish = delegates.getInstance(DOMNotificationPublishService.class);
-            return new BindingDOMNotificationPublishServiceAdapter(codec, domPublish);
-        }
-    }
-}
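A publishing sketch for the deleted adapter; the codec, DOM publish service and notification instance are assumed to exist, and the 500 ms timeout is arbitrary:

    // Try a bounded offer first, falling back to a blocking put; sketch only.
    static void publish(final BindingToNormalizedNodeCodec codec,
            final DOMNotificationPublishService domPublishService, final Notification notification)
            throws InterruptedException {
        final NotificationPublishService publisher =
                new BindingDOMNotificationPublishServiceAdapter(codec, domPublishService);
        final ListenableFuture<?> offered = publisher.offerNotification(notification, 500, TimeUnit.MILLISECONDS);
        if (NotificationPublishService.REJECTED.equals(offered)) {
            // The notification was not accepted within the timeout; block until it is.
            publisher.putNotification(notification);
        }
    }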
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMNotificationServiceAdapter.java
deleted file mode 100644 (file)
index 684122c..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-public class BindingDOMNotificationServiceAdapter implements NotificationService, AutoCloseable {
-
-    public static final Factory<NotificationService> BUILDER_FACTORY = Builder::new;
-    private final BindingNormalizedNodeSerializer codec;
-    private final DOMNotificationService domNotifService;
-
-    public BindingDOMNotificationServiceAdapter(final BindingNormalizedNodeSerializer codec,
-            final DOMNotificationService domNotifService) {
-        this.codec = codec;
-        this.domNotifService = domNotifService;
-    }
-
-    @Override
-    public <T extends NotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener) {
-        final BindingDOMNotificationListenerAdapter domListener =
-                new BindingDOMNotificationListenerAdapter(codec, listener);
-        final ListenerRegistration<BindingDOMNotificationListenerAdapter> domRegistration =
-                domNotifService.registerNotificationListener(domListener, domListener.getSupportedNotifications());
-        return new ListenerRegistrationImpl<>(listener, domRegistration);
-    }
-
-    @Override
-    public void close() {
-    }
-
-    private static class ListenerRegistrationImpl<T extends NotificationListener>
-            extends AbstractListenerRegistration<T> {
-        private final ListenerRegistration<?> listenerRegistration;
-
-        ListenerRegistrationImpl(final T listener, final ListenerRegistration<?> listenerRegistration) {
-            super(listener);
-            this.listenerRegistration = listenerRegistration;
-        }
-
-        @Override
-        protected void removeRegistration() {
-            listenerRegistration.close();
-        }
-    }
-
-    private static class Builder extends BindingDOMAdapterBuilder<NotificationService> {
-
-        @Override
-        protected NotificationService createInstance(final BindingToNormalizedNodeCodec codec,
-                final ClassToInstanceMap<DOMService> delegates) {
-            final DOMNotificationService domNotification = delegates.getInstance(DOMNotificationService.class);
-            return new BindingDOMNotificationServiceAdapter(codec.getCodecRegistry(), domNotification);
-        }
-
-        @Override
-        public Set<? extends Class<? extends DOMService>> getRequiredDelegates() {
-            return ImmutableSet.of(DOMNotificationService.class);
-        }
-    }
-
-    public DOMNotificationService getDomService() {
-        return domNotifService;
-    }
-}
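A subscription sketch for the deleted adapter; "ExampleListener" is an illustrative generated NotificationListener implementation, not part of this change:

    // Register a Binding notification listener over the DOM service; sketch only.
    static ListenerRegistration<ExampleListener> subscribe(final BindingNormalizedNodeSerializer serializer,
            final DOMNotificationService domNotificationService, final ExampleListener listener) {
        final NotificationService notifications =
                new BindingDOMNotificationServiceAdapter(serializer, domNotificationService);
        return notifications.registerNotificationListener(listener);
    }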
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMReadTransactionAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMReadTransactionAdapter.java
deleted file mode 100644 (file)
index 7a8414c..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-class BindingDOMReadTransactionAdapter extends AbstractForwardedTransaction<DOMDataReadOnlyTransaction> implements
-        ReadOnlyTransaction {
-
-    protected BindingDOMReadTransactionAdapter(final DOMDataReadOnlyTransaction delegate,
-            final BindingToNormalizedNodeCodec codec) {
-        super(delegate, codec);
-    }
-
-    @Override
-    public <T extends DataObject> CheckedFuture<Optional<T>, ReadFailedException> read(
-            final LogicalDatastoreType store, final InstanceIdentifier<T> path) {
-        return doRead(getDelegate(), store, path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path) {
-        return doExists(getDelegate(), store, path);
-    }
-
-    @Override
-    public void close() {
-        getDelegate().close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMReadWriteTransactionAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMReadWriteTransactionAdapter.java
deleted file mode 100644 (file)
index d3764fb..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-class BindingDOMReadWriteTransactionAdapter extends
-        BindingDOMWriteTransactionAdapter<DOMDataReadWriteTransaction> implements ReadWriteTransaction {
-
-    protected BindingDOMReadWriteTransactionAdapter(final DOMDataReadWriteTransaction delegate,
-            final BindingToNormalizedNodeCodec codec) {
-        super(delegate, codec);
-    }
-
-    @Override
-    public <T extends DataObject> CheckedFuture<Optional<T>, ReadFailedException> read(
-            final LogicalDatastoreType store, final InstanceIdentifier<T> path) {
-        return doRead(getDelegate(), store, path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-            final InstanceIdentifier<?> path) {
-        return doExists(getDelegate(), store, path);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcAdapterRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcAdapterRegistration.java
deleted file mode 100644 (file)
index fa45b8e..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-class BindingDOMRpcAdapterRegistration<T extends RpcService> extends AbstractObjectRegistration<T> {
-
-    private final DOMRpcImplementationRegistration<?> reg;
-
-    BindingDOMRpcAdapterRegistration(T instance, DOMRpcImplementationRegistration<?> reg) {
-        super(instance);
-        this.reg = reg;
-    }
-
-    @Override
-    protected void removeRegistration() {
-        reg.close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcImplementationAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcImplementationAdapter.java
deleted file mode 100644 (file)
index 72ee08f..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.lang.reflect.Method;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.ExecutionException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.mdsal.binding.dom.adapter.BindingDataAware;
-import org.opendaylight.mdsal.binding.dom.adapter.invoke.RpcServiceInvoker;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class BindingDOMRpcImplementationAdapter implements DOMRpcImplementation {
-
-    private static final Cache<Class<?>, RpcServiceInvoker> SERVICE_INVOKERS =
-            CacheBuilder.newBuilder().weakKeys().build();
-
-    private final BindingNormalizedNodeSerializer codec;
-    private final RpcServiceInvoker invoker;
-    private final RpcService delegate;
-    private final QName inputQname;
-
-    <T extends RpcService> BindingDOMRpcImplementationAdapter(final BindingNormalizedNodeSerializer codec,
-            final Class<T> type, final Map<SchemaPath, Method> localNameToMethod, final T delegate) {
-        try {
-            this.invoker = SERVICE_INVOKERS.get(type, () -> {
-                final Map<QName, Method> map = new HashMap<>();
-                for (Entry<SchemaPath, Method> e : localNameToMethod.entrySet()) {
-                    map.put(e.getKey().getLastComponent(), e.getValue());
-                }
-
-                return RpcServiceInvoker.from(map);
-            });
-        } catch (ExecutionException e) {
-            throw new IllegalArgumentException("Failed to create invokers for type " + type, e);
-        }
-
-        this.codec = requireNonNull(codec);
-        this.delegate = requireNonNull(delegate);
-        inputQname = QName.create(BindingReflections.getQNameModule(type), "input").intern();
-    }
-
-    @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final DOMRpcIdentifier rpc,
-            final NormalizedNode<?, ?> input) {
-        final SchemaPath schemaPath = rpc.getType();
-        final DataObject bindingInput = input != null ? deserialize(rpc.getType(), input) : null;
-        final ListenableFuture<RpcResult<?>> bindingResult = invoke(schemaPath, bindingInput);
-        return transformResult(bindingResult);
-    }
-
-    private DataObject deserialize(final SchemaPath rpcPath, final NormalizedNode<?, ?> input) {
-        if (input instanceof BindingDataAware) {
-            return ((BindingDataAware) input).bindingData();
-        }
-        final SchemaPath inputSchemaPath = rpcPath.createChild(inputQname);
-        return codec.fromNormalizedNodeRpcData(inputSchemaPath, (ContainerNode) input);
-    }
-
-    private ListenableFuture<RpcResult<?>> invoke(final SchemaPath schemaPath, final DataObject input) {
-        return invoker.invokeRpc(delegate, schemaPath.getLastComponent(), input);
-    }
-
-    private CheckedFuture<DOMRpcResult, DOMRpcException> transformResult(
-            final ListenableFuture<RpcResult<?>> bindingResult) {
-        return LazyDOMRpcResultFuture.create(codec, bindingResult);
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcProviderServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcProviderServiceAdapter.java
deleted file mode 100644 (file)
index 252a17f..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.collect.ImmutableSet;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class BindingDOMRpcProviderServiceAdapter {
-
-    private static final Set<YangInstanceIdentifier> GLOBAL = ImmutableSet.of(YangInstanceIdentifier.EMPTY);
-    private final BindingToNormalizedNodeCodec codec;
-    private final DOMRpcProviderService domRpcRegistry;
-
-    public BindingDOMRpcProviderServiceAdapter(final DOMRpcProviderService domRpcRegistry,
-            final BindingToNormalizedNodeCodec codec) {
-        this.codec = codec;
-        this.domRpcRegistry = domRpcRegistry;
-    }
-
-    public <S extends RpcService, T extends S> ObjectRegistration<T> registerRpcImplementation(final Class<S> type,
-            final T implementation) {
-        return register(type, implementation, GLOBAL);
-    }
-
-    public <S extends RpcService, T extends S> ObjectRegistration<T> registerRpcImplementation(final Class<S> type,
-            final T implementation, final Set<InstanceIdentifier<?>> paths) {
-        return register(type, implementation, toYangInstanceIdentifiers(paths));
-    }
-
-    private <S extends RpcService, T extends S> ObjectRegistration<T> register(final Class<S> type,
-            final T implementation, final Collection<YangInstanceIdentifier> rpcContextPaths) {
-        final Map<SchemaPath, Method> rpcs = codec.getRpcMethodToSchemaPath(type).inverse();
-
-        final BindingDOMRpcImplementationAdapter adapter = new BindingDOMRpcImplementationAdapter(
-                codec.getCodecRegistry(), type, rpcs, implementation);
-        final Set<DOMRpcIdentifier> domRpcs = createDomRpcIdentifiers(rpcs.keySet(), rpcContextPaths);
-        final DOMRpcImplementationRegistration<?> domReg = domRpcRegistry.registerRpcImplementation(adapter, domRpcs);
-        return new BindingRpcAdapterRegistration<>(implementation, domReg);
-    }
-
-    private static Set<DOMRpcIdentifier> createDomRpcIdentifiers(final Set<SchemaPath> rpcs,
-            final Collection<YangInstanceIdentifier> paths) {
-        final Set<DOMRpcIdentifier> ret = new HashSet<>();
-        for (final YangInstanceIdentifier path : paths) {
-            for (final SchemaPath rpc : rpcs) {
-                ret.add(DOMRpcIdentifier.create(rpc, path));
-            }
-        }
-        return ret;
-    }
-
-    private Collection<YangInstanceIdentifier> toYangInstanceIdentifiers(final Set<InstanceIdentifier<?>> identifiers) {
-        final Collection<YangInstanceIdentifier> ret = new ArrayList<>(identifiers.size());
-        for (final InstanceIdentifier<?> binding : identifiers) {
-            ret.add(codec.toYangInstanceIdentifierCached(binding));
-        }
-        return ret;
-    }
-}
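A provider-side sketch for the deleted adapter; "ExampleService" (an RpcService interface) and "ExampleServiceImpl" are illustrative names, not part of this change:

    // Register an RPC implementation at the global (empty) context; sketch only.
    static ObjectRegistration<ExampleServiceImpl> registerGlobalRpc(
            final DOMRpcProviderService domRpcProviderService, final BindingToNormalizedNodeCodec codec,
            final ExampleServiceImpl implementation) {
        final BindingDOMRpcProviderServiceAdapter rpcProviders =
                new BindingDOMRpcProviderServiceAdapter(domRpcProviderService, codec);
        return rpcProviders.registerRpcImplementation(ExampleService.class, implementation);
    }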
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMRpcServiceAdapter.java
deleted file mode 100644 (file)
index b58c56c..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public class BindingDOMRpcServiceAdapter implements RpcConsumerRegistry {
-
-    protected static final Factory<RpcConsumerRegistry> BUILDER_FACTORY = Builder::new;
-
-    private final LoadingCache<Class<? extends RpcService>, RpcServiceAdapter> proxies = CacheBuilder.newBuilder()
-            .weakKeys()
-            .build(new CacheLoader<Class<? extends RpcService>, RpcServiceAdapter>() {
-
-                @Override
-                public RpcServiceAdapter load(final Class<? extends RpcService> key) {
-                    return createProxy(key);
-                }
-
-            });
-
-    private final DOMRpcService domService;
-    private final BindingToNormalizedNodeCodec codec;
-
-    public BindingDOMRpcServiceAdapter(final DOMRpcService domService, final BindingToNormalizedNodeCodec codec) {
-        this.domService = domService;
-        this.codec = codec;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public <T extends RpcService> T getRpcService(final Class<T> rpcService) {
-        Preconditions.checkArgument(rpcService != null, "Rpc Service needs to be specified.");
-        return (T) proxies.getUnchecked(rpcService).getProxy();
-    }
-
-    private RpcServiceAdapter createProxy(final Class<? extends RpcService> key) {
-        Preconditions.checkArgument(BindingReflections.isBindingClass(key));
-        Preconditions.checkArgument(key.isInterface(), "Supplied RPC service type must be interface.");
-        return new RpcServiceAdapter(key, codec, domService);
-    }
-
-    private static final class Builder extends BindingDOMAdapterBuilder<RpcConsumerRegistry> {
-
-        @Override
-        protected RpcConsumerRegistry createInstance(final BindingToNormalizedNodeCodec codec,
-                final ClassToInstanceMap<DOMService> delegates) {
-            final DOMRpcService domRpc = delegates.getInstance(DOMRpcService.class);
-            return new BindingDOMRpcServiceAdapter(domRpc, codec);
-        }
-
-        @Override
-        public Set<? extends Class<? extends DOMService>> getRequiredDelegates() {
-            return ImmutableSet.of(DOMRpcService.class);
-        }
-
-    }
-
-}
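
Note: the adapter above caches one generated proxy per RpcService class in a Guava LoadingCache with weak keys, so cached proxies can be collected together with their class. A self-contained sketch of that caching pattern (ExpensiveProxy and proxyFor() are illustrative assumptions, not the removed code):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

final class PerClassCacheSketch {
    // Illustrative stand-in for the per-service proxy built by the adapter.
    record ExpensiveProxy(String forClass) { }

    private final LoadingCache<Class<?>, ExpensiveProxy> proxies = CacheBuilder.newBuilder()
            // Weak keys: an entry disappears once its class becomes unreachable.
            .weakKeys()
            .build(new CacheLoader<Class<?>, ExpensiveProxy>() {
                @Override
                public ExpensiveProxy load(final Class<?> key) {
                    return new ExpensiveProxy(key.getName());
                }
            });

    ExpensiveProxy proxyFor(final Class<?> service) {
        // getUnchecked() is appropriate because load() never throws a checked exception.
        return proxies.getUnchecked(service);
    }

    public static void main(final String[] args) {
        final PerClassCacheSketch sketch = new PerClassCacheSketch();
        // Prints "true": the second lookup returns the cached proxy.
        System.out.println(sketch.proxyFor(Runnable.class) == sketch.proxyFor(Runnable.class));
    }
}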
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMTransactionChainAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMTransactionChainAdapter.java
deleted file mode 100644 (file)
index 3358c9e..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
-import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.concepts.Delegator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class BindingDOMTransactionChainAdapter implements BindingTransactionChain, Delegator<DOMTransactionChain> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(BindingDOMTransactionChainAdapter.class);
-
-    private final DOMTransactionChain delegate;
-    private final BindingToNormalizedNodeCodec codec;
-    private final DelegateChainListener domListener;
-    private final TransactionChainListener bindingListener;
-
-    BindingDOMTransactionChainAdapter(final DOMDataBroker chainFactory,
-            final BindingToNormalizedNodeCodec codec, final TransactionChainListener listener) {
-        Preconditions.checkNotNull(chainFactory, "DOM Transaction chain factory must not be null");
-        this.domListener = new DelegateChainListener();
-        this.bindingListener = listener;
-        this.delegate = chainFactory.createTransactionChain(domListener);
-        this.codec = codec;
-    }
-
-    @Override
-    public DOMTransactionChain getDelegate() {
-        return delegate;
-    }
-
-    @Override
-    public ReadOnlyTransaction newReadOnlyTransaction() {
-        final DOMDataReadOnlyTransaction delegateTx = delegate.newReadOnlyTransaction();
-        return new BindingDOMReadTransactionAdapter(delegateTx, codec);
-    }
-
-    @Override
-    public ReadWriteTransaction newReadWriteTransaction() {
-        final DOMDataReadWriteTransaction delegateTx = delegate.newReadWriteTransaction();
-        return new BindingDOMReadWriteTransactionAdapter(delegateTx, codec) {
-
-            @Override
-            public FluentFuture<? extends CommitInfo> commit() {
-                return listenForFailure(this, super.commit());
-            }
-
-        };
-    }
-
-    @Override
-    public WriteTransaction newWriteOnlyTransaction() {
-        final DOMDataWriteTransaction delegateTx = delegate.newWriteOnlyTransaction();
-        return new BindingDOMWriteTransactionAdapter<DOMDataWriteTransaction>(delegateTx, codec) {
-
-            @Override
-            public FluentFuture<? extends CommitInfo> commit() {
-                return listenForFailure(this, super.commit());
-            }
-
-        };
-    }
-
-    private FluentFuture<? extends CommitInfo> listenForFailure(
-            final WriteTransaction tx, final FluentFuture<? extends CommitInfo> future) {
-        future.addCallback(new FutureCallback<CommitInfo>() {
-            @Override
-            public void onFailure(final Throwable ex) {
-                failTransactionChain(tx, ex);
-            }
-
-            @Override
-            public void onSuccess(final CommitInfo result) {
-                // Intentionally NOOP
-            }
-        }, MoreExecutors.directExecutor());
-
-        return future;
-    }
-
-    private void failTransactionChain(final WriteTransaction tx, final Throwable ex) {
-        /*
-         * We assume a correct state change for the underlying transaction chain,
-         * so we are not changing any of our internal state to mark that we failed.
-         */
-        this.bindingListener.onTransactionChainFailed(this, tx, ex);
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-    }
-
-    private final class DelegateChainListener implements TransactionChainListener {
-
-        @Override
-        public void onTransactionChainFailed(final TransactionChain<?, ?> chain,
-                final AsyncTransaction<?, ?> transaction, final Throwable cause) {
-            Preconditions.checkState(delegate.equals(chain),
-                    "Illegal state - listener for %s was invoked for incorrect chain %s.", delegate, chain);
-            /*
-             * Intentionally a no-op failure callback: we also listen on each transaction
-             * future for failure, so that we can report the binding transaction (the one
-             * seen by the client of this chain) instead of the DOM transaction, which is
-             * known only to this chain, the binding transaction implementation and the
-             * underlying transaction chain.
-             */
-            LOG.debug("Transaction chain {} failed. Failed DOM Transaction {}", this, transaction, cause);
-        }
-
-        @Override
-        public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
-            Preconditions.checkState(delegate.equals(chain),
-                    "Illegal state - listener for %s was invoked for incorrect chain %s.", delegate, chain);
-            bindingListener.onTransactionChainSuccessful(BindingDOMTransactionChainAdapter.this);
-        }
-    }
-
-}
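
Note: listenForFailure() above is a reusable idiom: attach a failure-only callback to a commit future on a direct executor and return the future unchanged to the caller. A runnable sketch of the same idiom with plain Guava futures (failChain() is a hypothetical stand-in for failTransactionChain()):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

final class FailureListenerSketch {
    // Hypothetical stand-in for failTransactionChain(tx, ex).
    static void failChain(final Throwable cause) {
        System.out.println("chain failed: " + cause.getMessage());
    }

    // Attach a failure-only callback and hand the untouched future back to the caller.
    static <T> ListenableFuture<T> listenForFailure(final ListenableFuture<T> future) {
        Futures.addCallback(future, new FutureCallback<T>() {
            @Override
            public void onSuccess(final T result) {
                // Intentionally a no-op: success needs no chain-level action.
            }

            @Override
            public void onFailure(final Throwable cause) {
                failChain(cause);
            }
        }, MoreExecutors.directExecutor());
        return future;
    }

    public static void main(final String[] args) {
        final SettableFuture<Void> commit = SettableFuture.create();
        listenForFailure(commit);
        commit.setException(new IllegalStateException("simulated commit failure"));
    }
}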
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMWriteTransactionAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMWriteTransactionAdapter.java
deleted file mode 100644 (file)
index eeb519f..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.ArrayList;
-import java.util.List;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-
-class BindingDOMWriteTransactionAdapter<T extends DOMDataWriteTransaction> extends
-        AbstractWriteTransaction<T> implements WriteTransaction {
-
-    protected BindingDOMWriteTransactionAdapter(final T delegateTx, final BindingToNormalizedNodeCodec codec) {
-        super(delegateTx, codec);
-    }
-
-    @Override
-    public <U extends DataObject> void put(final LogicalDatastoreType store, final InstanceIdentifier<U> path,
-                                           final U data) {
-        put(store, path, data, false);
-    }
-
-    @Override
-    public <D extends DataObject> void merge(final LogicalDatastoreType store, final InstanceIdentifier<D> path,
-                                             final D data) {
-        merge(store, path, data, false);
-    }
-
-    @Override
-    protected void ensureParentsByMerge(final LogicalDatastoreType store,
-            final YangInstanceIdentifier normalizedPath, final InstanceIdentifier<?> path) {
-        List<PathArgument> currentArguments = new ArrayList<>();
-        DataNormalizationOperation<?> currentOp = getCodec().getDataNormalizer().getRootOperation();
-        for (PathArgument currentArg : normalizedPath.getPathArguments()) {
-            try {
-                currentOp = currentOp.getChild(currentArg);
-            } catch (DataNormalizationException e) {
-                throw new IllegalArgumentException(String.format("Invalid child encountered in path %s", path), e);
-            }
-            currentArguments.add(currentArg);
-            YangInstanceIdentifier currentPath = YangInstanceIdentifier.create(currentArguments);
-
-            getDelegate().merge(store, currentPath, currentOp.createDefault(currentArg));
-        }
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final InstanceIdentifier<?> path) {
-        doDelete(store, path);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return doCommit();
-    }
-
-    @Override
-    public boolean cancel() {
-        return doCancel();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDataTreeChangeListenerRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDataTreeChangeListenerRegistration.java
deleted file mode 100644 (file)
index c176e2d..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-class BindingDataTreeChangeListenerRegistration<L extends DataTreeChangeListener<?>>
-        extends AbstractListenerRegistration<L> {
-
-    private final ListenerRegistration<?> domReg;
-
-    BindingDataTreeChangeListenerRegistration(final L listener, final ListenerRegistration<?> domReg) {
-        super(listener);
-        this.domReg = Preconditions.checkNotNull(domReg);
-    }
-
-    @Override
-    protected void removeRegistration() {
-        domReg.close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingMountPointAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingMountPointAdapter.java
deleted file mode 100644 (file)
index ebcfcc6..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.LoadingCache;
-import org.opendaylight.controller.md.sal.binding.api.BindingService;
-import org.opendaylight.controller.md.sal.binding.api.MountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class BindingMountPointAdapter implements MountPoint {
-
-    private final InstanceIdentifier<?> identifier;
-    private LoadingCache<Class<? extends BindingService>, Optional<BindingService>> services;
-
-    public BindingMountPointAdapter(final BindingToNormalizedNodeCodec codec, final DOMMountPoint domMountPoint) {
-        identifier = codec.getCodecRegistry().fromYangInstanceIdentifier(domMountPoint.getIdentifier());
-        services = CacheBuilder.newBuilder().build(new BindingDOMAdapterLoader(codec) {
-
-            @Override
-            protected DOMService getDelegate(Class<? extends DOMService> reqDeleg) {
-                return domMountPoint.getService(reqDeleg).orNull();
-            }
-        });
-    }
-
-    @Override
-    public InstanceIdentifier<?> getIdentifier() {
-        return identifier;
-    }
-
-    @Override
-    public <T extends BindingService> Optional<T> getService(Class<T> service) {
-        Optional<BindingService> potential = services.getUnchecked(service);
-        if (potential.isPresent()) {
-            return Optional.of(service.cast(potential.get()));
-        }
-        return Optional.absent();
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingRpcAdapterRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingRpcAdapterRegistration.java
deleted file mode 100644 (file)
index 7285d9c..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-class BindingRpcAdapterRegistration<T extends RpcService> extends AbstractObjectRegistration<T> {
-
-    private final DOMRpcImplementationRegistration<?> reg;
-
-    BindingRpcAdapterRegistration(T instance, DOMRpcImplementationRegistration<?> reg) {
-        super(instance);
-        this.reg = reg;
-    }
-
-    @Override
-    protected void removeRegistration() {
-        reg.close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToDOMAdapterFactory.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToDOMAdapterFactory.java
deleted file mode 100644 (file)
index f1da15c..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNullByDefault;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.spi.AdapterFactory;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-
-@Beta
-@NonNullByDefault
-public final class BindingToDOMAdapterFactory implements AdapterFactory {
-    private final BindingToNormalizedNodeCodec codec;
-
-    public BindingToDOMAdapterFactory(final BindingToNormalizedNodeCodec codec) {
-        this.codec = requireNonNull(codec);
-    }
-
-    @Override
-    public DataBroker createDataBroker(final DOMDataBroker domBroker) {
-        return new BindingDOMDataBrokerAdapter(requireNonNull(domBroker), codec);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToNormalizedNodeCodec.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToNormalizedNodeCodec.java
deleted file mode 100644 (file)
index d8ba10c..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.collect.ImmutableBiMap;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.lang.reflect.Method;
-import java.util.Iterator;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
-import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTreeNode;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.api.ClassLoadingStrategy;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_SUPERCLASS", justification = "Migration path")
-public class BindingToNormalizedNodeCodec
-        extends org.opendaylight.mdsal.binding.dom.adapter.BindingToNormalizedNodeCodec {
-
-    private DataNormalizer legacyToNormalized = null;
-
-    public BindingToNormalizedNodeCodec(final ClassLoadingStrategy classLoadingStrategy,
-            final BindingNormalizedNodeCodecRegistry codecRegistry) {
-        super(classLoadingStrategy, codecRegistry);
-    }
-
-    public BindingToNormalizedNodeCodec(final ClassLoadingStrategy classLoadingStrategy,
-            final BindingNormalizedNodeCodecRegistry codecRegistry, final boolean waitForSchema) {
-        super(classLoadingStrategy, codecRegistry, waitForSchema);
-    }
-
-    DataNormalizer getDataNormalizer() {
-        return this.legacyToNormalized;
-    }
-
-    @Override
-    public YangInstanceIdentifier toYangInstanceIdentifierBlocking(
-            final InstanceIdentifier<? extends DataObject> binding) {
-        return super.toYangInstanceIdentifierBlocking(binding);
-    }
-
-    @Override
-    public YangInstanceIdentifier toYangInstanceIdentifierCached(final InstanceIdentifier<?> binding) {
-        return super.toYangInstanceIdentifierCached(binding);
-    }
-
-    @Override
-    public void onGlobalContextUpdated(final SchemaContext schemaContext) {
-        this.legacyToNormalized = new DataNormalizer(schemaContext);
-        super.onGlobalContextUpdated(schemaContext);
-    }
-
-    /**
-     * Returns a default object according to the YANG schema for the supplied path.
-     *
-     * @param path DOM path
-     * @return Node with defaults set
-     */
-    @Override
-    public NormalizedNode<?, ?> getDefaultNodeFor(final YangInstanceIdentifier path) {
-        final Iterator<PathArgument> iterator = path.getPathArguments().iterator();
-        DataNormalizationOperation<?> currentOp = this.legacyToNormalized.getRootOperation();
-        while (iterator.hasNext()) {
-            final PathArgument currentArg = iterator.next();
-            try {
-                currentOp = currentOp.getChild(currentArg);
-            } catch (final DataNormalizationException e) {
-                throw new IllegalArgumentException(String.format("Invalid child encountered in path %s", path), e);
-            }
-        }
-        return currentOp.createDefault(path.getLastPathArgument());
-    }
-
-    @Override
-    public ImmutableBiMap<Method, RpcDefinition> getRpcMethodToSchema(final Class<? extends RpcService> key) {
-        return super.getRpcMethodToSchema(key);
-    }
-
-    @Override
-    public Map.Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> getSubtreeCodec(
-            final YangInstanceIdentifier domIdentifier) {
-        return super.getSubtreeCodec(domIdentifier);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ContextReferenceExtractor.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ContextReferenceExtractor.java
deleted file mode 100644 (file)
index 043501c..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import java.lang.reflect.Method;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-abstract class ContextReferenceExtractor {
-    private static final Logger LOG = LoggerFactory.getLogger(ContextReferenceExtractor.class);
-    private static final ContextReferenceExtractor NULL_EXTRACTOR = new ContextReferenceExtractor() {
-
-        @Override
-        InstanceIdentifier<?> extract(final DataObject obj) {
-            return null;
-        }
-    };
-
-    private static final LoadingCache<Class<?>, ContextReferenceExtractor> EXTRACTORS = CacheBuilder.newBuilder()
-            .weakKeys().build(new CacheLoader<Class<?>, ContextReferenceExtractor>() {
-
-                @Override
-                public ContextReferenceExtractor load(final Class<?> key) {
-                    return create(key);
-                }
-            });
-
-
-    private static final String GET_VALUE_NAME = "getValue";
-
-    static ContextReferenceExtractor from(final Class<?> obj) {
-        return EXTRACTORS.getUnchecked(obj);
-    }
-
-    /**
-     * Extracts the context reference (an InstanceIdentifier) from a Binding DataObject.
-     *
-     * @param obj DataObject from which the context reference should be extracted
-     * @return InstanceIdentifier representing the context reference, or null if the data object
-     *     does not contain a context reference
-     */
-    abstract @Nullable InstanceIdentifier<?> extract(DataObject obj);
-
-    private static @NonNull ContextReferenceExtractor create(final Class<?> key) {
-        final Method contextGetter = getContextGetter(key);
-        if (contextGetter == null) {
-            return NULL_EXTRACTOR;
-        }
-        final Class<?> returnType = contextGetter.getReturnType();
-        try {
-            if (InstanceIdentifier.class.isAssignableFrom(returnType)) {
-                return DirectGetterRouteContextExtractor.create(contextGetter);
-            }
-            final Method getValueMethod = findGetValueMethod(returnType, InstanceIdentifier.class);
-            if (getValueMethod != null) {
-                return GetValueRouteContextExtractor.create(contextGetter, getValueMethod);
-            } else {
-                LOG.warn("Class {} can not be used to determine context, falling back to NULL_EXTRACTOR.",returnType);
-            }
-        } catch (final IllegalAccessException e) {
-            LOG.warn("Class {} does not conform to Binding Specification v1. Falling back to NULL_EXTRACTOR", e);
-        }
-        return NULL_EXTRACTOR;
-    }
-
-    private static @Nullable Method findGetValueMethod(final Class<?> type, final Class<?> returnType) {
-        try {
-            final Method method = type.getMethod(GET_VALUE_NAME);
-            if (returnType.equals(method.getReturnType())) {
-                return method;
-            }
-        } catch (final NoSuchMethodException e) {
-            LOG.warn("Value class {} does not comform to Binding Specification v1.", type, e);
-        }
-        return null;
-    }
-
-    private static Method getContextGetter(final Class<?> key) {
-        for (final Method method : key.getMethods()) {
-            if (method.getAnnotation(RoutingContext.class) != null) {
-                return method;
-            }
-        }
-        return null;
-    }
-}
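
Note: getContextGetter() above is plain reflection: scan the public methods of a class for a marker annotation and use the first match. A self-contained sketch of the same lookup, with a hypothetical @ContextRef annotation standing in for @RoutingContext:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Method;

final class AnnotatedGetterSketch {
    // Hypothetical marker playing the role of @RoutingContext.
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    @interface ContextRef { }

    static final class Flow {
        @ContextRef
        public String getNode() {
            return "node-1";
        }
    }

    // Mirrors getContextGetter(): first public method carrying the marker, or null.
    static Method findContextGetter(final Class<?> type) {
        for (final Method method : type.getMethods()) {
            if (method.getAnnotation(ContextRef.class) != null) {
                return method;
            }
        }
        return null;
    }

    public static void main(final String[] args) throws Exception {
        final Method getter = findContextGetter(Flow.class);
        System.out.println(getter.getName() + " -> " + getter.invoke(new Flow()));
    }
}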
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ControllerActionProviderServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ControllerActionProviderServiceAdapter.java
deleted file mode 100644 (file)
index ededf4e..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.binding.api.ActionProviderService;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.dom.api.DOMActionProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.dom.adapter.ActionProviderServiceAdapter;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.binding.Action;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-final class ControllerActionProviderServiceAdapter implements ActionProviderService {
-    private static final class Builder extends BindingDOMAdapterBuilder<ActionProviderService> {
-        @Override
-        public Set<? extends Class<? extends DOMService>> getRequiredDelegates() {
-            return ImmutableSet.of(DOMActionProviderService.class);
-        }
-
-        @Override
-        protected ActionProviderService createInstance(BindingToNormalizedNodeCodec codec,
-                ClassToInstanceMap<DOMService> delegates) {
-            final DOMActionProviderService domAction = delegates.getInstance(DOMActionProviderService.class);
-            return new ControllerActionProviderServiceAdapter(requireNonNull(codec), domAction);
-        }
-    }
-
-    static final Factory<ActionProviderService> BUILDER_FACTORY = Builder::new;
-
-    private final ActionProviderServiceAdapter delegate;
-
-    ControllerActionProviderServiceAdapter(BindingToNormalizedNodeCodec codec, DOMActionProviderService domService) {
-        this.delegate = ActionProviderServiceAdapter.create(codec, domService);
-    }
-
-    @Override
-    public <O extends @NonNull DataObject, P extends @NonNull InstanceIdentifier<O>, T extends @NonNull Action<P, ?, ?>,
-            S extends T> ObjectRegistration<S> registerImplementation(Class<T> actionInterface, S implementation,
-                    LogicalDatastoreType datastore, Set<DataTreeIdentifier<O>> validNodes) {
-        return delegate.registerImplementation(actionInterface, implementation, datastore, validNodes);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ControllerActionServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/ControllerActionServiceAdapter.java
deleted file mode 100644 (file)
index f209b3a..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.binding.api.ActionService;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
-import org.opendaylight.controller.md.sal.dom.api.DOMActionService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
-import org.opendaylight.mdsal.binding.dom.adapter.ActionServiceAdapter;
-import org.opendaylight.yangtools.yang.binding.Action;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-
-final class ControllerActionServiceAdapter implements ActionService {
-    private static final class Builder extends BindingDOMAdapterBuilder<ActionService> {
-        @Override
-        public Set<? extends Class<? extends DOMService>> getRequiredDelegates() {
-            return ImmutableSet.of(DOMActionService.class);
-        }
-
-        @Override
-        protected ActionService createInstance(final BindingToNormalizedNodeCodec codec,
-                final ClassToInstanceMap<DOMService> delegates) {
-            final DOMActionService domAction = delegates.getInstance(DOMActionService.class);
-            return new ControllerActionServiceAdapter(requireNonNull(codec), domAction);
-        }
-    }
-
-    static final Factory<ActionService> BUILDER_FACTORY = Builder::new;
-    private final ActionServiceAdapter delegate;
-
-    ControllerActionServiceAdapter(final BindingToNormalizedNodeCodec codec, final DOMActionService domService) {
-        this.delegate = ActionServiceAdapter.create(codec, domService);
-    }
-
-    @Override
-    public <O extends @NonNull DataObject, T extends @NonNull Action<?, ?, ?>> T getActionHandle(
-            final Class<T> actionInterface, final Set<DataTreeIdentifier<O>> validNodes) {
-        return delegate.getActionHandle(actionInterface, validNodes);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/DirectGetterRouteContextExtractor.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/DirectGetterRouteContextExtractor.java
deleted file mode 100644 (file)
index cb02bfd..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Throwables;
-import java.lang.invoke.MethodHandle;
-import java.lang.invoke.MethodHandles;
-import java.lang.invoke.MethodHandles.Lookup;
-import java.lang.invoke.MethodType;
-import java.lang.reflect.Method;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-final class DirectGetterRouteContextExtractor extends ContextReferenceExtractor {
-
-    private static final Lookup PUBLIC_LOOKUP = MethodHandles.publicLookup();
-    private final MethodHandle handle;
-
-    private DirectGetterRouteContextExtractor(final MethodHandle rawHandle) {
-        handle = rawHandle.asType(MethodType.methodType(InstanceIdentifier.class, DataObject.class));
-    }
-
-    static ContextReferenceExtractor create(final Method getterMethod) throws IllegalAccessException {
-        final MethodHandle getterHandle = PUBLIC_LOOKUP.unreflect(getterMethod);
-        return new DirectGetterRouteContextExtractor(getterHandle);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    InstanceIdentifier<?> extract(final DataObject obj) {
-        try {
-            return (InstanceIdentifier<?>) handle.invokeExact(obj);
-        } catch (Throwable e) {
-            throw Throwables.propagate(e);
-        }
-    }
-}
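
Note: the extractor above avoids per-call reflection by unreflecting the getter into a MethodHandle once and adapting its type with asType(), so that invokeExact() can be used on the hot path. A runnable sketch of the technique using only JDK types (the Node record and getId() are illustrative assumptions):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
import java.lang.reflect.Method;

final class MethodHandleGetterSketch {
    record Node(String id) {
        public String getId() {
            return id;
        }
    }

    public static void main(final String[] args) throws Throwable {
        final Method getter = Node.class.getMethod("getId");

        // unreflect() once, then asType() so the call site matches (Object) -> String exactly,
        // which is what invokeExact() requires.
        final MethodHandle handle = MethodHandles.lookup()
                .unreflect(getter)
                .asType(MethodType.methodType(String.class, Object.class));

        final Object node = new Node("node-7");
        final String id = (String) handle.invokeExact(node);
        System.out.println(id);
    }
}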
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/GetValueRouteContextExtractor.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/GetValueRouteContextExtractor.java
deleted file mode 100644 (file)
index f436c5e..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Throwables;
-import java.lang.invoke.MethodHandle;
-import java.lang.invoke.MethodHandles;
-import java.lang.invoke.MethodHandles.Lookup;
-import java.lang.invoke.MethodType;
-import java.lang.reflect.Method;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-final class GetValueRouteContextExtractor extends ContextReferenceExtractor {
-
-    private static final Lookup PUBLIC_LOOKUP = MethodHandles.publicLookup();
-    private final MethodHandle contextHandle;
-    private final MethodHandle valueHandle;
-
-    private GetValueRouteContextExtractor(final MethodHandle rawContextHandle, final MethodHandle rawValueHandle) {
-        contextHandle = rawContextHandle.asType(MethodType.methodType(Object.class, DataObject.class));
-        valueHandle = rawValueHandle.asType(MethodType.methodType(InstanceIdentifier.class, Object.class));
-    }
-
-    public static ContextReferenceExtractor create(final Method contextGetter, final Method getValueMethod)
-            throws IllegalAccessException {
-        final MethodHandle rawContextHandle = PUBLIC_LOOKUP.unreflect(contextGetter);
-        final MethodHandle rawValueHandle = PUBLIC_LOOKUP.unreflect(getValueMethod);
-        return new GetValueRouteContextExtractor(rawContextHandle, rawValueHandle);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    InstanceIdentifier<?> extract(final DataObject obj) {
-        try {
-            final Object ctx = contextHandle.invokeExact(obj);
-            if (ctx != null) {
-                return (InstanceIdentifier<?>) valueHandle.invokeExact(ctx);
-            }
-            return null;
-        } catch (Throwable e) {
-            throw Throwables.propagate(e);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDOMRpcResultFuture.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDOMRpcResultFuture.java
deleted file mode 100644 (file)
index 173477c..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.mdsal.binding.dom.adapter.BindingRpcFutureAware;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yangtools.yang.binding.DataContainer;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-final class LazyDOMRpcResultFuture implements CheckedFuture<DOMRpcResult, DOMRpcException>, BindingRpcFutureAware {
-
-    private final ListenableFuture<RpcResult<?>> bindingFuture;
-    private final BindingNormalizedNodeSerializer codec;
-    private volatile DOMRpcResult result;
-
-    private LazyDOMRpcResultFuture(final ListenableFuture<RpcResult<?>> delegate,
-            final BindingNormalizedNodeSerializer codec) {
-        this.bindingFuture = Preconditions.checkNotNull(delegate, "delegate");
-        this.codec = Preconditions.checkNotNull(codec, "codec");
-    }
-
-    static CheckedFuture<DOMRpcResult, DOMRpcException> create(final BindingNormalizedNodeSerializer codec,
-            final ListenableFuture<RpcResult<?>> bindingResult) {
-        return new LazyDOMRpcResultFuture(bindingResult, codec);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<?>> getBindingFuture() {
-        return bindingFuture;
-    }
-
-    @Override
-    public boolean cancel(final boolean mayInterruptIfRunning) {
-        return bindingFuture.cancel(mayInterruptIfRunning);
-    }
-
-    @Override
-    public void addListener(final Runnable listener, final Executor executor) {
-        bindingFuture.addListener(listener, executor);
-    }
-
-    @Override
-    public DOMRpcResult get() throws InterruptedException, ExecutionException {
-        if (result != null) {
-            return result;
-        }
-        return transformIfNecessary(bindingFuture.get());
-    }
-
-    @Override
-    public DOMRpcResult get(final long timeout, final TimeUnit unit) throws InterruptedException, ExecutionException,
-            TimeoutException {
-        if (result != null) {
-            return result;
-        }
-        return transformIfNecessary(bindingFuture.get(timeout, unit));
-    }
-
-    @Override
-    public DOMRpcResult checkedGet() {
-        try {
-            return get();
-        } catch (InterruptedException | ExecutionException e) {
-            // FIXME: Add exception mapping
-            throw Throwables.propagate(e);
-        }
-    }
-
-    @Override
-    public DOMRpcResult checkedGet(final long timeout, final TimeUnit unit) throws TimeoutException {
-        try {
-            return get(timeout, unit);
-        } catch (InterruptedException | ExecutionException e) {
-            // FIXME: Add exception mapping
-            throw Throwables.propagate(e);
-        }
-    }
-
-    @Override
-    public boolean isCancelled() {
-        return bindingFuture.isCancelled();
-    }
-
-    @Override
-    public boolean isDone() {
-        return bindingFuture.isDone();
-    }
-
-    private synchronized DOMRpcResult transformIfNecessary(final RpcResult<?> input) {
-        if (result == null) {
-            result = transform(input);
-        }
-        return result;
-    }
-
-    private DOMRpcResult transform(final RpcResult<?> input) {
-        if (input.isSuccessful()) {
-            final Object inputData = input.getResult();
-            if (inputData instanceof DataContainer) {
-                return new DefaultDOMRpcResult(codec.toNormalizedNodeRpcData((DataContainer) inputData));
-            } else {
-                return new DefaultDOMRpcResult((NormalizedNode<?, ?>) null);
-            }
-        }
-        return new DefaultDOMRpcResult(input.getErrors());
-    }
-
-}
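
Note: transformIfNecessary() above is a small memoization idiom: a volatile result field plus a synchronized slow path keeps the DOM translation lazy and guarantees it runs at most once. A generic, runnable restatement of that pattern (LazyTransform is illustrative, not the removed class):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;

final class LazyTransform<I, O> {
    private final I input;
    private final Function<I, O> transform;
    private volatile O result;

    LazyTransform(final I input, final Function<I, O> transform) {
        this.input = input;
        this.transform = transform;
    }

    // Fast path: read the volatile field. Slow path: synchronized, so the transform runs at most once.
    O get() {
        final O existing = result;
        return existing != null ? existing : transformSlow();
    }

    private synchronized O transformSlow() {
        if (result == null) {
            result = transform.apply(input);
        }
        return result;
    }

    public static void main(final String[] args) {
        final AtomicInteger calls = new AtomicInteger();
        final LazyTransform<String, String> lazy =
                new LazyTransform<>("raw-result", in -> in + " (converted, call #" + calls.incrementAndGet() + ")");
        System.out.println(lazy.get());
        System.out.println(lazy.get()); // same converted value; the transform ran only once
    }
}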
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataObjectModification.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataObjectModification.java
deleted file mode 100644 (file)
index 1a21b98..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static java.util.Objects.requireNonNull;
-import static org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType.UNMODIFIED;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Optional;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
-import org.opendaylight.mdsal.binding.dom.adapter.BindingStructuralType;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTreeNode;
-import org.opendaylight.yangtools.yang.binding.Augmentation;
-import org.opendaylight.yangtools.yang.binding.ChildOf;
-import org.opendaylight.yangtools.yang.binding.ChoiceIn;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.Identifiable;
-import org.opendaylight.yangtools.yang.binding.Identifier;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.IdentifiableItem;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.Item;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Lazily translated {@link DataObjectModification} based on {@link DataTreeCandidateNode}.
- *
- * {@link LazyDataObjectModification} represents a data tree change event, but the whole tree
- * is not translated or resolved eagerly; only the child nodes directly accessed by the user
- * of the data object modification are translated.
- *
- * @param <T> Type of Binding Data Object
- */
-final class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(LazyDataObjectModification.class);
-
-    private final BindingCodecTreeNode<T> codec;
-    private final DataTreeCandidateNode domData;
-    private final PathArgument identifier;
-
-    private volatile Collection<LazyDataObjectModification<? extends DataObject>> childNodesCache;
-    private volatile ModificationType modificationType;
-
-    private LazyDataObjectModification(final BindingCodecTreeNode<T> codec, final DataTreeCandidateNode domData) {
-        this.codec = requireNonNull(codec);
-        this.domData = requireNonNull(domData);
-        this.identifier = codec.deserializePathArgument(domData.getIdentifier());
-    }
-
-    static <T extends DataObject> LazyDataObjectModification<T> create(final BindingCodecTreeNode<T> codec,
-            final DataTreeCandidateNode domData) {
-        return new LazyDataObjectModification<>(codec, domData);
-    }
-
-    private static Collection<LazyDataObjectModification<? extends DataObject>> from(
-            final BindingCodecTreeNode<?> parentCodec, final Collection<DataTreeCandidateNode> domChildNodes) {
-        final List<LazyDataObjectModification<? extends DataObject>> result = new ArrayList<>(domChildNodes.size());
-        populateList(result, parentCodec, domChildNodes);
-        return result;
-    }
-
-    private static void populateList(final List<LazyDataObjectModification<? extends DataObject>> result,
-            final BindingCodecTreeNode<?> parentCodec, final Collection<DataTreeCandidateNode> domChildNodes) {
-        for (final DataTreeCandidateNode domChildNode : domChildNodes) {
-            if (domChildNode.getModificationType() != UNMODIFIED) {
-                final BindingStructuralType type = BindingStructuralType.from(domChildNode);
-                if (type != BindingStructuralType.NOT_ADDRESSABLE) {
-                    /*
-                     * Even if the type is UNKNOWN from the perspective of BindingStructuralType,
-                     * we try to load a codec for it and use that type to make the debug log
-                     * more specific.
-                     */
-                    try {
-                        final BindingCodecTreeNode<?> childCodec =
-                                parentCodec.yangPathArgumentChild(domChildNode.getIdentifier());
-                        populateList(result, type, childCodec, domChildNode);
-                    } catch (final IllegalArgumentException e) {
-                        if (type == BindingStructuralType.UNKNOWN) {
-                            LOG.debug("Unable to deserialize unknown DOM node {}", domChildNode, e);
-                        } else {
-                            LOG.debug("Binding representation for DOM node {} was not found", domChildNode, e);
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    private static void populateList(final List<LazyDataObjectModification<? extends DataObject>> result,
-            final BindingStructuralType type, final BindingCodecTreeNode<?> childCodec,
-            final DataTreeCandidateNode domChildNode) {
-        switch (type) {
-            case INVISIBLE_LIST:
-                // We use parent codec intentionally.
-                populateListWithSingleCodec(result, childCodec, domChildNode.getChildNodes());
-                break;
-            case INVISIBLE_CONTAINER:
-                populateList(result, childCodec, domChildNode.getChildNodes());
-                break;
-            case UNKNOWN:
-            case VISIBLE_CONTAINER:
-                result.add(create(childCodec, domChildNode));
-                break;
-            default:
-                break;
-        }
-    }
-
-    private static void populateListWithSingleCodec(
-            final List<LazyDataObjectModification<? extends DataObject>> result,
-            final BindingCodecTreeNode<?> codec, final Collection<DataTreeCandidateNode> childNodes) {
-        for (final DataTreeCandidateNode child : childNodes) {
-            if (child.getModificationType() != UNMODIFIED) {
-                result.add(create(codec, child));
-            }
-        }
-    }
-
-    @Override
-    public T getDataBefore() {
-        return deserialize(domData.getDataBefore());
-    }
-
-    @Override
-    public T getDataAfter() {
-        return deserialize(domData.getDataAfter());
-    }
-
-    @Override
-    public Class<T> getDataType() {
-        return codec.getBindingClass();
-    }
-
-    @Override
-    public PathArgument getIdentifier() {
-        return identifier;
-    }
-
-    @Override
-    public ModificationType getModificationType() {
-        ModificationType localType = modificationType;
-        if (localType != null) {
-            return localType;
-        }
-
-        switch (domData.getModificationType()) {
-            case APPEARED:
-            case WRITE:
-                localType = ModificationType.WRITE;
-                break;
-            case DISAPPEARED:
-            case DELETE:
-                localType = ModificationType.DELETE;
-                break;
-            case SUBTREE_MODIFIED:
-                localType = resolveSubtreeModificationType();
-                break;
-            default:
-                // TODO: Should we lie about modification type instead of exception?
-                throw new IllegalStateException("Unsupported DOM Modification type " + domData.getModificationType());
-        }
-
-        modificationType = localType;
-        return localType;
-    }
-
-    private ModificationType resolveSubtreeModificationType() {
-        switch (codec.getChildAddressabilitySummary()) {
-            case ADDRESSABLE:
-                // All children are addressable, it is safe to report SUBTREE_MODIFIED
-                return ModificationType.SUBTREE_MODIFIED;
-            case UNADDRESSABLE:
-                // All children are non-addressable, report WRITE
-                return ModificationType.WRITE;
-            case MIXED:
-                // This case is not completely trivial, as we may have NOT_ADDRESSABLE nodes underneath us. If that
-                // is the case, we need to turn this modification into a WRITE operation, so that the user is able
-                // to observe those nodes being introduced. This is not efficient, but unfortunately unavoidable,
-                // as we cannot accurately represent such changes.
-                for (DataTreeCandidateNode child : domData.getChildNodes()) {
-                    if (BindingStructuralType.recursiveFrom(child) == BindingStructuralType.NOT_ADDRESSABLE) {
-                        // We have a non-addressable child, turn this modification into a write
-                        return ModificationType.WRITE;
-                    }
-                }
-
-                // No unaddressable children found, proceed in addressed mode
-                return ModificationType.SUBTREE_MODIFIED;
-            default:
-                throw new IllegalStateException("Unsupported child addressability summary "
-                        + codec.getChildAddressabilitySummary());
-        }
-    }
-
-    @Override
-    public Collection<LazyDataObjectModification<? extends DataObject>> getModifiedChildren() {
-        Collection<LazyDataObjectModification<? extends DataObject>> local = childNodesCache;
-        if (local == null) {
-            childNodesCache = local = from(codec, domData.getChildNodes());
-        }
-        return local;
-    }
-
-    @Override
-    public <H extends ChoiceIn<? super T> & DataObject, C extends ChildOf<? super H>>
-            Collection<DataObjectModification<C>> getModifiedChildren(final Class<H> caseType,
-                    final Class<C> childType) {
-        return streamModifiedChildren(childType)
-                .filter(child -> caseType.equals(child.identifier.getCaseType().orElse(null)))
-                .collect(Collectors.toList());
-    }
-
-    @SuppressWarnings("unchecked")
-    private <C extends DataObject> Stream<LazyDataObjectModification<C>> streamModifiedChildren(
-            final Class<C> childType) {
-        return getModifiedChildren().stream()
-                .filter(child -> childType.isAssignableFrom(child.getDataType()))
-                .map(child -> (LazyDataObjectModification<C>) child);
-    }
-
-    @Override
-    public DataObjectModification<? extends DataObject> getModifiedChild(final PathArgument arg) {
-        final List<YangInstanceIdentifier.PathArgument> domArgumentList = new ArrayList<>();
-        final BindingCodecTreeNode<?> childCodec = codec.bindingPathArgumentChild(arg, domArgumentList);
-        final Iterator<YangInstanceIdentifier.PathArgument> toEnter = domArgumentList.iterator();
-        DataTreeCandidateNode current = domData;
-        while (toEnter.hasNext() && current != null) {
-            current = current.getModifiedChild(toEnter.next());
-        }
-        return current != null && current.getModificationType() != UNMODIFIED ? create(childCodec, current) : null;
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public <C extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<C>> DataObjectModification<C>
-            getModifiedChildListItem(final Class<C> listItem, final K listKey) {
-        return (DataObjectModification<C>) getModifiedChild(IdentifiableItem.of(listItem, listKey));
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public <H extends ChoiceIn<? super T> & DataObject, C extends Identifiable<K> & ChildOf<? super H>,
-            K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(final Class<H> caseType,
-                        final Class<C> listItem, final K listKey) {
-        return (DataObjectModification<C>) getModifiedChild(IdentifiableItem.of(caseType, listItem, listKey));
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(final Class<C> child) {
-        return (DataObjectModification<C>) getModifiedChild(Item.of(child));
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public <H extends ChoiceIn<? super T> & DataObject, C extends ChildOf<? super H>> DataObjectModification<C>
-            getModifiedChildContainer(final Class<H> caseType, final Class<C> child) {
-        return (DataObjectModification<C>) getModifiedChild(Item.of(caseType, child));
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(
-            final Class<C> augmentation) {
-        return (DataObjectModification<C>) getModifiedChild(Item.of(augmentation));
-    }
-
-    private T deserialize(final Optional<NormalizedNode<?, ?>> dataAfter) {
-        return dataAfter.map(codec::deserialize).orElse(null);
-    }
-
-    @Override
-    public String toString() {
-        return getClass().getSimpleName() + "{identifier = " + identifier + ", domData = " + domData + "}";
-    }
-}
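
The getModifiedChild() implementation above translates a binding path argument into a list of DOM path arguments and then descends the candidate tree one step at a time, giving up as soon as any step has no modified child. A minimal sketch of that descent loop, using a hypothetical string-keyed ToyNode in place of DataTreeCandidateNode (all names below are illustrative, not part of the deleted API):

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;

    final class ToyNode {
        private final Map<String, ToyNode> children = new HashMap<>();

        ToyNode addChild(final String name) {
            final ToyNode child = new ToyNode();
            children.put(name, child);
            return child;
        }

        // Stand-in for DataTreeCandidateNode.getModifiedChild(PathArgument).
        ToyNode getModifiedChild(final String arg) {
            return children.get(arg);
        }

        // Same shape as the loop above: follow the translated path step by step and
        // stop as soon as any step is missing.
        static ToyNode descend(final ToyNode root, final List<String> domArguments) {
            ToyNode current = root;
            final Iterator<String> toEnter = domArguments.iterator();
            while (toEnter.hasNext() && current != null) {
                current = current.getModifiedChild(toEnter.next());
            }
            return current;
        }
    }

A null result from descend() corresponds to the null return above, which the adapter reports as "this child was not modified".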
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataTreeModification.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataTreeModification.java
deleted file mode 100644 (file)
index 3b14012..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTreeNode;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-
-/**
- * Lazily translated {@link DataTreeModification} based on {@link DataTreeCandidate}.
- *
- * <p>
- * A {@link DataTreeModification} represents a data tree change event, but the whole tree is not
- * translated or resolved eagerly; only the child nodes which the user of the data object
- * modification directly accesses are resolved.
- */
-class LazyDataTreeModification<T extends DataObject> implements DataTreeModification<T> {
-
-    private final DataTreeIdentifier<T> path;
-    private final DataObjectModification<T> rootNode;
-
-    LazyDataTreeModification(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> path,
-            final BindingCodecTreeNode<T> codec, final DataTreeCandidate domChange) {
-        this.path = new DataTreeIdentifier<>(datastoreType, path);
-        this.rootNode = LazyDataObjectModification.create(codec, domChange.getRootNode());
-    }
-
-    @Override
-    public DataObjectModification<T> getRootNode() {
-        return rootNode;
-    }
-
-    @Override
-    public DataTreeIdentifier<T> getRootPath() {
-        return path;
-    }
-
-    @SuppressWarnings({"unchecked", "rawtypes"})
-    static <T extends DataObject> DataTreeModification<T> create(final BindingToNormalizedNodeCodec codec,
-            final DataTreeCandidate domChange, final LogicalDatastoreType datastoreType) {
-        final Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> codecCtx =
-                codec.getSubtreeCodec(domChange.getRootPath());
-        return new LazyDataTreeModification(datastoreType, codecCtx.getKey(), codecCtx.getValue(), domChange);
-    }
-
-    static <T extends DataObject> Collection<DataTreeModification<T>> from(final BindingToNormalizedNodeCodec codec,
-            final Collection<DataTreeCandidate> domChanges, final LogicalDatastoreType datastoreType) {
-        final List<DataTreeModification<T>> result = new ArrayList<>(domChanges.size());
-        for (final DataTreeCandidate domChange : domChanges) {
-            result.add(LazyDataTreeModification.<T>create(codec, domChange, datastoreType));
-        }
-        return result;
-    }
-
-    @Override
-    public String toString() {
-        return getClass().getSimpleName() + "{path = " + path + ", rootNode = " + rootNode + "}";
-    }
-}
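
The class above exists so that listeners written against the binding API receive DataTreeModification objects whose translation cost is deferred. A hypothetical consumer sketch, assuming the DataObjectModification accessors (getModificationType(), getDataAfter()) from the controller binding API and the Top test container used elsewhere in this change; nothing below is part of the deleted file itself:

    import java.util.Collection;
    import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
    import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
    import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;

    // Only what the listener actually touches (root path, root node, post-change data) is
    // translated from the underlying DOM DataTreeCandidate.
    final class TopChangeLogger implements DataTreeChangeListener<Top> {
        @Override
        public void onDataTreeChanged(final Collection<DataTreeModification<Top>> changes) {
            for (DataTreeModification<Top> change : changes) {
                final DataObjectModification<Top> root = change.getRootNode();
                System.out.println("change at " + change.getRootPath() + ": " + root.getModificationType()
                    + ", data after: " + root.getDataAfter());
            }
        }
    }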
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazySerializedContainerNode.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazySerializedContainerNode.java
deleted file mode 100644 (file)
index bed7d31..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import java.util.Collection;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.mdsal.binding.dom.adapter.BindingDataAware;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-// FIXME: Should this be moved to binding-data-codec?
-class LazySerializedContainerNode implements ContainerNode, BindingDataAware {
-
-    private final NodeIdentifier identifier;
-    private final DataObject bindingData;
-
-    private BindingNormalizedNodeSerializer registry;
-    private ContainerNode domData;
-
-    private LazySerializedContainerNode(final QName identifier, final DataObject binding,
-            final BindingNormalizedNodeSerializer registry) {
-        this.identifier = new NodeIdentifier(identifier);
-        this.bindingData = binding;
-        this.registry = registry;
-        this.domData = null;
-    }
-
-    static NormalizedNode<?, ?> create(final SchemaPath rpcName, final DataObject data,
-            final BindingNormalizedNodeSerializer codec) {
-        return new LazySerializedContainerNode(rpcName.getLastComponent(), data, codec);
-    }
-
-    static NormalizedNode<?, ?> withContextRef(final SchemaPath rpcName, final DataObject data,
-            final LeafNode<?> contextRef, final BindingNormalizedNodeSerializer codec) {
-        return new WithContextRef(rpcName.getLastComponent(), data, contextRef, codec);
-    }
-
-    @Override
-    public Map<QName, String> getAttributes() {
-        return delegate().getAttributes();
-    }
-
-    private ContainerNode delegate() {
-        if (domData == null) {
-            domData = registry.toNormalizedNodeRpcData(bindingData);
-            registry = null;
-        }
-        return domData;
-    }
-
-    @Override
-    public final QName getNodeType() {
-        return identifier.getNodeType();
-    }
-
-    @Override
-    public final Collection<DataContainerChild<? extends PathArgument, ?>> getValue() {
-        return delegate().getValue();
-    }
-
-    @Override
-    public final NodeIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    @Override
-    public Optional<DataContainerChild<? extends PathArgument, ?>> getChild(final PathArgument child) {
-        return delegate().getChild(child);
-    }
-
-    @Override
-    public final Object getAttributeValue(final QName name) {
-        return delegate().getAttributeValue(name);
-    }
-
-    @Override
-    public final DataObject bindingData() {
-        return bindingData;
-    }
-
-    /**
-     * Lazy serialized node with a pre-cached serialized leaf holding routing information,
-     * so that extracting the routing context does not require full serialization.
-     */
-    private static final class WithContextRef extends LazySerializedContainerNode {
-
-        private final LeafNode<?> contextRef;
-
-        protected WithContextRef(final QName identifier, final DataObject binding, final LeafNode<?> contextRef,
-                final BindingNormalizedNodeSerializer registry) {
-            super(identifier, binding, registry);
-            this.contextRef = contextRef;
-        }
-
-        @Override
-        public Optional<DataContainerChild<? extends PathArgument, ?>> getChild(final PathArgument child) {
-            /*
-             * Use the pre-cached value of the routing field instead of running full serialization
-             * when only that leaf is accessed.
-             */
-            if (contextRef.getIdentifier().equals(child)) {
-                return Optional.<DataContainerChild<? extends PathArgument, ?>>of(contextRef);
-            }
-            return super.getChild(child);
-        }
-    }
-
-}
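
The WithContextRef subclass above answers a lookup of the routing leaf from a pre-computed value and only falls back to full serialization for any other child. The same shape in isolation, as a generic sketch (PrecachedLookup and all of its names are invented for illustration):

    import java.util.Optional;
    import java.util.function.Function;

    // One pre-computed entry answers the hot lookup (the routing leaf); everything else
    // falls back to the expensive path.
    final class PrecachedLookup<K, V> {
        private final K cachedKey;
        private final V cachedValue;
        private final Function<K, Optional<V>> fullSerialization;

        PrecachedLookup(final K cachedKey, final V cachedValue, final Function<K, Optional<V>> fullSerialization) {
            this.cachedKey = cachedKey;
            this.cachedValue = cachedValue;
            this.fullSerialization = fullSerialization;
        }

        Optional<V> getChild(final K key) {
            return cachedKey.equals(key) ? Optional.of(cachedValue) : fullSerialization.apply(key);
        }
    }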
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazySerializedDOMNotification.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazySerializedDOMNotification.java
deleted file mode 100644 (file)
index 3e3e8be..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Lazy serialized implementation of a DOM notification.
- *
- * <p>
- * This implementation serializes the notification body only if the receiver of the
- * notification actually accesses its data, so receivers which never look at the body
- * do not pay the translation cost.
- */
-public final class LazySerializedDOMNotification implements DOMNotification {
-
-    private final BindingNormalizedNodeSerializer codec;
-    private final Notification data;
-    private final SchemaPath type;
-
-    private ContainerNode domBody;
-
-    private LazySerializedDOMNotification(final BindingNormalizedNodeSerializer codec, final Notification data,
-            final SchemaPath type) {
-        this.codec = codec;
-        this.data = data;
-        this.type = type;
-    }
-
-    static DOMNotification create(final BindingNormalizedNodeSerializer codec, final Notification data) {
-        final SchemaPath type = SchemaPath.create(true, BindingReflections.findQName(data.getImplementedInterface()));
-        return new LazySerializedDOMNotification(codec, data, type);
-    }
-
-    @Override
-    public SchemaPath getType() {
-        return type;
-    }
-
-    @Override
-    public ContainerNode getBody() {
-        if (domBody == null) {
-            domBody = codec.toNormalizedNodeNotification(data);
-        }
-        return domBody;
-    }
-
-    public Notification getBindingData() {
-        return data;
-    }
-}
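
getBody() above, like delegate() in LazySerializedContainerNode, computes the DOM representation on first access and then reuses it. A minimal generic sketch of that compute-once shape; like the fields above it uses no synchronization, and all names are illustrative:

    import java.util.function.Supplier;

    final class LazyBody<T> {
        private Supplier<T> serializer;   // dropped after first use so captured state can be collected
        private T cached;

        LazyBody(final Supplier<T> serializer) {
            this.serializer = serializer;
        }

        T get() {
            if (cached == null) {
                cached = serializer.get();
                serializer = null;
            }
            return cached;
        }
    }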
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/RpcServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/RpcServiceAdapter.java
deleted file mode 100644 (file)
index 51a4280..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.lang.reflect.InvocationHandler;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.util.Collection;
-import java.util.Map.Entry;
-import java.util.concurrent.Future;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.broker.spi.rpc.RpcRoutingStrategy;
-import org.opendaylight.controller.sal.core.compat.LegacyDOMRpcResultFutureAdapter;
-import org.opendaylight.mdsal.binding.dom.adapter.BindingRpcFutureAware;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.DataContainer;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-class RpcServiceAdapter implements InvocationHandler {
-
-    private final ImmutableMap<Method, RpcInvocationStrategy> rpcNames;
-    private final Class<? extends RpcService> type;
-    private final BindingToNormalizedNodeCodec codec;
-    private final DOMRpcService delegate;
-    private final RpcService proxy;
-
-    RpcServiceAdapter(final Class<? extends RpcService> type, final BindingToNormalizedNodeCodec codec,
-            final DOMRpcService domService) {
-        this.type = Preconditions.checkNotNull(type);
-        this.codec = Preconditions.checkNotNull(codec);
-        this.delegate = Preconditions.checkNotNull(domService);
-        final ImmutableMap.Builder<Method, RpcInvocationStrategy> rpcBuilder = ImmutableMap.builder();
-        for (final Entry<Method, RpcDefinition> rpc : codec.getRpcMethodToSchema(type).entrySet()) {
-            rpcBuilder.put(rpc.getKey(), createStrategy(rpc.getKey(), rpc.getValue()));
-        }
-        rpcNames = rpcBuilder.build();
-        proxy = (RpcService) Proxy.newProxyInstance(type.getClassLoader(), new Class[] {type}, this);
-    }
-
-    ListenableFuture<RpcResult<?>> invoke0(final SchemaPath schemaPath, final NormalizedNode<?, ?> input) {
-        final CheckedFuture<DOMRpcResult, DOMRpcException> result = delegate.invokeRpc(schemaPath, input);
-        if (result instanceof BindingRpcFutureAware) {
-            return ((BindingRpcFutureAware) result).getBindingFuture();
-        } else if (result instanceof LegacyDOMRpcResultFutureAdapter) {
-            Future<org.opendaylight.mdsal.dom.api.DOMRpcResult> delegateFuture =
-                    ((LegacyDOMRpcResultFutureAdapter)result).delegate();
-            if (delegateFuture instanceof BindingRpcFutureAware) {
-                return ((BindingRpcFutureAware) delegateFuture).getBindingFuture();
-            }
-        }
-
-        return transformFuture(schemaPath, result, codec.getCodecFactory());
-    }
-
-    private RpcInvocationStrategy createStrategy(final Method method, final RpcDefinition schema) {
-        final RpcRoutingStrategy strategy = RpcRoutingStrategy.from(schema);
-        if (strategy.isContextBasedRouted()) {
-            return new RoutedStrategy(schema.getPath(), method, strategy.getLeaf());
-        }
-        return new NonRoutedStrategy(schema.getPath());
-    }
-
-    RpcService getProxy() {
-        return proxy;
-    }
-
-    @Override
-    public Object invoke(final Object proxyObj, final Method method, final Object[] args) {
-
-        final RpcInvocationStrategy rpc = rpcNames.get(method);
-        if (rpc != null) {
-            if (method.getParameterCount() == 0) {
-                return rpc.invokeEmpty();
-            }
-            if (args.length != 1) {
-                throw new IllegalArgumentException("Input must be provided.");
-            }
-            return rpc.invoke((DataObject) args[0]);
-        }
-
-        if (isObjectMethod(method)) {
-            return callObjectMethod(proxyObj, method, args);
-        }
-        throw new UnsupportedOperationException("Method " + method.toString() + " is unsupported.");
-    }
-
-    private static boolean isObjectMethod(final Method method) {
-        switch (method.getName()) {
-            case "toString":
-                return method.getReturnType().equals(String.class) && method.getParameterCount() == 0;
-            case "hashCode":
-                return method.getReturnType().equals(int.class) && method.getParameterCount() == 0;
-            case "equals":
-                return method.getReturnType().equals(boolean.class) && method.getParameterCount() == 1 && method
-                        .getParameterTypes()[0] == Object.class;
-            default:
-                return false;
-        }
-    }
-
-    private Object callObjectMethod(final Object self, final Method method, final Object[] args) {
-        switch (method.getName()) {
-            case "toString":
-                return type.getName() + "$Adapter{delegate=" + delegate.toString() + "}";
-            case "hashCode":
-                return System.identityHashCode(self);
-            case "equals":
-                return self == args[0];
-            default:
-                return null;
-        }
-    }
-
-    private static ListenableFuture<RpcResult<?>> transformFuture(final SchemaPath rpc,
-            final ListenableFuture<DOMRpcResult> domFuture, final BindingNormalizedNodeSerializer codec) {
-        return Futures.transform(domFuture, input -> {
-            final NormalizedNode<?, ?> domData = input.getResult();
-            final DataObject bindingResult;
-            if (domData != null) {
-                final SchemaPath rpcOutput = rpc.createChild(QName.create(rpc.getLastComponent(), "output"));
-                bindingResult = codec.fromNormalizedNodeRpcData(rpcOutput, (ContainerNode) domData);
-            } else {
-                bindingResult = null;
-            }
-
-            // DOMRpcResult does not have a notion of success, hence we have to reverse-engineer it by looking
-            // at reported errors and checking whether they are just warnings.
-            final Collection<? extends RpcError> errors = input.getErrors();
-            return RpcResult.class.cast(RpcResultBuilder.status(errors.stream()
-                .noneMatch(error -> error.getSeverity() == ErrorSeverity.ERROR))
-                .withResult(bindingResult).withRpcErrors(errors).build());
-        }, MoreExecutors.directExecutor());
-    }
-
-    private abstract class RpcInvocationStrategy {
-
-        private final SchemaPath rpcName;
-
-        protected RpcInvocationStrategy(final SchemaPath path) {
-            rpcName = path;
-        }
-
-        final ListenableFuture<RpcResult<?>> invoke(final DataObject input) {
-            return invoke0(rpcName, serialize(input));
-        }
-
-        abstract NormalizedNode<?, ?> serialize(DataObject input);
-
-        final ListenableFuture<RpcResult<?>> invokeEmpty() {
-            return invoke0(rpcName, null);
-        }
-
-        final SchemaPath getRpcName() {
-            return rpcName;
-        }
-    }
-
-    private final class NonRoutedStrategy extends RpcInvocationStrategy {
-
-        protected NonRoutedStrategy(final SchemaPath path) {
-            super(path);
-        }
-
-        @Override
-        NormalizedNode<?, ?> serialize(final DataObject input) {
-            return LazySerializedContainerNode.create(getRpcName(), input, codec.getCodecRegistry());
-        }
-    }
-
-    private final class RoutedStrategy extends RpcInvocationStrategy {
-
-        private final ContextReferenceExtractor refExtractor;
-        private final NodeIdentifier contextName;
-
-        protected RoutedStrategy(final SchemaPath path, final Method rpcMethod, final QName leafName) {
-            super(path);
-            final Class<? extends DataContainer> inputType = BindingReflections.resolveRpcInputClass(rpcMethod).get();
-            refExtractor = ContextReferenceExtractor.from(inputType);
-            this.contextName = new NodeIdentifier(leafName);
-        }
-
-        @Override
-        NormalizedNode<?, ?> serialize(final DataObject input) {
-            final InstanceIdentifier<?> bindingII = refExtractor.extract(input);
-            if (bindingII != null) {
-                final YangInstanceIdentifier yangII = codec.toYangInstanceIdentifierCached(bindingII);
-                final LeafNode<?> contextRef = ImmutableNodes.leafNode(contextName, yangII);
-                return LazySerializedContainerNode.withContextRef(getRpcName(), input, contextRef,
-                        codec.getCodecRegistry());
-            }
-            return LazySerializedContainerNode.create(getRpcName(), input, codec.getCodecRegistry());
-        }
-    }
-}
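
RpcServiceAdapter backs each generated RpcService interface with a java.lang.reflect.Proxy: invoke() looks the called Method up in a pre-built strategy map, serializes the input, and answers only the genuine Object methods (toString/hashCode/equals) locally, which is why isObjectMethod() checks parameter counts and types. A self-contained sketch of that dispatch shape, with a hypothetical EchoService standing in for a generated RPC interface:

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    public final class ProxyDispatchSketch {
        public interface EchoService {
            String echo(String input);
        }

        public static void main(final String[] args) {
            final InvocationHandler handler = (proxyObj, method, invocationArgs) -> {
                switch (method.getName()) {
                    case "echo":
                        // The real adapter serializes the input and invokes the DOM RPC here.
                        return "echo: " + invocationArgs[0];
                    case "toString":
                        return "EchoService$Adapter";
                    case "hashCode":
                        return System.identityHashCode(proxyObj);
                    case "equals":
                        return proxyObj == invocationArgs[0];
                    default:
                        throw new UnsupportedOperationException("Method " + method + " is unsupported.");
                }
            };

            final EchoService proxy = (EchoService) Proxy.newProxyInstance(
                EchoService.class.getClassLoader(), new Class<?>[] {EchoService.class}, handler);
            System.out.println(proxy.echo("hello"));
        }
    }

Running it prints "echo: hello"; in the real adapter the "echo" branch is replaced by the RpcInvocationStrategy lookup and the DOM invocation shown above.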
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterBuilder.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterBuilder.java
deleted file mode 100644 (file)
index 8fc838b..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.spi;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.collect.MutableClassToInstanceMap;
-import java.util.Set;
-import org.opendaylight.yangtools.concepts.Builder;
-
-public abstract class AdapterBuilder<T, D> implements Builder<T> {
-
-    private final ClassToInstanceMap<D> delegates = MutableClassToInstanceMap.create();
-
-    public abstract Set<? extends Class<? extends D>> getRequiredDelegates();
-
-    protected abstract T createInstance(ClassToInstanceMap<D> classToInstanceMap);
-
-    private void checkAllRequiredServices() {
-        for (final Class<? extends D> type : getRequiredDelegates()) {
-            Preconditions.checkState(delegates.get(type) != null, "Required service %s is not defined.", type);
-        }
-    }
-
-    public final <V extends D> void addDelegate(final Class<V> type, final D impl) {
-        delegates.put(type, impl);
-    }
-
-    @Override
-    public final T build() {
-        checkAllRequiredServices();
-        return createInstance(ImmutableClassToInstanceMap.<D,D>copyOf(delegates));
-    }
-}
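
AdapterBuilder above collects delegates into a ClassToInstanceMap and refuses to build until every required delegate is present. A usage sketch under the assumption of hypothetical Reader, Writer and Broker types; only AdapterBuilder itself comes from the deleted file:

    import com.google.common.collect.ClassToInstanceMap;
    import com.google.common.collect.ImmutableSet;
    import java.util.Set;
    import org.opendaylight.controller.md.sal.binding.spi.AdapterBuilder;

    // Hypothetical delegate hierarchy and product type, purely for illustration.
    interface Delegate { }
    final class Reader implements Delegate { }
    final class Writer implements Delegate { }
    final class Broker {
        Broker(final Reader reader, final Writer writer) {
            // wire the two delegates together
        }
    }

    final class BrokerBuilder extends AdapterBuilder<Broker, Delegate> {
        @Override
        public Set<? extends Class<? extends Delegate>> getRequiredDelegates() {
            return ImmutableSet.<Class<? extends Delegate>>of(Reader.class, Writer.class);
        }

        @Override
        protected Broker createInstance(final ClassToInstanceMap<Delegate> delegates) {
            return new Broker(delegates.getInstance(Reader.class), delegates.getInstance(Writer.class));
        }
    }

A caller then invokes addDelegate(Reader.class, new Reader()), addDelegate(Writer.class, new Writer()) and build(); omitting a required delegate trips the checkState() precondition.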
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterFactory.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterFactory.java
deleted file mode 100644 (file)
index 5656471..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.spi;
-
-import com.google.common.annotations.Beta;
-import org.eclipse.jdt.annotation.NonNullByDefault;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-
-@Beta
-@NonNullByDefault
-public interface AdapterFactory {
-    /**
-     * Return a {@link DataBroker} implementation backed by the specified {@link DOMDataBroker}.
-     *
-     * @param domBroker Backing DOMDataBroker
-     * @return A DataBroker instance.
-     * @throws NullPointerException if {@code domBroker} is null.
-     */
-    DataBroker createDataBroker(DOMDataBroker domBroker);
-}
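
A hypothetical consumer of this factory, assuming both the AdapterFactory and the backing DOMDataBroker are injected (for example via the blueprint wiring later in this change):

    import org.opendaylight.controller.md.sal.binding.api.DataBroker;
    import org.opendaylight.controller.md.sal.binding.spi.AdapterFactory;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;

    final class BrokerBootstrap {
        // Derive the binding-level broker once and hand it out to binding-aware components.
        static DataBroker bindingBroker(final AdapterFactory factory, final DOMDataBroker domBroker) {
            return factory.createDataBroker(domBroker);
        }
    }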
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterLoader.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/spi/AdapterLoader.java
deleted file mode 100644 (file)
index d956746..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.spi;
-
-import com.google.common.base.Optional;
-import com.google.common.cache.CacheLoader;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-
-public abstract class AdapterLoader<T, D> extends CacheLoader<Class<? extends T>, Optional<T>> {
-
-    @Override
-    public Optional<T> load(final Class<? extends T> key) {
-
-        final AdapterBuilder<? extends T, D> builder = createBuilder(key);
-        for (final Class<? extends D> reqDeleg : builder.getRequiredDelegates()) {
-            final D deleg = getDelegate(reqDeleg);
-            if (deleg != null) {
-                builder.addDelegate(reqDeleg, deleg);
-            } else {
-                return Optional.absent();
-            }
-        }
-        return Optional.<T>of(builder.build());
-    }
-
-    protected abstract @Nullable D getDelegate(Class<? extends D> reqDeleg);
-
-    protected abstract @NonNull AdapterBuilder<? extends T, D> createBuilder(Class<? extends T> key);
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/impl/RootBindingAwareBroker.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/sal/binding/impl/RootBindingAwareBroker.java
deleted file mode 100644 (file)
index fc53bbe..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.impl;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.MountPointService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.util.AbstractBindingSalProviderInstance;
-import org.opendaylight.controller.md.sal.binding.util.BindingContextUtils;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.controller.sal.binding.api.BindingAwareConsumer;
-import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
-import org.opendaylight.controller.sal.binding.api.BindingAwareService;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.api.NotificationService;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.Mutable;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.osgi.framework.BundleContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@Deprecated
-public class RootBindingAwareBroker implements Mutable, Identifiable<String>, BindingAwareBroker, AutoCloseable,
-        RpcProviderRegistry {
-
-    private static final Logger LOG = LoggerFactory.getLogger(RootBindingAwareBroker.class);
-
-    RootSalInstance controllerRoot;
-
-    private final String identifier;
-
-    private RpcProviderRegistry rpcBroker;
-
-    private NotificationProviderService notificationBroker;
-
-    private NotificationPublishService notificationPublishService;
-
-    private DataBroker dataBroker;
-
-    private ImmutableClassToInstanceMap<BindingAwareService> supportedConsumerServices;
-
-    private ImmutableClassToInstanceMap<BindingAwareService> supportedProviderServices;
-
-    private MountPointService mountService;
-
-    public RootBindingAwareBroker(final String instanceName) {
-        this.identifier = instanceName;
-    }
-
-    @Override
-    public String getIdentifier() {
-        return identifier;
-    }
-
-    public RootSalInstance getRoot() {
-        return controllerRoot;
-    }
-
-    public NotificationProviderService getNotificationBroker() {
-        return this.notificationBroker;
-    }
-
-    public NotificationPublishService getNotificationPublishService() {
-        return this.notificationPublishService;
-    }
-
-    public RpcProviderRegistry getRpcProviderRegistry() {
-        return this.rpcBroker;
-    }
-
-    public RpcProviderRegistry getRpcBroker() {
-        return rpcBroker;
-    }
-
-    public MountPointService getMountService() {
-        return mountService;
-    }
-
-    public void setDataBroker(final DataBroker asyncDataBroker) {
-        dataBroker = asyncDataBroker;
-    }
-
-    public void setMountService(final MountPointService mount) {
-        this.mountService = mount;
-    }
-
-    public void setRpcBroker(final RpcProviderRegistry rpcBroker) {
-        this.rpcBroker = rpcBroker;
-    }
-
-    public void setNotificationBroker(final NotificationProviderService notificationBroker) {
-        this.notificationBroker = notificationBroker;
-    }
-
-    public void setNotificationPublishService(final NotificationPublishService notificationPublishService) {
-        this.notificationPublishService = notificationPublishService;
-    }
-
-    public void start() {
-        checkState(controllerRoot == null, "Binding Aware Broker was already started.");
-        LOG.info("Starting Binding Aware Broker: {}", identifier);
-
-        controllerRoot = new RootSalInstance(getRpcProviderRegistry(), getNotificationBroker());
-
-        final ImmutableClassToInstanceMap.Builder<BindingAwareService> consBuilder = ImmutableClassToInstanceMap
-                .builder();
-
-        consBuilder.put(NotificationService.class, getRoot());
-        consBuilder.put(RpcConsumerRegistry.class, getRoot());
-        if (dataBroker != null) {
-            consBuilder.put(DataBroker.class, dataBroker);
-        }
-        consBuilder.put(MountPointService.class, mountService);
-
-        supportedConsumerServices = consBuilder.build();
-        final ImmutableClassToInstanceMap.Builder<BindingAwareService> provBuilder = ImmutableClassToInstanceMap
-                .builder();
-        provBuilder.putAll(supportedConsumerServices).put(NotificationProviderService.class, getRoot())
-                .put(RpcProviderRegistry.class, getRoot());
-        if (notificationPublishService != null) {
-            provBuilder.put(NotificationPublishService.class, notificationPublishService);
-        }
-
-        supportedProviderServices = provBuilder.build();
-    }
-
-    @Override
-    public ConsumerContext registerConsumer(final BindingAwareConsumer consumer, final BundleContext ctx) {
-        return registerConsumer(consumer);
-    }
-
-    @Override
-    public ConsumerContext registerConsumer(final BindingAwareConsumer consumer) {
-        checkState(supportedConsumerServices != null, "Broker is not initialized.");
-        return BindingContextUtils.createConsumerContextAndInitialize(consumer, supportedConsumerServices);
-    }
-
-    @Override
-    public ProviderContext registerProvider(final BindingAwareProvider provider, final BundleContext ctx) {
-        return registerProvider(provider);
-    }
-
-    @Override
-    public ProviderContext registerProvider(final BindingAwareProvider provider) {
-        checkState(supportedProviderServices != null, "Broker is not initialized.");
-        return BindingContextUtils.createProviderContextAndInitialize(provider, supportedProviderServices);
-    }
-
-    @Override
-    public void close() {
-        // FIXME: Close all sessions
-    }
-
-    @Override
-    public <T extends RpcService> RoutedRpcRegistration<T> addRoutedRpcImplementation(final Class<T> type,
-            final T implementation) throws IllegalStateException {
-        return getRoot().addRoutedRpcImplementation(type, implementation);
-    }
-
-    @Override
-    public <T extends RpcService> RpcRegistration<T> addRpcImplementation(final Class<T> type, final T implementation)
-            throws IllegalStateException {
-        return getRoot().addRpcImplementation(type, implementation);
-    }
-
-    @Override
-    public <T extends RpcService> T getRpcService(final Class<T> module) {
-        return getRoot().getRpcService(module);
-    }
-
-    @Override
-    public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L>
-            registerRouteChangeListener(final L listener) {
-        return getRoot().registerRouteChangeListener(listener);
-    }
-
-    public static class RootSalInstance extends
-            AbstractBindingSalProviderInstance<NotificationProviderService, RpcProviderRegistry> {
-
-        public RootSalInstance(final RpcProviderRegistry rpcRegistry,
-                final NotificationProviderService notificationBroker) {
-            super(rpcRegistry, notificationBroker);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/resources/OSGI-INF/blueprint/binding-broker.xml b/opendaylight/md-sal/sal-binding-broker/src/main/resources/OSGI-INF/blueprint/binding-broker.xml
deleted file mode 100644 (file)
index 8b13c00..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0">
-
-  <reference id="classLoadingStrategy" interface="org.opendaylight.mdsal.binding.generator.api.ClassLoadingStrategy" />
-  <reference id="schemaService" interface="org.opendaylight.mdsal.dom.api.DOMSchemaService" />
-  <reference id="domRpcService" interface="org.opendaylight.controller.md.sal.dom.api.DOMRpcService"/>
-  <reference id="domRpcRegistry" interface="org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService"/>
-  <reference id="domMountPointService" interface="org.opendaylight.controller.md.sal.dom.api.DOMMountPointService"/>
-  <reference id="domNotificationService" interface="org.opendaylight.controller.md.sal.dom.api.DOMNotificationService"/>
-  <reference id="domNotificationPublishService" interface="org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService"/>
-  <reference id="domNotificationListenerRegistry" interface="org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListenerRegistry"/>
-  <reference id="domDefaultDataBroker" interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"
-          odl:type="default"/>
-  <reference id="domPingPongDataBroker" interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"
-          odl:type="pingpong"/>
-
-  <bean id="wiring" class="org.opendaylight.controller.md.sal.binding.impl.BindingBrokerWiring">
-    <argument ref="classLoadingStrategy"/>
-    <argument ref="schemaService"/>
-    <argument ref="domRpcService"/>
-    <argument ref="domRpcRegistry"/>
-    <argument ref="domMountPointService"/>
-    <argument ref="domNotificationService"/>
-    <argument ref="domNotificationPublishService"/>
-    <argument ref="domNotificationListenerRegistry"/>
-    <argument ref="domDefaultDataBroker"/>
-    <argument ref="domPingPongDataBroker"/>
-  </bean>
-
-  <!-- Runtime binding/normalized mapping service -->
-
-  <bean id="mappingCodec" factory-ref="wiring" factory-method="getBindingToNormalizedNodeCodec" />
-
-  <service ref="mappingCodec" odl:type="default">
-    <interfaces>
-      <value>org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer</value>
-      <value>org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTreeFactory</value>
-    </interfaces>
-  </service>
-
-  <!-- TODO: remove this service as it is referring to a class. To do that we need to migrate everyone over to 'factory' -->
-  <service id="codec" ref="mappingCodec" interface="org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec"/>
-
-  <!-- Adapter service -->
-  <bean id="adapterFactory" factory-ref="wiring" factory-method="getAdapterFactory" />
-
-  <service id="factory" ref="adapterFactory">
-    <interfaces>
-      <value>org.opendaylight.controller.md.sal.binding.spi.AdapterFactory</value>
-    </interfaces>
-  </service>
-
-  <!-- Binding RPC Registry Service -->
-
-  <bean id="bindingRpcRegistry" factory-ref="wiring" factory-method="getRpcProviderRegistry" />
-
-  <service ref="bindingRpcRegistry" interface="org.opendaylight.controller.sal.binding.api.RpcProviderRegistry"
-          odl:type="default"/>
-
-  <!-- Binding MountPoint Service -->
-
-  <bean id="bindingMountPointService" factory-ref="wiring" factory-method="getMountPointService" />
-
-  <service ref="bindingMountPointService" interface="org.opendaylight.controller.md.sal.binding.api.MountPointService"
-          odl:type="default"/>
-
-  <!-- Binding Notification Service -->
-
-  <bean id="bindingNotificationServiceAdapter" factory-ref="wiring" factory-method="getNotificationService" />
-  <service ref="bindingNotificationServiceAdapter" interface="org.opendaylight.controller.md.sal.binding.api.NotificationService"
-          odl:type="default"/>
-
-  <bean id="bindingNotificationPublishAdapter" factory-ref="wiring" factory-method="getNotificationPublishService" />
-  <service ref="bindingNotificationPublishAdapter" interface="org.opendaylight.controller.md.sal.binding.api.NotificationPublishService"
-          odl:type="default"/>
-
-  <bean id="notificationProviderService" factory-ref="wiring" factory-method="getNotificationProviderService" />
-  <service ref="notificationProviderService" interface="org.opendaylight.controller.sal.binding.api.NotificationProviderService" odl:type="default" />
-
-  <bean id="deprecatedNotificationService" factory-ref="wiring" factory-method="getNotificationProviderService" />
-  <service ref="deprecatedNotificationService" interface="org.opendaylight.controller.sal.binding.api.NotificationService" odl:type="default" />
-
-  <!-- Binding DataBroker -->
-
-  <bean id="bindingDataBroker" factory-ref="wiring" factory-method="getDataBroker" />
-
-  <service ref="bindingDataBroker" interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"
-          odl:type="default"/>
-
-  <!-- Binding PingPong DataBroker -->
-
-  <bean id="bindingPingPongDataBroker" factory-ref="wiring" factory-method="getPingPongDataBroker" />
-
-  <service ref="bindingPingPongDataBroker" interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"
-          odl:type="pingpong"/>
-
-  <!-- Legacy BindingAwareBroker -->
-
-  <bean id="bindingNotificationProviderService" class="org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceWithInterestListeners">
-    <argument ref="bindingNotificationPublishAdapter"/>
-    <argument ref="bindingNotificationServiceAdapter"/>
-    <argument ref="domNotificationListenerRegistry"/>
-  </bean>
-
-  <bean id="bindingAwareBroker" class="org.opendaylight.controller.sal.binding.impl.RootBindingAwareBroker"
-      init-method="start" destroy-method="close">
-    <argument value="deprecated-BindingAwareBroker"/>
-    <property name="dataBroker" ref="bindingDataBroker"/>
-    <property name="mountService" ref="bindingMountPointService"/>
-    <property name="rpcBroker" ref="bindingRpcRegistry"/>
-    <property name="notificationBroker" ref="bindingNotificationProviderService"/>
-    <property name="notificationPublishService" ref="bindingNotificationPublishAdapter"/>
-  </bean>
-
-  <service ref="bindingAwareBroker" interface="org.opendaylight.controller.sal.binding.api.BindingAwareBroker"/>
-</blueprint>
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapterTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapterTest.java
deleted file mode 100644 (file)
index 3e68e48..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static org.mockito.AdditionalMatchers.not;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.verify;
-
-import java.util.Collection;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentMatchers;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.md.sal.binding.api.ClusteredDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Unit tests for BindingDOMDataTreeChangeServiceAdapter.
- *
- * @author Thomas Pantelis
- */
-public class BindingDOMDataTreeChangeServiceAdapterTest {
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
-
-    @Mock
-    private DOMDataTreeChangeService mockDOMService;
-
-    @Mock
-    private GeneratedClassLoadingStrategy classLoadingStrategy;
-
-    @Mock
-    private BindingNormalizedNodeCodecRegistry codecRegistry;
-
-    @Mock
-    private YangInstanceIdentifier mockYangID;
-
-    @SuppressWarnings("rawtypes")
-    @Mock
-    private ListenerRegistration mockDOMReg;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        doReturn(this.mockYangID).when(this.codecRegistry).toYangInstanceIdentifier(TOP_PATH);
-    }
-
-    @Test
-    public void testRegisterDataTreeChangeListener() {
-        final BindingToNormalizedNodeCodec codec =
-                new BindingToNormalizedNodeCodec(this.classLoadingStrategy, this.codecRegistry);
-
-        final DataTreeChangeService service = BindingDOMDataTreeChangeServiceAdapter.create(codec, this.mockDOMService);
-
-        doReturn(this.mockDOMReg).when(this.mockDOMService).registerDataTreeChangeListener(
-                domDataTreeIdentifier(this.mockYangID),
-                any(DOMDataTreeChangeListener.class));
-        final DataTreeIdentifier<Top> treeId = new DataTreeIdentifier<>(LogicalDatastoreType.CONFIGURATION, TOP_PATH);
-        final TestClusteredDataTreeChangeListener mockClusteredListener = new TestClusteredDataTreeChangeListener();
-        service.registerDataTreeChangeListener(treeId, mockClusteredListener);
-
-        verify(this.mockDOMService).registerDataTreeChangeListener(domDataTreeIdentifier(this.mockYangID),
-                isA(ClusteredDOMDataTreeChangeListener.class));
-
-        reset(this.mockDOMService);
-        doReturn(this.mockDOMReg).when(this.mockDOMService).registerDataTreeChangeListener(
-                domDataTreeIdentifier(this.mockYangID), any(DOMDataTreeChangeListener.class));
-        final TestDataTreeChangeListener mockNonClusteredListener = new TestDataTreeChangeListener();
-        service.registerDataTreeChangeListener(treeId, mockNonClusteredListener);
-
-        verify(this.mockDOMService).registerDataTreeChangeListener(domDataTreeIdentifier(this.mockYangID),
-                not(isA(ClusteredDOMDataTreeChangeListener.class)));
-    }
-
-    static DOMDataTreeIdentifier domDataTreeIdentifier(final YangInstanceIdentifier yangID) {
-        return ArgumentMatchers.argThat(treeId -> treeId.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
-                        && yangID.equals(treeId.getRootIdentifier()));
-    }
-
-    private static class TestClusteredDataTreeChangeListener implements ClusteredDataTreeChangeListener<Top> {
-        @Override
-        public void onDataTreeChanged(final Collection<DataTreeModification<Top>> changes) {
-        }
-    }
-
-    private static class TestDataTreeChangeListener implements DataTreeChangeListener<Top> {
-        @Override
-        public void onDataTreeChanged(final Collection<DataTreeModification<Top>> changes) {
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/BindingNormalizedCodecTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/BindingNormalizedCodecTest.java
deleted file mode 100644 (file)
index a72eba5..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import com.google.common.collect.ImmutableBiMap;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableSetMultimap;
-import com.google.common.collect.SetMultimap;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.lang.reflect.Method;
-import java.net.URI;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import javassist.ClassPool;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.test.AbstractSchemaAwareTest;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.DataObjectSerializerGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.StreamWriterGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.mdsal.binding.generator.util.JavassistUtils;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeLeafOnlyAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.OpendaylightTestRpcServiceService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.opendaylight.yangtools.yang.model.util.AbstractSchemaContext;
-
-public class BindingNormalizedCodecTest extends AbstractSchemaAwareTest {
-
-    private static final TopLevelListKey TOP_FOO_KEY = new TopLevelListKey("foo");
-    private static final InstanceIdentifier<TopLevelList> BA_TOP_LEVEL_LIST = InstanceIdentifier
-            .builder(Top.class).child(TopLevelList.class, TOP_FOO_KEY).build();
-    private static final InstanceIdentifier<TreeLeafOnlyAugment> BA_TREE_LEAF_ONLY =
-            BA_TOP_LEVEL_LIST.augmentation(TreeLeafOnlyAugment.class);
-    private static final InstanceIdentifier<TreeComplexUsesAugment> BA_TREE_COMPLEX_USES =
-            BA_TOP_LEVEL_LIST.augmentation(TreeComplexUsesAugment.class);
-    private static final QName SIMPLE_VALUE_QNAME = QName.create(TreeComplexUsesAugment.QNAME, "simple-value");
-    private static final QName NAME_QNAME = QName.create(Top.QNAME, "name");
-    private static final YangInstanceIdentifier BI_TOP_LEVEL_LIST = YangInstanceIdentifier.builder()
-            .node(Top.QNAME).node(TopLevelList.QNAME).nodeWithKey(
-                    TopLevelList.QNAME, NAME_QNAME, TOP_FOO_KEY.getName()).build();
-
-
-    private BindingToNormalizedNodeCodec codec;
-    private SchemaContext context;
-
-    @Override
-    protected void setupWithSchema(final SchemaContext schemaContext) {
-        this.context = schemaContext;
-        final DataObjectSerializerGenerator streamWriter = StreamWriterGenerator
-                .create(JavassistUtils.forClassPool(ClassPool.getDefault()));
-        final BindingNormalizedNodeCodecRegistry registry = new BindingNormalizedNodeCodecRegistry(streamWriter);
-        this.codec = new BindingToNormalizedNodeCodec(GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy(),
-                registry, true);
-    }
-
-    @Test
-    public void testComplexAugmentationSerialization() {
-        this.codec.onGlobalContextUpdated(this.context);
-        final PathArgument lastArg = this.codec.toYangInstanceIdentifier(BA_TREE_COMPLEX_USES).getLastPathArgument();
-        assertTrue(lastArg instanceof AugmentationIdentifier);
-    }
-
-
-    @Test
-    public void testLeafOnlyAugmentationSerialization() {
-        this.codec.onGlobalContextUpdated(this.context);
-        final PathArgument leafOnlyLastArg = this.codec.toYangInstanceIdentifier(BA_TREE_LEAF_ONLY)
-                .getLastPathArgument();
-        assertTrue(leafOnlyLastArg instanceof AugmentationIdentifier);
-        assertTrue(((AugmentationIdentifier) leafOnlyLastArg).getPossibleChildNames().contains(SIMPLE_VALUE_QNAME));
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testToYangInstanceIdentifierBlocking() {
-        this.codec.onGlobalContextUpdated(new EmptySchemaContext());
-
-        final CountDownLatch done = new CountDownLatch(1);
-        final AtomicReference<YangInstanceIdentifier> yangId = new AtomicReference<>();
-        final AtomicReference<RuntimeException> error = new AtomicReference<>();
-
-        new Thread(() -> {
-            try {
-                yangId.set(BindingNormalizedCodecTest.this.codec.toYangInstanceIdentifierBlocking(BA_TOP_LEVEL_LIST));
-            } catch (RuntimeException e) {
-                error.set(e);
-            } finally {
-                done.countDown();
-            }
-        }).start();
-
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        this.codec.onGlobalContextUpdated(this.context);
-
-        assertEquals("toYangInstanceIdentifierBlocking completed", true,
-                Uninterruptibles.awaitUninterruptibly(done, 3, TimeUnit.SECONDS));
-        if (error.get() != null) {
-            throw error.get();
-        }
-
-        assertEquals("toYangInstanceIdentifierBlocking", BI_TOP_LEVEL_LIST, yangId.get());
-    }
-
-    @Test
-    public void testGetRpcMethodToSchemaPathWithNoInitialSchemaContext() {
-        testGetRpcMethodToSchemaPath();
-    }
-
-    @Test
-    public void testGetRpcMethodToSchemaPathBlocking() {
-        this.codec.onGlobalContextUpdated(new EmptySchemaContext());
-        testGetRpcMethodToSchemaPath();
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void testGetRpcMethodToSchemaPath() {
-        final CountDownLatch done = new CountDownLatch(1);
-        final AtomicReference<ImmutableBiMap<Method, SchemaPath>> retMap = new AtomicReference<>();
-        final AtomicReference<RuntimeException> error = new AtomicReference<>();
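-        // Resolve the RPC method mapping on a worker thread while the schema context may not yet be available.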
-        new Thread(() -> {
-            try {
-                retMap.set(BindingNormalizedCodecTest.this.codec.getRpcMethodToSchemaPath(
-                        OpendaylightTestRpcServiceService.class));
-            } catch (RuntimeException e) {
-                error.set(e);
-            } finally {
-                done.countDown();
-            }
-        }).start();
-
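-        // Publish the schema context after a short delay, then wait for the lookup to complete.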
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        this.codec.onGlobalContextUpdated(this.context);
-
-        assertEquals("getRpcMethodToSchemaPath completed", true,
-                Uninterruptibles.awaitUninterruptibly(done, 3, TimeUnit.SECONDS));
-        if (error.get() != null) {
-            throw error.get();
-        }
-
-        for (final Method method : retMap.get().keySet()) {
-            if (method.getName().equals("rockTheHouse")) {
-                return;
-            }
-        }
-
-        fail("rockTheHouse RPC method not found");
-    }
-
-    static class EmptySchemaContext extends AbstractSchemaContext {
-        @Override
-        public Set<Module> getModules() {
-            return ImmutableSet.of();
-        }
-
-        @Override
-        protected Map<QNameModule, Module> getModuleMap() {
-            return ImmutableMap.of();
-        }
-
-        @Override
-        protected SetMultimap<URI, Module> getNamespaceToModules() {
-            return ImmutableSetMultimap.of();
-        }
-
-        @Override
-        protected SetMultimap<String, Module> getNameToModules() {
-            return ImmutableSetMultimap.of();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/ContextExtractorTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/ContextExtractorTest.java
deleted file mode 100644 (file)
index 3e1a688..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl;
-
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-
-import org.junit.Test;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.RockTheHouseInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.RockTheHouseInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.EncapsulatedRoute;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.EncapsulatedRouteInGrouping;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInputBuilder;
-import org.opendaylight.yangtools.yang.binding.DataContainer;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public final class ContextExtractorTest {
-
-    public interface Transitive extends EncapsulatedRouteInGrouping {
-
-    }
-
-    private static final InstanceIdentifier<?> TEST_ROUTE = InstanceIdentifier.create(Top.class);
-    private static final Transitive TEST_GROUPING = new Transitive() {
-
-        @Override
-        public Class<? extends DataContainer> getImplementedInterface() {
-            return Transitive.class;
-        }
-
-        @Override
-        public EncapsulatedRoute getRoute() {
-            return new EncapsulatedRoute(TEST_ROUTE);
-        }
-    };
-
-    @Test
-    public void testNonRoutedExtraction() {
-        final ContextReferenceExtractor extractor = ContextReferenceExtractor.from(RockTheHouseInput.class);
-        final RockTheHouseInput input = new RockTheHouseInputBuilder().build();
-        final InstanceIdentifier<?> extractedValue = extractor.extract(input);
-        assertNull(extractedValue);
-    }
-
-    @Test
-    public void testRoutedSimpleExtraction() {
-        final ContextReferenceExtractor extractor = ContextReferenceExtractor.from(RoutedSimpleRouteInput.class);
-        final RoutedSimpleRouteInput input = new RoutedSimpleRouteInputBuilder().setRoute(TEST_ROUTE).build();
-        final InstanceIdentifier<?> extractedValue = extractor.extract(input);
-        assertSame(TEST_ROUTE, extractedValue);
-    }
-
-    @Test
-    public void testRoutedEncapsulatedExtraction() {
-        final ContextReferenceExtractor extractor = ContextReferenceExtractor.from(EncapsulatedRouteInGrouping.class);
-        final InstanceIdentifier<?> extractedValue = extractor.extract(TEST_GROUPING);
-        assertSame(TEST_ROUTE, extractedValue);
-    }
-
-    @Test
-    public void testRoutedEncapsulatedTransitiveExtraction() {
-        final ContextReferenceExtractor extractor = ContextReferenceExtractor.from(Transitive.class);
-        final InstanceIdentifier<?> extractedValue = extractor.extract(TEST_GROUPING);
-        assertSame(TEST_ROUTE, extractedValue);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/BackwardsCompatibleNotificationBrokerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/BackwardsCompatibleNotificationBrokerTest.java
deleted file mode 100644 (file)
index 5b49bd7..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.collect.ImmutableList;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.test.AbstractNotificationBrokerTest;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.OpendaylightMdsalListTestListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelListChanged;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelListChangedBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-public class BackwardsCompatibleNotificationBrokerTest extends AbstractNotificationBrokerTest {
-
-    private NotificationProviderService notificationProviderService;
-
-    @Before
-    public void initTest() {
-        final NotificationService notificationService = getNotificationService();
-        final NotificationPublishService notificationPublishService = getNotificationPublishService();
-        notificationProviderService = new HeliumNotificationProviderServiceAdapter(notificationPublishService,
-                notificationService);
-    }
-
-    private TwoLevelListChanged createTestData() {
-        final TwoLevelListChangedBuilder tb = new TwoLevelListChangedBuilder();
-        tb.setTopLevelList(ImmutableList.of(new TopLevelListBuilder().withKey(new TopLevelListKey("test")).build()));
-        return tb.build();
-    }
-
-    @Test
-    public void testNotifSubscriptionForwarded() throws InterruptedException {
-        final CountDownLatch latch = new CountDownLatch(1);
-        final TwoLevelListChanged testData = createTestData();
-
-        final NotifTestListenerChild testNotifListener = new NotifTestListenerChild(latch);
-        final ListenerRegistration<NotificationListener> listenerRegistration =
-                notificationProviderService.registerNotificationListener(testNotifListener);
-        notificationProviderService.publish(testData);
-
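-        // The listener registered through the Helium adapter must receive the published notification.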
-        latch.await(500L, TimeUnit.MILLISECONDS);
-        assertEquals(1, testNotifListener.getReceivedNotifications().size());
-        assertEquals(testData, testNotifListener.getReceivedNotifications().get(0));
-        listenerRegistration.close();
-    }
-
-    private static class NotifTestListenerChild extends NotifTestListener {
-
-        NotifTestListenerChild(final CountDownLatch latch) {
-            super(latch);
-        }
-    }
-
-    private static class NotifTestListener implements OpendaylightMdsalListTestListener {
-        private final List<TwoLevelListChanged> receivedNotifications = new ArrayList<>();
-        private final CountDownLatch latch;
-
-        NotifTestListener(final CountDownLatch latch) {
-            this.latch = latch;
-        }
-
-        @Override
-        public void onTwoLevelListChanged(final TwoLevelListChanged notification) {
-            receivedNotifications.add(notification);
-            latch.countDown();
-        }
-
-        public List<TwoLevelListChanged> getReceivedNotifications() {
-            return receivedNotifications;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/BindingDOMDataBrokerAdapterTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/BindingDOMDataBrokerAdapterTest.java
deleted file mode 100644 (file)
index b01340a..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.collect.ImmutableMap;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.md.sal.binding.api.ClusteredDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class BindingDOMDataBrokerAdapterTest {
-
-    @Mock
-    DOMDataBroker dataBroker;
-
-    @Mock
-    GeneratedClassLoadingStrategy classLoadingStrategy;
-
-    @Mock
-    BindingNormalizedNodeCodecRegistry codecRegistry;
-
-    @Mock
-    DOMDataTreeChangeService dataTreeChangeService;
-
-    @Mock
-    ListenerRegistration<DOMDataTreeChangeListener> listenerRegistration;
-
-    @Mock
-    ClusteredDataTreeChangeListener<Top> clusteredDataTreeChangeListener;
-
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-    }
-
-    @Test
-    public void testClusteredDataTreeChangeListenerRegistration() {
-
-        doReturn(YangInstanceIdentifier.of(Top.QNAME)).when(codecRegistry).toYangInstanceIdentifier(TOP_PATH);
-
-        doReturn(listenerRegistration).when(dataTreeChangeService).registerDataTreeChangeListener(any(), any());
-
-        doReturn(ImmutableMap.of(DOMDataTreeChangeService.class, dataTreeChangeService))
-            .when(dataBroker).getSupportedExtensions();
-
-        final BindingToNormalizedNodeCodec codec =
-                new BindingToNormalizedNodeCodec(this.classLoadingStrategy, this.codecRegistry);
-
-        try (BindingDOMDataBrokerAdapter bindingDOMDataBrokerAdapter = new BindingDOMDataBrokerAdapter(this.dataBroker,
-                codec)) {
-
-            ListenerRegistration<ClusteredDataTreeChangeListener<Top>> bindingListenerReg =
-                bindingDOMDataBrokerAdapter.registerDataTreeChangeListener(
-                    new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, TOP_PATH),
-                    clusteredDataTreeChangeListener);
-
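-            // The clustered binding listener must be registered with the DOM service as a
-            // ClusteredDOMDataTreeChangeListener at the corresponding DOM data tree identifier.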
-            verify(dataTreeChangeService).registerDataTreeChangeListener(
-                eq(new DOMDataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(Top.QNAME))),
-                any(ClusteredDOMDataTreeChangeListener.class));
-
-            bindingListenerReg.close();
-
-            verify(listenerRegistration).close();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1125RegressionTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1125RegressionTest.java
deleted file mode 100644 (file)
index cf278bd..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugmentBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUsesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-/**
- * Regression test suite for Bug 1125 - Can't detect switch disconnection
- * https://bugs.opendaylight.org/show_bug.cgi?id=1125.
- */
-public class Bug1125RegressionTest extends AbstractDataTreeChangeListenerTest {
-
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier
-            .create(Top.class);
-    private static final InstanceIdentifier<TopLevelList> TOP_FOO_PATH = TOP_PATH
-            .child(TopLevelList.class, TOP_FOO_KEY);
-
-    private static final InstanceIdentifier<TreeComplexUsesAugment> FOO_AUGMENT_PATH = TOP_FOO_PATH
-            .augmentation(TreeComplexUsesAugment.class);
-
-    private static final InstanceIdentifier<TreeComplexUsesAugment> WILDCARDED_AUGMENT_PATH = TOP_PATH
-            .child(TopLevelList.class).augmentation(
-                    TreeComplexUsesAugment.class);
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Top.class),
-                BindingReflections.getModuleInfo(TreeComplexUsesAugment.class));
-    }
-
-    private TreeComplexUsesAugment writeInitialState() {
-        WriteTransaction initialTx = getDataBroker().newWriteOnlyTransaction();
-        initialTx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH,
-                new TopBuilder().build());
-        TreeComplexUsesAugment fooAugment = new TreeComplexUsesAugmentBuilder()
-                .setContainerWithUses(
-                        new ContainerWithUsesBuilder().setLeafFromGrouping(
-                                "foo").build()).build();
-        initialTx.put(LogicalDatastoreType.OPERATIONAL, path(TOP_FOO_KEY),
-                topLevelList(TOP_FOO_KEY, fooAugment));
-        assertCommit(initialTx.submit());
-        return fooAugment;
-    }
-
-    private void delete(final InstanceIdentifier<?> path) {
-        WriteTransaction tx = getDataBroker().newWriteOnlyTransaction();
-        tx.delete(LogicalDatastoreType.OPERATIONAL, path);
-        assertCommit(tx.submit());
-    }
-
-    private void deleteAndListenAugment(final InstanceIdentifier<?> path) {
-        TreeComplexUsesAugment augment = writeInitialState();
-        TestListener<TreeComplexUsesAugment> listener = createListener(LogicalDatastoreType.OPERATIONAL,
-                WILDCARDED_AUGMENT_PATH, added(FOO_AUGMENT_PATH, augment), deleted(FOO_AUGMENT_PATH, augment));
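-        // Whichever ancestor is deleted, the listener must see the augmentation added and then deleted.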
-        delete(path);
-        listener.verify();
-    }
-
-    @Test
-    public void deleteAndListenAugment() {
-        deleteAndListenAugment(TOP_PATH);
-
-        deleteAndListenAugment(TOP_FOO_PATH);
-
-        deleteAndListenAugment(FOO_AUGMENT_PATH);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1333DataChangeListenerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1333DataChangeListenerTest.java
deleted file mode 100644 (file)
index 1665bef..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.USES_ONE_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.USES_TWO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.complexUsesAugment;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUses;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-/**
- * This test suite tries to replicate bug 1333 and tests its regression
- * using a test model constructed similarly to the one reported.
- *
- * <p>
- * See https://bugs.opendaylight.org/show_bug.cgi?id=1333 for the bug description.
- */
-public class Bug1333DataChangeListenerTest extends AbstractDataTreeChangeListenerTest {
-
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
-
-    private static final InstanceIdentifier<TreeComplexUsesAugment> AUGMENT_WILDCARD =
-            TOP_PATH.child(TopLevelList.class).augmentation(TreeComplexUsesAugment.class);
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Top.class),
-                BindingReflections.getModuleInfo(TreeComplexUsesAugment.class));
-    }
-
-    private Top topWithListItem() {
-        return top(topLevelList(TOP_FOO_KEY, complexUsesAugment(USES_ONE_KEY, USES_TWO_KEY)));
-    }
-
-    public Top writeTopWithListItem(final LogicalDatastoreType store) {
-        ReadWriteTransaction tx = getDataBroker().newReadWriteTransaction();
-        Top topItem = topWithListItem();
-        tx.put(store, TOP_PATH, topItem);
-        assertCommit(tx.submit());
-        return topItem;
-    }
-
-    public void deleteItem(final LogicalDatastoreType store, final InstanceIdentifier<?> path) {
-        ReadWriteTransaction tx = getDataBroker().newReadWriteTransaction();
-        tx.delete(store, path);
-        assertCommit(tx.submit());
-    }
-
-    @Test
-    public void writeTopWithListItemAugmentedListenTopSubtree() {
-        TestListener<Top> listener = createListener(CONFIGURATION, TOP_PATH, added(TOP_PATH, topWithListItem()));
-
-        writeTopWithListItem(CONFIGURATION);
-
-        listener.verify();
-    }
-
-    @Test
-    public void writeTopWithListItemAugmentedListenAugmentSubtreeWildcarded() {
-        TestListener<TreeComplexUsesAugment> listener = createListener(CONFIGURATION, AUGMENT_WILDCARD,
-                added(path(TOP_FOO_KEY, TreeComplexUsesAugment.class), complexUsesAugment(USES_ONE_KEY, USES_TWO_KEY)));
-
-        writeTopWithListItem(CONFIGURATION);
-
-        listener.verify();
-    }
-
-    @Test
-    public void deleteAugmentChildListenTopSubtree() {
-        Top top = writeTopWithListItem(CONFIGURATION);
-
-        TestListener<Top> listener = createListener(CONFIGURATION, TOP_PATH, added(TOP_PATH, top),
-                subtreeModified(TOP_PATH, top, top(topLevelList(TOP_FOO_KEY, complexUsesAugment(USES_TWO_KEY)))));
-
-        InstanceIdentifier<ListViaUses> deletePath = path(TOP_FOO_KEY, USES_ONE_KEY);
-        deleteItem(CONFIGURATION, deletePath);
-
-        listener.verify();
-    }
-
-    @Test
-    public void deleteAugmentChildListenAugmentSubtreeWildcarded() {
-        writeTopWithListItem(CONFIGURATION);
-
-        TestListener<TreeComplexUsesAugment> listener = createListener(CONFIGURATION, AUGMENT_WILDCARD,
-                added(path(TOP_FOO_KEY, TreeComplexUsesAugment.class), complexUsesAugment(USES_ONE_KEY, USES_TWO_KEY)),
-                subtreeModified(path(TOP_FOO_KEY, TreeComplexUsesAugment.class),
-                    complexUsesAugment(USES_ONE_KEY, USES_TWO_KEY), complexUsesAugment(USES_TWO_KEY)));
-
-        InstanceIdentifier<?> deletePath = path(TOP_FOO_KEY, USES_ONE_KEY);
-        deleteItem(CONFIGURATION, deletePath);
-
-        listener.verify();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1418AugmentationTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug1418AugmentationTest.java
deleted file mode 100644 (file)
index f106742..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.complexUsesAugment;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.leafOnlyUsesAugment;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeLeafOnlyUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUsesKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-public class Bug1418AugmentationTest extends AbstractDataTreeChangeListenerTest {
-    private static final InstanceIdentifier<Top> TOP = InstanceIdentifier.create(Top.class);
-    private static final InstanceIdentifier<TopLevelList> TOP_FOO = TOP.child(TopLevelList.class, TOP_FOO_KEY);
-    private static final InstanceIdentifier<TreeLeafOnlyUsesAugment> SIMPLE_AUGMENT =
-            TOP.child(TopLevelList.class, TOP_FOO_KEY).augmentation(TreeLeafOnlyUsesAugment.class);
-    private static final InstanceIdentifier<TreeComplexUsesAugment> COMPLEX_AUGMENT =
-            TOP.child(TopLevelList.class, TOP_FOO_KEY).augmentation(TreeComplexUsesAugment.class);
-    private static final ListViaUsesKey LIST_VIA_USES_KEY =
-            new ListViaUsesKey("list key");
-    private static final ListViaUsesKey LIST_VIA_USES_KEY_MOD =
-            new ListViaUsesKey("list key modified");
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Top.class),
-                BindingReflections.getModuleInfo(TreeComplexUsesAugment.class),
-                BindingReflections.getModuleInfo(TreeLeafOnlyUsesAugment.class));
-    }
-
-    @Test
-    public void leafOnlyAugmentationCreatedTest() {
-        TreeLeafOnlyUsesAugment leafOnlyUsesAugment = leafOnlyUsesAugment("test leaf");
-        final TestListener<TreeLeafOnlyUsesAugment> listener = createListener(CONFIGURATION, SIMPLE_AUGMENT,
-                added(path(TOP_FOO_KEY, TreeLeafOnlyUsesAugment.class), leafOnlyUsesAugment));
-
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(CONFIGURATION, TOP, top());
-        writeTx.put(CONFIGURATION, TOP_FOO, topLevelList(new TopLevelListKey(TOP_FOO_KEY)));
-        writeTx.put(CONFIGURATION, SIMPLE_AUGMENT, leafOnlyUsesAugment);
-        assertCommit(writeTx.submit());
-
-        listener.verify();
-    }
-
-    @Test
-    public void leafOnlyAugmentationUpdatedTest() {
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(CONFIGURATION, TOP, top());
-        writeTx.put(CONFIGURATION, TOP_FOO, topLevelList(new TopLevelListKey(TOP_FOO_KEY)));
-        TreeLeafOnlyUsesAugment leafOnlyUsesAugmentBefore = leafOnlyUsesAugment("test leaf");
-        writeTx.put(CONFIGURATION, SIMPLE_AUGMENT, leafOnlyUsesAugmentBefore);
-        assertCommit(writeTx.submit());
-
-        TreeLeafOnlyUsesAugment leafOnlyUsesAugmentAfter = leafOnlyUsesAugment("test leaf changed");
-        final TestListener<TreeLeafOnlyUsesAugment> listener = createListener(CONFIGURATION, SIMPLE_AUGMENT,
-                added(path(TOP_FOO_KEY, TreeLeafOnlyUsesAugment.class), leafOnlyUsesAugmentBefore),
-                replaced(path(TOP_FOO_KEY, TreeLeafOnlyUsesAugment.class), leafOnlyUsesAugmentBefore,
-                    leafOnlyUsesAugmentAfter));
-
-        writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(CONFIGURATION, SIMPLE_AUGMENT, leafOnlyUsesAugmentAfter);
-        assertCommit(writeTx.submit());
-
-        listener.verify();
-    }
-
-    @Test
-    public void leafOnlyAugmentationDeletedTest() {
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(CONFIGURATION, TOP, top());
-        writeTx.put(CONFIGURATION, TOP_FOO, topLevelList(new TopLevelListKey(TOP_FOO_KEY)));
-        TreeLeafOnlyUsesAugment leafOnlyUsesAugment = leafOnlyUsesAugment("test leaf");
-        writeTx.put(CONFIGURATION, SIMPLE_AUGMENT, leafOnlyUsesAugment);
-        assertCommit(writeTx.submit());
-
-        final TestListener<TreeLeafOnlyUsesAugment> listener = createListener(CONFIGURATION, SIMPLE_AUGMENT,
-                added(path(TOP_FOO_KEY, TreeLeafOnlyUsesAugment.class), leafOnlyUsesAugment),
-                deleted(path(TOP_FOO_KEY, TreeLeafOnlyUsesAugment.class), leafOnlyUsesAugment));
-
-        writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.delete(CONFIGURATION, SIMPLE_AUGMENT);
-        assertCommit(writeTx.submit());
-
-        listener.verify();
-    }
-
-    @Test
-    public void complexAugmentationCreatedTest() {
-        TreeComplexUsesAugment complexUsesAugment = complexUsesAugment(LIST_VIA_USES_KEY);
-        final TestListener<TreeComplexUsesAugment> listener = createListener(CONFIGURATION, COMPLEX_AUGMENT,
-                added(path(TOP_FOO_KEY, TreeComplexUsesAugment.class), complexUsesAugment));
-
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(CONFIGURATION, TOP, top());
-        writeTx.put(CONFIGURATION, TOP_FOO, topLevelList(new TopLevelListKey(TOP_FOO_KEY)));
-        writeTx.put(CONFIGURATION, COMPLEX_AUGMENT, complexUsesAugment);
-        assertCommit(writeTx.submit());
-
-        listener.verify();
-    }
-
-    @Test
-    public void complexAugmentationUpdatedTest() {
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(CONFIGURATION, TOP, top());
-        writeTx.put(CONFIGURATION, TOP_FOO, topLevelList(new TopLevelListKey(TOP_FOO_KEY)));
-        TreeComplexUsesAugment complexUsesAugmentBefore = complexUsesAugment(LIST_VIA_USES_KEY);
-        writeTx.put(CONFIGURATION, COMPLEX_AUGMENT, complexUsesAugmentBefore);
-        assertCommit(writeTx.submit());
-
-        TreeComplexUsesAugment complexUsesAugmentAfter = complexUsesAugment(LIST_VIA_USES_KEY_MOD);
-
-        final TestListener<TreeComplexUsesAugment> listener = createListener(CONFIGURATION, COMPLEX_AUGMENT,
-                added(path(TOP_FOO_KEY, TreeComplexUsesAugment.class), complexUsesAugmentBefore),
-                replaced(path(TOP_FOO_KEY, TreeComplexUsesAugment.class), complexUsesAugmentBefore,
-                        complexUsesAugmentAfter));
-
-        writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(CONFIGURATION, COMPLEX_AUGMENT, complexUsesAugmentAfter);
-        assertCommit(writeTx.submit());
-
-        listener.verify();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug2562DeserializedUnkeyedListTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug2562DeserializedUnkeyedListTest.java
deleted file mode 100644 (file)
index bb3fbc2..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.Arrays;
-import java.util.Set;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.opendaylight.test.bug._2562.namespace.rev160101.Root;
-import org.opendaylight.yang.gen.v1.opendaylight.test.bug._2562.namespace.rev160101.RootBuilder;
-import org.opendaylight.yang.gen.v1.opendaylight.test.bug._2562.namespace.rev160101.root.Fooroot;
-import org.opendaylight.yang.gen.v1.opendaylight.test.bug._2562.namespace.rev160101.root.FoorootBuilder;
-import org.opendaylight.yang.gen.v1.opendaylight.test.bug._2562.namespace.rev160101.root.fooroot.Barroot;
-import org.opendaylight.yang.gen.v1.opendaylight.test.bug._2562.namespace.rev160101.root.fooroot.BarrootBuilder;
-import org.opendaylight.yang.gen.v1.opendaylight.test.bug._2562.namespace.rev160101.root.fooroot.BarrootKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-public class Bug2562DeserializedUnkeyedListTest extends AbstractDataTreeChangeListenerTest {
-    private static final InstanceIdentifier<Root> ROOT_PATH = InstanceIdentifier.create(Root.class);
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Root.class));
-    }
-
-    @Test
-    public void writeListToList2562Root() {
-        final Barroot barRoot = new BarrootBuilder().setType(2).setValue(2).withKey(new BarrootKey(2)).build();
-        final Fooroot fooRoot = new FoorootBuilder().setBarroot(Arrays.asList(barRoot)).build();
-        final Root root = new RootBuilder().setFooroot(Arrays.asList(fooRoot)).build();
-
-        final TestListener<Root> listenerRoot = createListener(LogicalDatastoreType.CONFIGURATION, ROOT_PATH,
-                added(ROOT_PATH, root));
-
-        final ReadWriteTransaction readWriteTransaction = getDataBroker().newReadWriteTransaction();
-        readWriteTransaction.put(LogicalDatastoreType.CONFIGURATION, ROOT_PATH, root);
-        assertCommit(readWriteTransaction.submit());
-
-        listenerRoot.verify();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug3090MultiKeyList.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug3090MultiKeyList.java
deleted file mode 100644 (file)
index bf6f1b2..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Objects;
-import java.util.Set;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.opendaylight.test.bug._3090.rev160101.Root;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.opendaylight.test.bug._3090.rev160101.RootBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.opendaylight.test.bug._3090.rev160101.root.ListInRoot;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.opendaylight.test.bug._3090.rev160101.root.ListInRootBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-public class Bug3090MultiKeyList extends AbstractDataTreeChangeListenerTest {
-    private static final InstanceIdentifier<Root> ROOT_PATH = InstanceIdentifier.create(Root.class);
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Root.class));
-    }
-
-    @Test
-    public void listWithMultiKeyTest() {
-        final List<ListInRoot> listInRoots = new ArrayList<>();
-        for (int i = 0; i < 10; i++) {
-            listInRoots.add(new ListInRootBuilder()
-                .setLeafA("leaf a" + i)
-                .setLeafC("leaf c" + i)
-                .setLeafB("leaf b" + i)
-                .build()
-            );
-        }
-
-        final Root root = new RootBuilder().setListInRoot(listInRoots).build();
-
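-        // Expect a single WRITE at the root: no data before, and data after matching what was written.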
-        final TestListener<Root> listener = createListener(LogicalDatastoreType.CONFIGURATION, ROOT_PATH,
-                match(ModificationType.WRITE, ROOT_PATH, Objects::isNull,
-                        (Function<Root, Boolean>) dataAfter -> checkData(root, dataAfter)));
-
-        final ReadWriteTransaction readWriteTransaction = getDataBroker().newReadWriteTransaction();
-        readWriteTransaction.put(LogicalDatastoreType.CONFIGURATION, ROOT_PATH, root);
-        assertCommit(readWriteTransaction.submit());
-
-        listener.verify();
-    }
-
-    private static boolean checkData(final Root expected, final Root actual) {
-        if (actual == null) {
-            return false;
-        }
-
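-        // Rebuild each actual entry through its builder and compare as sets, ignoring list ordering.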
-        Set<ListInRoot> expListInRoot = new HashSet<>(expected.getListInRoot());
-        Set<ListInRoot> actualListInRoot = actual.getListInRoot().stream()
-                .map(list -> new ListInRootBuilder(list).build()).collect(Collectors.toSet());
-        return expListInRoot.equals(actualListInRoot);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug4513Test.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/Bug4513Test.java
deleted file mode 100644 (file)
index 5414a14..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc., Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import java.util.Arrays;
-import java.util.Collection;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataBrokerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.listener.rev150825.ListenerTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.listener.rev150825.ListenerTestBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.listener.rev150825.listener.test.ListItem;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.listener.rev150825.listener.test.ListItemBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Regression test suite for https://bugs.opendaylight.org/show_bug.cgi?id=4513 - Change event is empty when
- * a homogeneous composite key is used.
- */
-public class Bug4513Test extends AbstractDataBrokerTest {
-
-    @SuppressWarnings({ "rawtypes", "unchecked" })
-    @Test
-    public void testDataTreeChangeListener() {
-        DataBroker dataBroker = getDataBroker();
-
-        DataTreeChangeListener<ListItem> listener = mock(DataTreeChangeListener.class);
-        InstanceIdentifier<ListItem> wildCard = InstanceIdentifier.builder(ListenerTest.class)
-                .child(ListItem.class).build();
-        ListenerRegistration<DataTreeChangeListener<ListItem>> reg = dataBroker.registerDataTreeChangeListener(
-                new DataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, wildCard), listener);
-
-        final ListItem item = writeListItem();
-
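-        // A single change event carrying the written list item is expected; capture and inspect it.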
-        ArgumentCaptor<Collection> captor = ArgumentCaptor.forClass(Collection.class);
-
-        verify(listener, timeout(100)).onDataTreeChanged(captor.capture());
-
-        Collection<DataTreeModification<ListItem>> mods = captor.getValue();
-        assertEquals("ListItem", item, mods.iterator().next().getRootNode().getDataAfter());
-    }
-
-    private ListItem writeListItem() {
-        WriteTransaction writeTransaction = getDataBroker().newWriteOnlyTransaction();
-        final ListItem item = new ListItemBuilder().setSip("name").setOp(43L).build();
-        ListenerTestBuilder builder = new ListenerTestBuilder().setListItem(Arrays.asList(item));
-        writeTransaction.put(LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.builder(
-                ListenerTest.class).build(), builder.build());
-        assertCommit(writeTransaction.submit());
-        return item;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/DataTreeChangeListenerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/DataTreeChangeListenerTest.java
deleted file mode 100644 (file)
index 91d86c1..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_BAR_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.USES_ONE_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.complexUsesAugment;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Matchers;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
-import org.opendaylight.controller.md.sal.binding.test.AbstractConcurrentDataBrokerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-public class DataTreeChangeListenerTest extends AbstractConcurrentDataBrokerTest {
-
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
-    private static final PathArgument TOP_ARGUMENT = TOP_PATH.getPathArguments().iterator().next();
-    private static final InstanceIdentifier<TopLevelList> FOO_PATH = path(TOP_FOO_KEY);
-    private static final PathArgument FOO_ARGUMENT = Iterables.getLast(FOO_PATH.getPathArguments());
-    private static final TopLevelList FOO_DATA = topLevelList(TOP_FOO_KEY, complexUsesAugment(USES_ONE_KEY));
-    private static final InstanceIdentifier<TopLevelList> BAR_PATH = path(TOP_BAR_KEY);
-    private static final PathArgument BAR_ARGUMENT = Iterables.getLast(BAR_PATH.getPathArguments());
-    private static final TopLevelList BAR_DATA = topLevelList(TOP_BAR_KEY);
-    private static final DataTreeIdentifier<Top> TOP_IDENTIFIER =
-            new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, TOP_PATH);
-
-    private static final Top TOP_INITIAL_DATA = top(FOO_DATA);
-
-    private BindingDOMDataBrokerAdapter dataBrokerImpl;
-
-    private static final class EventCapturingListener<T extends DataObject> implements DataTreeChangeListener<T> {
-
-        private SettableFuture<Collection<DataTreeModification<T>>> changes = SettableFuture.create();
-
-        @Override
-        public void onDataTreeChanged(final Collection<DataTreeModification<T>> modification) {
-            this.changes.set(modification);
-        }
-
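-        // Waits up to 200 ms for the next notification and re-arms the future for subsequent events.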
-        Collection<DataTreeModification<T>> nextEvent() throws Exception {
-            final Collection<DataTreeModification<T>> result = changes.get(200, TimeUnit.MILLISECONDS);
-            changes = SettableFuture.create();
-            return result;
-        }
-    }
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(
-                BindingReflections.getModuleInfo(TwoLevelList.class),
-                BindingReflections.getModuleInfo(TreeComplexUsesAugment.class)
-                );
-    }
-
-    @Before
-    public void setupWithDataBroker() {
-        dataBrokerImpl = (BindingDOMDataBrokerAdapter) getDataBroker();
-    }
-
-    @Test
-    public void testTopLevelListener() throws Exception {
-        final EventCapturingListener<Top> listener = new EventCapturingListener<>();
-        dataBrokerImpl.registerDataTreeChangeListener(TOP_IDENTIFIER, listener);
-
-        createAndVerifyTop(listener);
-
-        putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
-        final DataObjectModification<Top> afterBarPutEvent = Iterables.getOnlyElement(listener.nextEvent())
-                .getRootNode();
-        verifyModification(afterBarPutEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
-        final DataObjectModification<TopLevelList> barPutMod = afterBarPutEvent
-                .getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
-        assertNotNull(barPutMod);
-        verifyModification(barPutMod, BAR_ARGUMENT, ModificationType.WRITE);
-
-        deleteTx(BAR_PATH).submit().checkedGet();
-        final DataObjectModification<Top> afterBarDeleteEvent = Iterables.getOnlyElement(listener.nextEvent())
-                .getRootNode();
-        verifyModification(afterBarDeleteEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
-        final DataObjectModification<TopLevelList> barDeleteMod = afterBarDeleteEvent
-                .getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
-        verifyModification(barDeleteMod, BAR_ARGUMENT, ModificationType.DELETE);
-    }
-
-    @Test
-    public void testWildcardedListListener() throws Exception {
-        final EventCapturingListener<TopLevelList> listener = new EventCapturingListener<>();
-        final DataTreeIdentifier<TopLevelList> wildcard = new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL,
-                TOP_PATH.child(TopLevelList.class));
-        dataBrokerImpl.registerDataTreeChangeListener(wildcard, listener);
-
-        putTx(TOP_PATH, TOP_INITIAL_DATA).submit().checkedGet();
-
-        final DataTreeModification<TopLevelList> fooWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
-        assertEquals(FOO_PATH, fooWriteEvent.getRootPath().getRootIdentifier());
-        verifyModification(fooWriteEvent.getRootNode(), FOO_ARGUMENT, ModificationType.WRITE);
-
-        putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
-        final DataTreeModification<TopLevelList> barWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
-        assertEquals(BAR_PATH, barWriteEvent.getRootPath().getRootIdentifier());
-        verifyModification(barWriteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.WRITE);
-
-        deleteTx(BAR_PATH).submit().checkedGet();
-        final DataTreeModification<TopLevelList> barDeleteEvent = Iterables.getOnlyElement(listener.nextEvent());
-        assertEquals(BAR_PATH, barDeleteEvent.getRootPath().getRootIdentifier());
-        verifyModification(barDeleteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.DELETE);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testWildcardNotificationOfPreexistingData() {
-        InstanceIdentifier<Top> id = InstanceIdentifier.builder(Top.class).build();
-        ArrayList<TopLevelList> list = new ArrayList<>();
-        list.add(new TopLevelListBuilder().setName("name").build());
-        TopBuilder builder = new TopBuilder().setTopLevelList(list);
-
-        DataBroker dataBroker = getDataBroker();
-
-        WriteTransaction writeTransaction = dataBroker.newWriteOnlyTransaction();
-        writeTransaction.put(LogicalDatastoreType.OPERATIONAL, id, builder.build());
-        assertCommit(writeTransaction.submit());
-
-        DataTreeChangeListener<TopLevelList> listener = mock(DataTreeChangeListener.class);
-        InstanceIdentifier<TopLevelList> wildcard = InstanceIdentifier.builder(Top.class).child(TopLevelList.class)
-                .build();
-        dataBroker.registerDataTreeChangeListener(new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, wildcard),
-                listener);
-
-        verify(listener, timeout(1000)).onDataTreeChanged(Matchers.anyObject());
-    }
-
-    private void createAndVerifyTop(final EventCapturingListener<Top> listener) throws Exception {
-        putTx(TOP_PATH,TOP_INITIAL_DATA).submit().checkedGet();
-        final Collection<DataTreeModification<Top>> events = listener.nextEvent();
-
-        assertFalse("Non empty collection should be received.",events.isEmpty());
-        final DataTreeModification<Top> initialWrite = Iterables.getOnlyElement(events);
-        final DataObjectModification<? extends DataObject> initialNode = initialWrite.getRootNode();
-        verifyModification(initialNode,TOP_PATH.getPathArguments().iterator().next(),ModificationType.WRITE);
-        assertEquals(TOP_INITIAL_DATA, initialNode.getDataAfter());
-    }
-
-    private void verifyModification(final DataObjectModification<? extends DataObject> barWrite,
-            final PathArgument pathArg, final ModificationType eventType) {
-        assertEquals(pathArg.getType(), barWrite.getDataType());
-        assertEquals(eventType,barWrite.getModificationType());
-        assertEquals(pathArg, barWrite.getIdentifier());
-    }
-
-    private <T extends DataObject> WriteTransaction putTx(final InstanceIdentifier<T> path,final T data) {
-        final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
-        tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
-        return tx;
-    }
-
-    private WriteTransaction deleteTx(final InstanceIdentifier<?> path) {
-        final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
-        tx.delete(LogicalDatastoreType.OPERATIONAL, path);
-        return tx;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ForwardedNotificationAdapterTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ForwardedNotificationAdapterTest.java
deleted file mode 100644 (file)
index 7eef5db..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Assert;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.test.AbstractNotificationBrokerTest;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.OpendaylightMdsalListTestListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelListChanged;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelListChangedBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ForwardedNotificationAdapterTest extends AbstractNotificationBrokerTest {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ForwardedNotificationAdapterTest.class);
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(TwoLevelListChanged.class));
-
-    }
-
-    private static TwoLevelListChanged createTestData() {
-        final TwoLevelListChangedBuilder tb = new TwoLevelListChangedBuilder();
-        tb.setTopLevelList(ImmutableList.of(new TopLevelListBuilder().withKey(new TopLevelListKey("test")).build()));
-        return tb.build();
-    }
-
-    @Test
-    public void testNotifSubscription() throws InterruptedException {
-        final CountDownLatch latch = new CountDownLatch(1);
-        final TwoLevelListChanged testData = createTestData();
-
-        final TestNotifListener testNotifListener = new TestNotifListener(latch);
-        final ListenerRegistration<TestNotifListener> listenerRegistration = getNotificationService()
-                .registerNotificationListener(testNotifListener);
-        getNotificationPublishService().putNotification(testData);
-
-        latch.await();
-        assertTrue(testNotifListener.getReceivedNotifications().size() == 1);
-        assertEquals(testData, testNotifListener.getReceivedNotifications().get(0));
-
-        listenerRegistration.close();
-    }
-
-    @Test
-    public void testNotifSubscription2() throws InterruptedException {
-        final CountDownLatch latch = new CountDownLatch(1);
-        final TwoLevelListChanged testData = createTestData();
-
-        final TestNotifListener testNotifListener = new TestNotifListener(latch);
-        final ListenerRegistration<TestNotifListener> listenerRegistration = getNotificationService()
-                .registerNotificationListener(testNotifListener);
-        try {
-            getNotificationPublishService().offerNotification(testData).get(1, TimeUnit.SECONDS);
-        } catch (ExecutionException | TimeoutException e) {
-            LOG.error("Notification delivery failed", e);
-            Assert.fail("notification should be delivered");
-        }
-
-        latch.await();
-        assertTrue(testNotifListener.getReceivedNotifications().size() == 1);
-        assertEquals(testData, testNotifListener.getReceivedNotifications().get(0));
-
-        listenerRegistration.close();
-    }
-
-    @Test
-    public void testNotifSubscription3() throws InterruptedException {
-        final CountDownLatch latch = new CountDownLatch(1);
-        final TwoLevelListChanged testData = createTestData();
-
-        final TestNotifListener testNotifListener = new TestNotifListener(latch);
-        final ListenerRegistration<TestNotifListener> listenerRegistration = getNotificationService()
-                .registerNotificationListener(testNotifListener);
-        assertNotSame(NotificationPublishService.REJECTED,
-                getNotificationPublishService().offerNotification(testData, 5, TimeUnit.SECONDS));
-
-        latch.await();
-        assertTrue(testNotifListener.getReceivedNotifications().size() == 1);
-        assertEquals(testData, testNotifListener.getReceivedNotifications().get(0));
-
-        listenerRegistration.close();
-    }
-
-    private static class TestNotifListener implements OpendaylightMdsalListTestListener {
-        private final List<TwoLevelListChanged> receivedNotifications = new ArrayList<>();
-        private final CountDownLatch latch;
-
-        TestNotifListener(final CountDownLatch latch) {
-            this.latch = latch;
-        }
-
-        @Override
-        public void onTwoLevelListChanged(final TwoLevelListChanged notification) {
-            receivedNotifications.add(notification);
-            latch.countDown();
-        }
-
-        public List<TwoLevelListChanged> getReceivedNotifications() {
-            return receivedNotifications;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ListInsertionDataChangeListenerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ListInsertionDataChangeListenerTest.java
deleted file mode 100644 (file)
index 143cb8e..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_BAR_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.HashSet;
-import java.util.Objects;
-import java.util.Set;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
-import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-/**
- * This test suite exercises the data change scope and data modification semantics
- * described in
- * https://lists.opendaylight.org/pipermail/controller-dev/2014-July/005541.html.
- */
-public class ListInsertionDataChangeListenerTest extends AbstractDataTreeChangeListenerTest {
-
-    private static final InstanceIdentifier<Top> TOP = InstanceIdentifier.create(Top.class);
-    private static final InstanceIdentifier<TopLevelList> WILDCARDED = TOP.child(TopLevelList.class);
-    private static final InstanceIdentifier<TopLevelList> TOP_FOO = TOP.child(TopLevelList.class, TOP_FOO_KEY);
-    private static final InstanceIdentifier<TopLevelList> TOP_BAR = TOP.child(TopLevelList.class, TOP_BAR_KEY);
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Top.class));
-    }
-
-    @Before
-    public void setupWithDataBroker() {
-        WriteTransaction initialTx = getDataBroker().newWriteOnlyTransaction();
-        initialTx.put(CONFIGURATION, TOP, top(topLevelList(TOP_FOO_KEY)));
-        assertCommit(initialTx.submit());
-    }
-
-    @Test
-    public void replaceTopNodeSubtreeListeners() {
-        final TopLevelList topBar = topLevelList(TOP_BAR_KEY);
-        final Top top = top(topBar);
-        final TopLevelList topFoo = topLevelList(TOP_FOO_KEY);
-
-        // Listener for TOP element
-        final TestListener<Top> topListener = createListener(CONFIGURATION, TOP,
-                added(TOP, top(topLevelList(TOP_FOO_KEY))), replaced(TOP, top(topFoo), top));
-
-        // Listener for all list items. This one should see Foo item deleted and Bar item added.
-        final TestListener<TopLevelList> allListener = createListener(CONFIGURATION, WILDCARDED,
-                added(TOP_FOO, topFoo), added(TOP_BAR, topBar), deleted(TOP_FOO, topFoo));
-
-        // Listener for the Foo item. This one should see only the Foo item deleted.
-        final TestListener<TopLevelList> fooListener = createListener(CONFIGURATION, TOP_FOO,
-                added(TOP_FOO, topFoo), deleted(TOP_FOO, topFoo));
-
-        // Listener for bar list items.
-        final TestListener<TopLevelList> barListener = createListener(CONFIGURATION, TOP_BAR,
-                added(TOP_BAR, topBar));
-
-        ReadWriteTransaction writeTx = getDataBroker().newReadWriteTransaction();
-        writeTx.put(CONFIGURATION, TOP, top);
-        assertCommit(writeTx.submit());
-
-        topListener.verify();
-        allListener.verify();
-        fooListener.verify();
-        barListener.verify();
-    }
-
-    @Test
-    public void mergeTopNodeSubtreeListeners() {
-        final TopLevelList topBar = topLevelList(TOP_BAR_KEY);
-        final TopLevelList topFoo = topLevelList(TOP_FOO_KEY);
-
-        final TestListener<Top> topListener = createListener(CONFIGURATION, TOP,
-                added(TOP, top(topLevelList(TOP_FOO_KEY))), topSubtreeModified(topFoo, topBar));
-        final TestListener<TopLevelList> allListener = createListener(CONFIGURATION, WILDCARDED,
-                added(TOP_FOO, topFoo), added(TOP_BAR, topBar));
-        final TestListener<TopLevelList> fooListener = createListener(CONFIGURATION, TOP_FOO,
-                added(TOP_FOO, topFoo));
-        final TestListener<TopLevelList> barListener = createListener(CONFIGURATION, TOP_BAR,
-                added(TOP_BAR, topBar));
-
-        ReadWriteTransaction writeTx = getDataBroker().newReadWriteTransaction();
-        writeTx.merge(CONFIGURATION, TOP, top(topLevelList(TOP_BAR_KEY)));
-        assertCommit(writeTx.submit());
-
-        topListener.verify();
-        allListener.verify();
-        fooListener.verify();
-        barListener.verify();
-    }
-
-    @Test
-    public void putTopBarNodeSubtreeListeners() {
-        final TopLevelList topBar = topLevelList(TOP_BAR_KEY);
-        final TopLevelList topFoo = topLevelList(TOP_FOO_KEY);
-
-        final TestListener<Top> topListener = createListener(CONFIGURATION, TOP,
-                added(TOP, top(topLevelList(TOP_FOO_KEY))), topSubtreeModified(topFoo, topBar));
-        final TestListener<TopLevelList> allListener = createListener(CONFIGURATION, WILDCARDED,
-                added(TOP_FOO, topFoo), added(TOP_BAR, topBar));
-        final TestListener<TopLevelList> fooListener = createListener(CONFIGURATION, TOP_FOO,
-                added(TOP_FOO, topFoo));
-        final TestListener<TopLevelList> barListener = createListener(CONFIGURATION, TOP_BAR,
-                added(TOP_BAR, topBar));
-
-        ReadWriteTransaction writeTx = getDataBroker().newReadWriteTransaction();
-        writeTx.put(CONFIGURATION, TOP_BAR, topLevelList(TOP_BAR_KEY));
-        assertCommit(writeTx.submit());
-
-        topListener.verify();
-        allListener.verify();
-        fooListener.verify();
-        barListener.verify();
-    }
-
-    @Test
-    public void mergeTopBarNodeSubtreeListeners() {
-        final TopLevelList topBar = topLevelList(TOP_BAR_KEY);
-        final TopLevelList topFoo = topLevelList(TOP_FOO_KEY);
-
-        final TestListener<Top> topListener = createListener(CONFIGURATION, TOP,
-                added(TOP, top(topLevelList(TOP_FOO_KEY))), topSubtreeModified(topFoo, topBar));
-        final TestListener<TopLevelList> allListener = createListener(CONFIGURATION, WILDCARDED,
-                added(TOP_FOO, topFoo), added(TOP_BAR, topBar));
-        final TestListener<TopLevelList> fooListener = createListener(CONFIGURATION, TOP_FOO,
-                added(TOP_FOO, topFoo));
-        final TestListener<TopLevelList> barListener = createListener(CONFIGURATION, TOP_BAR,
-                added(TOP_BAR, topBar));
-
-        ReadWriteTransaction writeTx = getDataBroker().newReadWriteTransaction();
-        writeTx.merge(CONFIGURATION, TOP_BAR, topLevelList(TOP_BAR_KEY));
-        assertCommit(writeTx.submit());
-
-        topListener.verify();
-        allListener.verify();
-        fooListener.verify();
-        barListener.verify();
-    }
-
-    private Function<DataTreeModification<Top>, Boolean> topSubtreeModified(final TopLevelList topFoo,
-            final TopLevelList topBar) {
-        return match(ModificationType.SUBTREE_MODIFIED, TOP,
-            (Function<Top, Boolean>) dataBefore -> Objects.equals(top(topFoo), dataBefore),
-            dataAfter -> {
-                Set<TopLevelList> expList = new HashSet<>(top(topBar, topFoo).getTopLevelList());
-                Set<TopLevelList> actualList = dataAfter.getTopLevelList().stream()
-                        .map(list -> new TopLevelListBuilder(list).build()).collect(Collectors.toSet());
-                return expList.equals(actualList);
-            });
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/WriteTransactionTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/WriteTransactionTest.java
deleted file mode 100644 (file)
index ca2375f..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.impl.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.base.Optional;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractConcurrentDataBrokerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class WriteTransactionTest extends AbstractConcurrentDataBrokerTest {
-
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
-    private static final TopLevelListKey TOP_LIST_KEY = new TopLevelListKey("foo");
-    private static final InstanceIdentifier<TopLevelList> NODE_PATH = TOP_PATH.child(TopLevelList.class, TOP_LIST_KEY);
-    private static final TopLevelList NODE = new TopLevelListBuilder().withKey(TOP_LIST_KEY).build();
-
-    @Test
-    @Deprecated
-    public void testSubmit() throws InterruptedException, ExecutionException, TimeoutException {
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH, new TopBuilder().build());
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, NODE_PATH, NODE);
-        writeTx.submit().get(5, TimeUnit.SECONDS);
-
-        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
-        Optional<TopLevelList> listNode = readTx.read(LogicalDatastoreType.OPERATIONAL, NODE_PATH).get();
-        assertTrue("List node must exists after commit", listNode.isPresent());
-        assertEquals("List node", NODE, listNode.get());
-    }
-
-    @Test
-    public void testCommit() throws InterruptedException, ExecutionException, TimeoutException {
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH, new TopBuilder().build());
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, NODE_PATH, NODE);
-        writeTx.commit().get(5, TimeUnit.SECONDS);
-
-        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
-        Optional<TopLevelList> listNode = readTx.read(LogicalDatastoreType.OPERATIONAL, NODE_PATH).get();
-        assertTrue("List node must exists after commit", listNode.isPresent());
-        assertEquals("List node", NODE, listNode.get());
-    }
-
-    @Test
-    public void testPutCreateParentsSuccess() throws InterruptedException, ExecutionException, TimeoutException {
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.OPERATIONAL, NODE_PATH, NODE,true);
-        writeTx.commit().get(5, TimeUnit.SECONDS);
-
-        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
-        Optional<Top> topNode = readTx.read(LogicalDatastoreType.OPERATIONAL, TOP_PATH).get();
-        assertTrue("Top node must exists after commit",topNode.isPresent());
-        Optional<TopLevelList> listNode = readTx.read(LogicalDatastoreType.OPERATIONAL, NODE_PATH).get();
-        assertTrue("List node must exists after commit",listNode.isPresent());
-    }
-
-    @Test
-    public void testMergeCreateParentsSuccess() throws InterruptedException, ExecutionException, TimeoutException {
-        WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
-        writeTx.merge(LogicalDatastoreType.OPERATIONAL, NODE_PATH, NODE,true);
-        writeTx.commit().get(5, TimeUnit.SECONDS);
-
-        ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction();
-        Optional<Top> topNode = readTx.read(LogicalDatastoreType.OPERATIONAL, TOP_PATH).get();
-        assertTrue("Top node must exists after commit",topNode.isPresent());
-        Optional<TopLevelList> listNode = readTx.read(LogicalDatastoreType.OPERATIONAL, NODE_PATH).get();
-        assertTrue("List node must exists after commit",listNode.isPresent());
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractBaseDataBrokerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractBaseDataBrokerTest.java
deleted file mode 100644 (file)
index 408d8c1..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public abstract class AbstractBaseDataBrokerTest extends AbstractSchemaAwareTest {
-
-    private static final int ASSERT_COMMIT_DEFAULT_TIMEOUT = 5000;
-
-    private AbstractDataBrokerTestCustomizer testCustomizer;
-    private DataBroker dataBroker;
-    private DOMDataBroker domBroker;
-
-    protected abstract AbstractDataBrokerTestCustomizer createDataBrokerTestCustomizer();
-
-    public AbstractDataBrokerTestCustomizer getDataBrokerTestCustomizer() {
-        if (testCustomizer == null) {
-            throw new IllegalStateException("testCustomizer not yet set by call to createDataBrokerTestCustomizer()");
-        }
-        return testCustomizer;
-    }
-
-    @Override
-    protected void setupWithSchema(final SchemaContext context) {
-        testCustomizer = createDataBrokerTestCustomizer();
-        dataBroker = testCustomizer.createDataBroker();
-        domBroker = testCustomizer.getDOMDataBroker();
-        testCustomizer.updateSchema(context);
-    }
-
-    public DataBroker getDataBroker() {
-        return dataBroker;
-    }
-
-    public DOMDataBroker getDomBroker() {
-        return domBroker;
-    }
-
-    protected static final void assertCommit(final ListenableFuture<Void> commit) {
-        assertCommit(commit, ASSERT_COMMIT_DEFAULT_TIMEOUT);
-    }
-
-    protected static final void assertCommit(final ListenableFuture<Void> commit, long timeoutInMS) {
-        try {
-            commit.get(timeoutInMS, TimeUnit.MILLISECONDS);
-        } catch (InterruptedException | ExecutionException | TimeoutException e) {
-            throw new IllegalStateException(e);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractConcurrentDataBrokerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractConcurrentDataBrokerTest.java
deleted file mode 100644 (file)
index de4d0ec..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-/**
- * AbstractConcurrentDataBrokerTest.
- *
- * <p>Uses a single-threaded executor for the serialized DOM DataBroker (instead of the
- * direct executor used by the {@literal @}Deprecated AbstractDataBrokerTest) in order
- * to allow tests to use the DataBroker concurrently from several threads.
- *
- * <p>See also <a href="https://bugs.opendaylight.org/show_bug.cgi?id=7538">bug 7538</a> for more details.
- *
- * @author Michael Vorburger
- */
-public abstract class AbstractConcurrentDataBrokerTest extends AbstractBaseDataBrokerTest {
-    private final boolean useMTDataTreeChangeListenerExecutor;
-
-    protected AbstractConcurrentDataBrokerTest() {
-        this(false);
-    }
-
-    protected AbstractConcurrentDataBrokerTest(final boolean useMTDataTreeChangeListenerExecutor) {
-        this.useMTDataTreeChangeListenerExecutor = useMTDataTreeChangeListenerExecutor;
-    }
-
-    @Override
-    protected AbstractDataBrokerTestCustomizer createDataBrokerTestCustomizer() {
-        return new ConcurrentDataBrokerTestCustomizer(useMTDataTreeChangeListenerExecutor);
-    }
-
-}
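The javadoc above explains the design choice: commits go through a single-threaded executor in the serialized DOM DataBroker so that tests may drive the DataBroker from several threads. A minimal sketch of such a test, assuming the test model used elsewhere in this diff is on the classpath; the class name, method names and thread handling are illustrative, and only broker calls that appear in the deleted files above are used:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.junit.Test;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

public class ConcurrentWriteExampleTest extends AbstractConcurrentDataBrokerTest {
    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);

    @Test
    public void writesFromSeveralThreadsComplete() throws Exception {
        final DataBroker broker = getDataBroker();

        // Create the parent container first, from the test thread.
        final WriteTransaction parentTx = broker.newWriteOnlyTransaction();
        parentTx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH, new TopBuilder().build());
        parentTx.commit().get(5, TimeUnit.SECONDS);

        // Run two writers concurrently; each uses its own transaction and waits for its own commit.
        final ExecutorService pool = Executors.newFixedThreadPool(2);
        final List<Callable<Void>> writers = Arrays.asList(writer(broker, "foo"), writer(broker, "bar"));
        for (Future<Void> future : pool.invokeAll(writers)) {
            future.get(5, TimeUnit.SECONDS);
        }
        pool.shutdownNow();
    }

    private static Callable<Void> writer(final DataBroker broker, final String name) {
        return () -> {
            final TopLevelListKey key = new TopLevelListKey(name);
            final WriteTransaction tx = broker.newWriteOnlyTransaction();
            tx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH.child(TopLevelList.class, key),
                new TopLevelListBuilder().withKey(key).build());
            tx.commit().get(5, TimeUnit.SECONDS);
            return null;
        };
    }
}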
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataBrokerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataBrokerTest.java
deleted file mode 100644 (file)
index 35f0650..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * AbstractDataBrokerTest.
- *
- * @deprecated Please now use the AbstractConcurrentDataBrokerTest instead of
- *             this. Normally, in a well-written test, this should be a drop-in
- *             replacement. Instead of {@literal @}Override
- *             setupWithDataBroker(DataBroker dataBroker), please just use
- *             JUnit's {@literal @}Before before() { ... getDataBroker() }. Some
- *             tests which relied on the Test DataBroker being synchronous,
- *             contrary to its specification as well as the production
- *             implementation, may require changes to e.g. use get() on
- *             submit()'ed transaction to make the test wait before asserts. See
- *             also
- *             <a href="https://bugs.opendaylight.org/show_bug.cgi?id=7538">bug
- *             7538</a> for more details.
- */
-@Deprecated
-public class AbstractDataBrokerTest extends AbstractBaseDataBrokerTest {
-
-    @Override
-    protected AbstractDataBrokerTestCustomizer createDataBrokerTestCustomizer() {
-        return new DataBrokerTestCustomizer();
-    }
-
-    @Override
-    protected void setupWithSchema(SchemaContext context) {
-        super.setupWithSchema(context);
-        setupWithDataBroker(getDataBroker());
-    }
-
-    protected void setupWithDataBroker(final DataBroker dataBroker) {
-        // Intentionally left No-op, subclasses may customize it
-    }
-
-}
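The deprecation note above describes the migration only in prose. A minimal sketch of a migrated test, assuming the same test model used elsewhere in this diff; the class name, test name and assertion message are illustrative, not part of the removed code:

import static org.junit.Assert.assertTrue;

import com.google.common.base.Optional;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Test;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

public class MigratedDataBrokerTest extends AbstractConcurrentDataBrokerTest {
    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);

    private DataBroker dataBroker;

    // Instead of overriding setupWithDataBroker(DataBroker), obtain the broker in a JUnit @Before method.
    @Before
    public void before() {
        dataBroker = getDataBroker();
    }

    @Test
    public void writeIsVisibleAfterCommitCompletes() throws Exception {
        final WriteTransaction writeTx = dataBroker.newWriteOnlyTransaction();
        writeTx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH, new TopBuilder().build());
        // The test broker is no longer synchronous: wait for the commit before asserting.
        writeTx.commit().get(5, TimeUnit.SECONDS);

        final Optional<Top> top = dataBroker.newReadOnlyTransaction()
                .read(LogicalDatastoreType.OPERATIONAL, TOP_PATH).get();
        assertTrue("Top node must exist after commit", top.isPresent());
    }
}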
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataBrokerTestCustomizer.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataBrokerTestCustomizer.java
deleted file mode 100644 (file)
index 722bbd2..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import javassist.ClassPool;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationPublishServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
-import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.binding.test.util.MockSchemaService;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.DataObjectSerializerGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.StreamWriterGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.mdsal.binding.generator.util.JavassistUtils;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public abstract class AbstractDataBrokerTestCustomizer {
-
-    private DOMDataBroker domDataBroker;
-    private final DOMNotificationRouter domNotificationRouter;
-    private final MockSchemaService schemaService;
-    private ImmutableMap<LogicalDatastoreType, DOMStore> datastores;
-    private final BindingToNormalizedNodeCodec bindingToNormalized;
-
-    public ImmutableMap<LogicalDatastoreType, DOMStore> createDatastores() {
-        return ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
-                .put(LogicalDatastoreType.OPERATIONAL, createOperationalDatastore())
-                .put(LogicalDatastoreType.CONFIGURATION,createConfigurationDatastore())
-                .build();
-    }
-
-    public AbstractDataBrokerTestCustomizer() {
-        this.schemaService = new MockSchemaService();
-        final ClassPool pool = ClassPool.getDefault();
-        final DataObjectSerializerGenerator generator = StreamWriterGenerator.create(JavassistUtils.forClassPool(pool));
-        final BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(generator);
-        final GeneratedClassLoadingStrategy loading = GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy();
-        this.bindingToNormalized = new BindingToNormalizedNodeCodec(loading, codecRegistry);
-        this.schemaService.registerSchemaContextListener(this.bindingToNormalized);
-        this.domNotificationRouter = DOMNotificationRouter.create(16);
-    }
-
-    public DOMStore createConfigurationDatastore() {
-        final InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG", getDataTreeChangeListenerExecutor());
-        this.schemaService.registerSchemaContextListener(store);
-        return store;
-    }
-
-    public DOMStore createOperationalDatastore() {
-        final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", getDataTreeChangeListenerExecutor());
-        this.schemaService.registerSchemaContextListener(store);
-        return store;
-    }
-
-    public DOMDataBroker createDOMDataBroker() {
-        return new SerializedDOMDataBroker(getDatastores(), getCommitCoordinatorExecutor());
-    }
-
-    public NotificationService createNotificationService() {
-        return new BindingDOMNotificationServiceAdapter(this.bindingToNormalized.getCodecRegistry(),
-                this.domNotificationRouter);
-    }
-
-    public NotificationPublishService createNotificationPublishService() {
-        return new BindingDOMNotificationPublishServiceAdapter(this.bindingToNormalized, this.domNotificationRouter);
-    }
-
-    public abstract ListeningExecutorService getCommitCoordinatorExecutor();
-
-    public ListeningExecutorService getDataTreeChangeListenerExecutor() {
-        return MoreExecutors.newDirectExecutorService();
-    }
-
-    public DataBroker createDataBroker() {
-        return new BindingDOMDataBrokerAdapter(getDOMDataBroker(), this.bindingToNormalized);
-    }
-
-    public BindingToNormalizedNodeCodec getBindingToNormalized() {
-        return this.bindingToNormalized;
-    }
-
-    public DOMSchemaService getSchemaService() {
-        return this.schemaService;
-    }
-
-    public DOMDataBroker getDOMDataBroker() {
-        if (this.domDataBroker == null) {
-            this.domDataBroker = createDOMDataBroker();
-        }
-        return this.domDataBroker;
-    }
-
-    private synchronized ImmutableMap<LogicalDatastoreType, DOMStore> getDatastores() {
-        if (this.datastores == null) {
-            this.datastores = createDatastores();
-        }
-        return this.datastores;
-    }
-
-    public void updateSchema(final SchemaContext ctx) {
-        this.schemaService.changeSchema(ctx);
-    }
-
-    public DOMNotificationRouter getDomNotificationRouter() {
-        return this.domNotificationRouter;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataTreeChangeListenerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractDataTreeChangeListenerTest.java
deleted file mode 100644 (file)
index f2b5f93..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import static org.junit.Assert.fail;
-
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Objects;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.function.Function;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Abstract base that provides a DataTreeChangeListener (DTCL) test listener for verification.
- *
- * @author Thomas Pantelis
- */
-public class AbstractDataTreeChangeListenerTest extends AbstractConcurrentDataBrokerTest {
-    protected static final class TestListener<T extends DataObject> implements DataTreeChangeListener<T> {
-
-        private final List<DataTreeModification<T>> accumulatedChanges = new ArrayList<>();
-        private final SettableFuture<Collection<DataTreeModification<T>>> future = SettableFuture.create();
-        private final Function<DataTreeModification<T>, Boolean>[] matchers;
-        private final int expChangeCount;
-
-        private TestListener(Function<DataTreeModification<T>, Boolean>[] matchers) {
-            this.expChangeCount = matchers.length;
-            this.matchers = matchers;
-        }
-
-        @Override
-        public void onDataTreeChanged(Collection<DataTreeModification<T>> changes) {
-            synchronized (accumulatedChanges) {
-                accumulatedChanges.addAll(changes);
-                if (expChangeCount == accumulatedChanges.size()) {
-                    future.set(new ArrayList<>(accumulatedChanges));
-                }
-            }
-        }
-
-        public Collection<DataTreeModification<T>> changes() {
-            try {
-                final Collection<DataTreeModification<T>> changes = future.get(5, TimeUnit.SECONDS);
-                Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-                return changes;
-            } catch (InterruptedException | TimeoutException | ExecutionException e) {
-                throw new AssertionError(String.format(
-                    "Data tree change notifications not received. Expected: %s. Actual: %s - %s",
-                        expChangeCount, accumulatedChanges.size(), accumulatedChanges), e);
-            }
-        }
-
-        public void verify() {
-            Collection<DataTreeModification<T>> changes = new ArrayList<>(changes());
-            Iterator<DataTreeModification<T>> iter = changes.iterator();
-            while (iter.hasNext()) {
-                DataTreeModification<T> dataTreeModification = iter.next();
-                for (Function<DataTreeModification<T>, Boolean> matcher: matchers) {
-                    if (matcher.apply(dataTreeModification)) {
-                        iter.remove();
-                        break;
-                    }
-                }
-            }
-
-            if (!changes.isEmpty()) {
-                DataTreeModification<T> mod = changes.iterator().next();
-                fail(String.format("Received unexpected notification: type: %s, path: %s, before: %s, after: %s",
-                        mod.getRootNode().getModificationType(), mod.getRootPath().getRootIdentifier(),
-                        mod.getRootNode().getDataBefore(), mod.getRootNode().getDataAfter()));
-            }
-        }
-
-        public boolean hasChanges() {
-            synchronized (accumulatedChanges) {
-                return !accumulatedChanges.isEmpty();
-            }
-        }
-    }
-
-    protected AbstractDataTreeChangeListenerTest() {
-        super(true);
-    }
-
-    @SafeVarargs
-    protected final <T extends DataObject> TestListener<T> createListener(final LogicalDatastoreType store,
-            final InstanceIdentifier<T> path, Function<DataTreeModification<T>, Boolean>... matchers) {
-        TestListener<T> listener = new TestListener<>(matchers);
-        getDataBroker().registerDataTreeChangeListener(new DataTreeIdentifier<>(store, path), listener);
-        return listener;
-    }
-
-    public static <T extends DataObject> Function<DataTreeModification<T>, Boolean> match(
-            ModificationType type, InstanceIdentifier<T> path, Function<T, Boolean> checkDataBefore,
-            Function<T, Boolean> checkDataAfter) {
-        return modification -> type == modification.getRootNode().getModificationType()
-                && path.equals(modification.getRootPath().getRootIdentifier())
-                && checkDataBefore.apply(modification.getRootNode().getDataBefore())
-                && checkDataAfter.apply(modification.getRootNode().getDataAfter());
-    }
-
-    public static <T extends DataObject> Function<DataTreeModification<T>, Boolean> match(
-            ModificationType type, InstanceIdentifier<T> path, T expDataBefore, T expDataAfter) {
-        return match(type, path, dataBefore -> Objects.equals(expDataBefore, dataBefore),
-            (Function<T, Boolean>) dataAfter -> Objects.equals(expDataAfter, dataAfter));
-    }
-
-    public static <T extends DataObject> Function<DataTreeModification<T>, Boolean> added(
-            InstanceIdentifier<T> path, T data) {
-        return match(ModificationType.WRITE, path, null, data);
-    }
-
-    public static <T extends DataObject> Function<DataTreeModification<T>, Boolean> replaced(
-            InstanceIdentifier<T> path, T dataBefore, T dataAfter) {
-        return match(ModificationType.WRITE, path, dataBefore, dataAfter);
-    }
-
-    public static <T extends DataObject> Function<DataTreeModification<T>, Boolean> deleted(
-            InstanceIdentifier<T> path, T dataBefore) {
-        return match(ModificationType.DELETE, path, dataBefore, null);
-    }
-
-    public static <T extends DataObject> Function<DataTreeModification<T>, Boolean> subtreeModified(
-            InstanceIdentifier<T> path, T dataBefore, T dataAfter) {
-        return match(ModificationType.SUBTREE_MODIFIED, path, dataBefore, dataAfter);
-    }
-}
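A short usage sketch of the createListener()/added()/deleted() helpers removed above, following the pattern of ListInsertionDataChangeListenerTest earlier in this diff; the class name is illustrative, and the TOP/TOP_FOO constants and static imports mirror the ones used in that test:

import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;

import org.junit.Test;
import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

public class MatcherUsageExampleTest extends AbstractDataTreeChangeListenerTest {
    private static final InstanceIdentifier<Top> TOP = InstanceIdentifier.create(Top.class);
    private static final InstanceIdentifier<TopLevelList> TOP_FOO = TOP.child(TopLevelList.class, TOP_FOO_KEY);

    @Test
    public void listenerSeesWriteAndDelete() {
        final TopLevelList foo = topLevelList(TOP_FOO_KEY);

        // Expect a WRITE of the foo entry followed by its DELETE; matchers may match in any order.
        final TestListener<TopLevelList> fooListener = createListener(CONFIGURATION, TOP_FOO,
                added(TOP_FOO, foo), deleted(TOP_FOO, foo));

        final WriteTransaction writeTx = getDataBroker().newWriteOnlyTransaction();
        writeTx.put(CONFIGURATION, TOP, top(foo));
        assertCommit(writeTx.submit());

        final WriteTransaction deleteTx = getDataBroker().newWriteOnlyTransaction();
        deleteTx.delete(CONFIGURATION, TOP_FOO);
        assertCommit(deleteTx.submit());

        // verify() waits for both notifications and fails the test on any unmatched one.
        fooListener.verify();
    }
}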
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractNotificationBrokerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractNotificationBrokerTest.java
deleted file mode 100644 (file)
index 341350c..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class AbstractNotificationBrokerTest extends AbstractSchemaAwareTest {
-
-    private BindingToNormalizedNodeCodec bindingToNormalizedNodeCodec;
-    private DOMNotificationRouter domNotificationRouter;
-    private NotificationService notificationService;
-    private NotificationPublishService notificationPublishService;
-
-    @Override
-    protected void setupWithSchema(final SchemaContext context) {
-        final DataBrokerTestCustomizer testCustomizer = createDataBrokerTestCustomizer();
-        domNotificationRouter = testCustomizer.getDomNotificationRouter();
-        notificationService = testCustomizer.createNotificationService();
-        notificationPublishService = testCustomizer.createNotificationPublishService();
-        bindingToNormalizedNodeCodec = testCustomizer.getBindingToNormalized();
-        testCustomizer.updateSchema(context);
-    }
-
-    protected DataBrokerTestCustomizer createDataBrokerTestCustomizer() {
-        return new DataBrokerTestCustomizer();
-    }
-
-    public NotificationService getNotificationService() {
-        return notificationService;
-    }
-
-    public NotificationPublishService getNotificationPublishService() {
-        return notificationPublishService;
-    }
-
-    public DOMNotificationRouter getDomNotificationRouter() {
-        return domNotificationRouter;
-    }
-
-    public BindingToNormalizedNodeCodec getBindingToNormalizedNodeCodec() {
-        return bindingToNormalizedNodeCodec;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractSchemaAwareTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AbstractSchemaAwareTest.java
deleted file mode 100644 (file)
index db77bc5..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import org.junit.Before;
-import org.opendaylight.mdsal.binding.generator.impl.ModuleInfoBackedContext;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public abstract class AbstractSchemaAwareTest {
-    private static final LoadingCache<ClassLoader, Set<YangModuleInfo>> MODULE_INFO_CACHE = CacheBuilder.newBuilder()
-            .weakKeys().weakValues().build(new CacheLoader<ClassLoader, Set<YangModuleInfo>>() {
-                @Override
-                public Set<YangModuleInfo> load(final ClassLoader key) {
-                    return BindingReflections.loadModuleInfos(key);
-                }
-            });
-    private static final LoadingCache<Set<YangModuleInfo>, SchemaContext> SCHEMA_CONTEXT_CACHE =
-            CacheBuilder.newBuilder().weakValues().build(new CacheLoader<Set<YangModuleInfo>, SchemaContext>() {
-                @Override
-                public SchemaContext load(final Set<YangModuleInfo> key) {
-                    final ModuleInfoBackedContext moduleContext = ModuleInfoBackedContext.create();
-                    moduleContext.addModuleInfos(key);
-                    return moduleContext.tryToCreateSchemaContext().get();
-                }
-            });
-
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return MODULE_INFO_CACHE.getUnchecked(Thread.currentThread().getContextClassLoader());
-    }
-
-    protected SchemaContext getSchemaContext() throws Exception {
-        // ImmutableSet guarantees non-null
-        return SCHEMA_CONTEXT_CACHE.getUnchecked(ImmutableSet.copyOf(getModuleInfos()));
-    }
-
-    @Before
-    public final void setup() throws Exception {
-        setupWithSchema(getSchemaContext());
-    }
-
-    /**
-     * Sets up the test with a Schema context.
-     * This method is called before {@link #setupWithSchemaService(SchemaService)}.
-     */
-    protected abstract void setupWithSchema(SchemaContext context);
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AssertCollections.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/AssertCollections.java
deleted file mode 100644 (file)
index f1619d0..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.binding.test;
-
-import java.util.Collection;
-import java.util.Map;
-import org.junit.Assert;
-
-public final class AssertCollections {
-    private AssertCollections() {
-    }
-
-    public static void assertEmpty(final Collection<?> set) {
-        Assert.assertTrue(set.isEmpty());
-    }
-
-    public static void assertEmpty(final Map<?,?> set) {
-        Assert.assertTrue(set.isEmpty());
-    }
-
-    public static void assertContains(final Collection<?> set, final Object... values) {
-        for (Object key : values) {
-            Assert.assertTrue(set.contains(key));
-        }
-
-    }
-
-    public static void assertContains(final Map<?,?> map, final Object... values) {
-        for (Object key : values) {
-            Assert.assertTrue(map.containsKey(key));
-        }
-    }
-
-    public static void assertNotContains(final Collection<?> set, final Object... values) {
-        for (Object key : values) {
-            Assert.assertFalse(set.contains(key));
-        }
-    }
-
-    public static void assertNotContains(final Map<?,?> map, final Object... values) {
-        for (Object key : values) {
-            Assert.assertFalse(map.containsKey(key));
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/ConcurrentDataBrokerTestCustomizer.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/ConcurrentDataBrokerTestCustomizer.java
deleted file mode 100644 (file)
index ee761f4..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2017 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.Executors;
-
-/**
- * ConcurrentDataBrokerTestCustomizer.
- *
- * <p>See {@link AbstractConcurrentDataBrokerTest} and
- * <a href="https://bugs.opendaylight.org/show_bug.cgi?id=7538">bug 7538</a> for more details & background.
- *
- * @author Michael Vorburger
- */
-public class ConcurrentDataBrokerTestCustomizer extends AbstractDataBrokerTestCustomizer {
-
-    private final ListeningExecutorService dataTreeChangeListenerExecutorSingleton;
-
-    public ConcurrentDataBrokerTestCustomizer(boolean useMTDataTreeChangeListenerExecutor) {
-        if (useMTDataTreeChangeListenerExecutor) {
-            dataTreeChangeListenerExecutorSingleton = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
-        } else {
-            dataTreeChangeListenerExecutorSingleton = MoreExecutors.newDirectExecutorService();
-        }
-    }
-
-    @Override
-    public ListeningExecutorService getCommitCoordinatorExecutor() {
-        return MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
-    }
-
-    @Override
-    public ListeningExecutorService getDataTreeChangeListenerExecutor() {
-        return dataTreeChangeListenerExecutorSingleton;
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/ConstantSchemaAbstractDataBrokerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/ConstantSchemaAbstractDataBrokerTest.java
deleted file mode 100644 (file)
index 93f15b7..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * AbstractDataBrokerTest which creates the SchemaContext
- * only once and keeps it in a static field, instead of re-creating
- * it for each test, and is thus faster.
- *
- * @author Michael Vorburger
- * @deprecated This class is no longer useful, as {@link AbstractSchemaAwareTest#getSchemaContext()} provides effective
- *             caching.
- */
-@Deprecated
-public class ConstantSchemaAbstractDataBrokerTest extends AbstractConcurrentDataBrokerTest {
-
-    public ConstantSchemaAbstractDataBrokerTest() {
-    }
-
-    public ConstantSchemaAbstractDataBrokerTest(final boolean useMTDataTreeChangeListenerExecutor) {
-        super(useMTDataTreeChangeListenerExecutor);
-    }
-
-    @Override
-    protected SchemaContext getSchemaContext() throws Exception {
-        return SchemaContextSingleton.getSchemaContext(super::getSchemaContext);
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/DataBrokerTestCustomizer.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/DataBrokerTestCustomizer.java
deleted file mode 100644 (file)
index e037de4..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
-/**
- * DataBrokerTestCustomizer.
- *
- * @deprecated Please use the ConcurrentDataBrokerTestCustomizer instead of
- *             this; see AbstractDataBrokerTest for more details.
- */
-@Deprecated
-public class DataBrokerTestCustomizer extends AbstractDataBrokerTestCustomizer {
-
-    @Override
-    public ListeningExecutorService getCommitCoordinatorExecutor() {
-        return MoreExecutors.newDirectExecutorService();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/DataBrokerTestModule.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/DataBrokerTestModule.java
deleted file mode 100644 (file)
index f98f1a1..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-
-public class DataBrokerTestModule {
-
-    public static DataBroker dataBroker() {
-        return new DataBrokerTestModule(false).getDataBroker();
-    }
-
-    private final boolean useMTDataTreeChangeListenerExecutor;
-    private ConstantSchemaAbstractDataBrokerTest dataBrokerTest;
-
-    public DataBrokerTestModule(boolean useMTDataTreeChangeListenerExecutor) {
-        this.useMTDataTreeChangeListenerExecutor = useMTDataTreeChangeListenerExecutor;
-    }
-
-    // Suppress IllegalCatch because of AbstractDataBrokerTest (change later)
-    @SuppressWarnings({ "checkstyle:IllegalCatch", "checkstyle:IllegalThrows" })
-    public DataBroker getDataBroker() throws RuntimeException {
-        try {
-            // This is a little bit "upside down" - in the future,
-            // we should probably put what is in AbstractDataBrokerTest
-            // into this DataBrokerTestModule, and make AbstractDataBrokerTest
-            // use it, instead of the other way around, as it currently is;
-            // this is just for historical reasons... and works for now.
-            dataBrokerTest = new ConstantSchemaAbstractDataBrokerTest(useMTDataTreeChangeListenerExecutor);
-            dataBrokerTest.setup();
-            return dataBrokerTest.getDataBroker();
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public DOMDataBroker getDOMDataBroker() {
-        return dataBrokerTest.getDomBroker();
-    }
-
-    public BindingToNormalizedNodeCodec getBindingToNormalizedNodeCodec() {
-        return dataBrokerTest.getDataBrokerTestCustomizer().getBindingToNormalized();
-    }
-
-    public DOMNotificationRouter getDOMNotificationRouter() {
-        return dataBrokerTest.getDataBrokerTestCustomizer().getDomNotificationRouter();
-    }
-
-    public DOMSchemaService getSchemaService() {
-        return dataBrokerTest.getDataBrokerTestCustomizer().getSchemaService();
-    }
-
-    public SchemaContextProvider getSchemaContextProvider() {
-        return (SchemaContextProvider) dataBrokerTest.getDataBrokerTestCustomizer().getSchemaService();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/SchemaContextSingleton.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/SchemaContextSingleton.java
deleted file mode 100644
index 32f84a8..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test;
-
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * {@link SchemaContext} singleton holder (static).
- *
- * <p>This is useful in scenarios such as unit tests, but not in OSGi environments,
- * where there is a flat classpath and thus really only a single
- * SchemaContext.
- *
- * @author Michael Vorburger
- * @deprecated This class should not be used, as it pollutes the classpath.
- */
-@Deprecated
-public final class SchemaContextSingleton {
-
-    private static SchemaContext staticSchemaContext;
-
-    public static synchronized SchemaContext getSchemaContext(final Supplier<SchemaContext> supplier) throws Exception {
-        if (staticSchemaContext == null) {
-            staticSchemaContext = supplier.get();
-        }
-        return staticSchemaContext;
-    }
-
-    private SchemaContextSingleton() { }
-
-    @FunctionalInterface
-    public interface Supplier<T> {
-        T get() throws Exception;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/tests/AbstractDataBrokerTestTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/tests/AbstractDataBrokerTestTest.java
deleted file mode 100644
index 3751f71..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test.tests;
-
-import static com.google.common.truth.Truth.assertThat;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
-import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
-
-import org.junit.Before;
-import org.junit.FixMethodOrder;
-import org.junit.Test;
-import org.junit.runners.MethodSorters;
-import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractConcurrentDataBrokerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugmentBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUsesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TopBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Integration tests the AbstractDataBrokerTest.
- *
- * @author Michael Vorburger
- */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-public class AbstractDataBrokerTestTest extends AbstractConcurrentDataBrokerTest {
-
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
-
-    @Before
-    public void before() {
-        assertThat(getDataBroker()).isNotNull();
-    }
-
-    @Test
-    public void aEnsureDataBrokerIsNotNull() {
-        assertThat(getDataBroker()).isNotNull();
-    }
-
-    @Test
-    public void bPutSomethingIntoDataStore() throws Exception {
-        writeInitialState();
-        assertThat(isTopInDataStore()).isTrue();
-    }
-
-    @Test
-    public void cEnsureDataStoreIsEmptyAgainInNewTest() throws ReadFailedException {
-        assertThat(isTopInDataStore()).isFalse();
-    }
-
-    // copy/pasted from Bug1125RegressionTest.writeInitialState()
-    private void writeInitialState() throws TransactionCommitFailedException {
-        WriteTransaction initialTx = getDataBroker().newWriteOnlyTransaction();
-        initialTx.put(LogicalDatastoreType.OPERATIONAL, TOP_PATH, new TopBuilder().build());
-        TreeComplexUsesAugment fooAugment = new TreeComplexUsesAugmentBuilder()
-                .setContainerWithUses(new ContainerWithUsesBuilder().setLeafFromGrouping("foo").build()).build();
-        initialTx.put(LogicalDatastoreType.OPERATIONAL, path(TOP_FOO_KEY), topLevelList(TOP_FOO_KEY, fooAugment));
-        initialTx.submit().checkedGet();
-    }
-
-    private boolean isTopInDataStore() throws ReadFailedException {
-        try (ReadOnlyTransaction readTx = getDataBroker().newReadOnlyTransaction()) {
-            return readTx.read(LogicalDatastoreType.OPERATIONAL, TOP_PATH).checkedGet().isPresent();
-        }
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/tests/DataBrokerTestModuleTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/tests/DataBrokerTestModuleTest.java
deleted file mode 100644
index 4fbb851..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2016 Red Hat, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.test.tests;
-
-import static com.google.common.truth.Truth.assertThat;
-
-import org.junit.Ignore;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.test.DataBrokerTestModule;
-
-/**
- * Integration tests the DataBrokerTestModule.
- *
- * @author Michael Vorburger
- */
-public class DataBrokerTestModuleTest {
-
-    @Test
-    public void ensureDataBrokerTestModuleWorksWithoutException() {
-        assertThat(DataBrokerTestModule.dataBroker()).isNotNull();
-    }
-
-    @Test
-    @Ignore // This test is flaky on build server VMs (although fine locally)
-    public void slowYangLoadingShouldOnlyHappenOnceAndNotDelayEachDataBroker() {
-        // TODO Write a lil' Timer utility class to make this kind of timing test code more readable
-        long startAtMs = System.currentTimeMillis();
-        DataBrokerTestModule.dataBroker();
-        long firstDataBrokerAtMs = System.currentTimeMillis();
-        long firstDataBrokerDurationMs = firstDataBrokerAtMs - startAtMs;
-        DataBrokerTestModule.dataBroker();
-        long secondDataBrokerDurationMs = System.currentTimeMillis() - firstDataBrokerAtMs;
-        assertThat(Math.abs(secondDataBrokerDurationMs - firstDataBrokerDurationMs))
-                .isLessThan(firstDataBrokerDurationMs / 4);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/AbstractDataServiceTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/AbstractDataServiceTest.java
deleted file mode 100644
index aa80e11..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test;
-
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.junit.Before;
-import org.opendaylight.controller.sal.binding.test.util.BindingBrokerTestFactory;
-import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
-
-public abstract class AbstractDataServiceTest {
-
-    protected BindingTestContext testContext;
-
-    @Before
-    public void setUp() {
-        ListeningExecutorService executor = MoreExecutors.newDirectExecutorService();
-        BindingBrokerTestFactory factory = new BindingBrokerTestFactory();
-        factory.setExecutor(executor);
-        factory.setStartWithParsedSchema(getStartWithSchema());
-        testContext = factory.getTestContext();
-        testContext.start();
-    }
-
-    protected boolean getStartWithSchema() {
-        return true;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/AugmentationVerifier.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/AugmentationVerifier.java
deleted file mode 100644
index 77b34a8..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test;
-
-import static org.junit.Assert.assertNotNull;
-
-import org.opendaylight.yangtools.yang.binding.Augmentable;
-import org.opendaylight.yangtools.yang.binding.Augmentation;
-
-public class AugmentationVerifier<T extends Augmentable<T>> {
-
-    private final T object;
-
-    public AugmentationVerifier(final T objectToVerify) {
-        this.object = objectToVerify;
-    }
-
-    public AugmentationVerifier<T> assertHasAugmentation(final Class<? extends Augmentation<T>> augmentation) {
-        assertHasAugmentation(object, augmentation);
-        return this;
-    }
-
-    public static <T extends Augmentable<T>> void assertHasAugmentation(final T object,
-            final Class<? extends Augmentation<T>> augmentation) {
-        assertNotNull(object);
-        assertNotNull("Augmentation " + augmentation.getSimpleName() + " is not present.",
-                object.augmentation(augmentation));
-    }
-
-    public static <T extends Augmentable<T>> AugmentationVerifier<T> from(final T obj) {
-        return new AugmentationVerifier<>(obj);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/BarListener.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/BarListener.java
deleted file mode 100644
index 6c13372..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-public interface BarListener extends NotificationListener {
-
-    void onBarUpdate(BarUpdate notification);
-
-    void onFlowDelete(FlowDelete notification);
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/BarUpdate.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/BarUpdate.java
deleted file mode 100644
index 45f0313..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-public interface BarUpdate extends Grouping,Notification {
-
-
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/CompositeListener.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/CompositeListener.java
deleted file mode 100644
index d9f752f..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-public interface CompositeListener extends FooListener,BarListener {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FlowDelete.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FlowDelete.java
deleted file mode 100644
index 4a28aaa..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-public interface FlowDelete extends Notification{
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooListener.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooListener.java
deleted file mode 100644
index cb8020e..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
-
-public interface FooListener extends NotificationListener {
-
-    void onFooUpdate(FooUpdate notification);
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooService.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooService.java
deleted file mode 100644
index 2bd2119..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import java.util.concurrent.Future;
-
-import org.opendaylight.yangtools.yang.binding.RpcService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-
-public interface FooService extends RpcService {
-
-    Future<RpcResult<Void>> foo();
-
-    Future<RpcResult<Void>> simple(SimpleInput obj);
-
-    Future<RpcResult<Void>> inheritedContext(InheritedContextInput obj);
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooUpdate.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/FooUpdate.java
deleted file mode 100644
index faf4576..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.Notification;
-
-public interface FooUpdate extends Notification {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/Grouping.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/Grouping.java
deleted file mode 100644
index 5864845..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext;
-
-public interface Grouping {
-
-    @RoutingContext(BaseIdentity.class)
-    InstanceIdentifier<?> getInheritedIdentifier();
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/InheritedContextInput.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/InheritedContextInput.java
deleted file mode 100644
index 90375a0..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-public interface InheritedContextInput extends Grouping {
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/ReferencableObject.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/ReferencableObject.java
deleted file mode 100644
index a8ff31c..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.ChildOf;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.DataRoot;
-import org.opendaylight.yangtools.yang.binding.Identifiable;
-
-public interface ReferencableObject extends DataObject, Identifiable<ReferencableObjectKey>,ChildOf<DataRoot> {
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/ReferencableObjectKey.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/ReferencableObjectKey.java
deleted file mode 100644
index 412f1bf..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.Identifier;
-
-public class ReferencableObjectKey implements Identifier<ReferencableObject> {
-    private static final long serialVersionUID = 1L;
-    final Integer value;
-
-    public ReferencableObjectKey(Integer value) {
-        this.value = value;
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + (value == null ? 0 : value.hashCode());
-        return result;
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (obj == null) {
-            return false;
-        }
-        if (getClass() != obj.getClass()) {
-            return false;
-        }
-        ReferencableObjectKey other = (ReferencableObjectKey) obj;
-        if (value == null) {
-            if (other.value != null) {
-                return false;
-            }
-        } else if (!value.equals(other.value)) {
-            return false;
-        }
-        return true;
-    }
-
-    @Override
-    public String toString() {
-        return "ReferencableObjectKey [value=" + value + "]";
-    }
-
-
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/SimpleInput.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/mock/SimpleInput.java
deleted file mode 100644
index 0a38c90..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.mock;
-
-import org.opendaylight.yangtools.yang.binding.Augmentable;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.annotations.RoutingContext;
-
-public interface SimpleInput extends DataObject,Augmentable<SimpleInput> {
-
-    @RoutingContext(BaseIdentity.class)
-    InstanceIdentifier<?> getIdentifier();
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/BindingBrokerTestFactory.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/BindingBrokerTestFactory.java
deleted file mode 100644
index 16c065b..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.util;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.ExecutorService;
-import javassist.ClassPool;
-
-@Beta
-public class BindingBrokerTestFactory {
-
-    private static final ClassPool CLASS_POOL = ClassPool.getDefault();
-    private boolean startWithParsedSchema = true;
-    private ExecutorService executor;
-    private ClassPool classPool;
-
-
-    public boolean isStartWithParsedSchema() {
-        return startWithParsedSchema;
-    }
-
-    public void setStartWithParsedSchema(final boolean startWithParsedSchema) {
-        this.startWithParsedSchema = startWithParsedSchema;
-    }
-
-    public ExecutorService getExecutor() {
-        return executor;
-    }
-
-    public void setExecutor(final ExecutorService executor) {
-        this.executor = executor;
-    }
-
-
-    public BindingTestContext getTestContext() {
-        Preconditions.checkState(executor != null, "Executor is not set.");
-        ListeningExecutorService listenableExecutor = MoreExecutors.listeningDecorator(executor);
-        return new BindingTestContext(listenableExecutor, getClassPool(),startWithParsedSchema);
-    }
-
-    public ClassPool getClassPool() {
-        if (classPool == null) {
-            return CLASS_POOL;
-        }
-
-        return classPool;
-    }
-
-    public void setClassPool(final ClassPool classPool) {
-        this.classPool = classPool;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/BindingTestContext.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/BindingTestContext.java
deleted file mode 100644
index 8f4794e..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.util;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import com.google.common.annotations.Beta;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Set;
-import javassist.ClassPool;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.MountPointService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.binding.compat.HeliumNotificationProviderServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.compat.HeliumRpcProviderRegistry;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMMountPointServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationPublishServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMNotificationServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcProviderServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingDOMRpcServiceAdapter;
-import org.opendaylight.controller.md.sal.binding.impl.BindingToNormalizedNodeCodec;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter;
-import org.opendaylight.controller.md.sal.dom.broker.impl.DOMRpcRouter;
-import org.opendaylight.controller.md.sal.dom.broker.impl.SerializedDOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.broker.impl.mount.DOMMountPointServiceImpl;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.impl.RootBindingAwareBroker;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.DataObjectSerializerGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.StreamWriterGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.mdsal.binding.generator.impl.ModuleInfoBackedContext;
-import org.opendaylight.mdsal.binding.generator.util.JavassistUtils;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-@Beta
-public class BindingTestContext implements AutoCloseable {
-    private BindingToNormalizedNodeCodec codec;
-
-    private RootBindingAwareBroker baBrokerImpl;
-
-    private HeliumNotificationProviderServiceAdapter baNotifyImpl;
-
-
-    private final ListeningExecutorService executor;
-    private final ClassPool classPool;
-
-    private final boolean startWithSchema;
-
-    private DOMMountPointService biMountImpl;
-
-    private ImmutableMap<LogicalDatastoreType, DOMStore> newDatastores;
-
-    private DOMDataBroker newDOMDataBroker;
-
-    private final MockSchemaService mockSchemaService = new MockSchemaService();
-
-    private DataBroker dataBroker;
-
-    private RpcConsumerRegistry baConsumerRpc;
-
-    private BindingDOMRpcProviderServiceAdapter baProviderRpc;
-    private DOMRpcRouter domRouter;
-    private org.opendaylight.mdsal.dom.broker.DOMRpcRouter delegateDomRouter;
-
-    private NotificationPublishService publishService;
-
-    private NotificationService listenService;
-
-    private DOMNotificationPublishService domPublishService;
-
-    private DOMNotificationService domListenService;
-
-    private Set<YangModuleInfo> schemaModuleInfos;
-
-    public DOMDataBroker getDomAsyncDataBroker() {
-        return this.newDOMDataBroker;
-    }
-
-    public BindingToNormalizedNodeCodec getCodec() {
-        return this.codec;
-    }
-
-    protected BindingTestContext(final ListeningExecutorService executor, final ClassPool classPool,
-            final boolean startWithSchema) {
-        this.executor = executor;
-        this.classPool = classPool;
-        this.startWithSchema = startWithSchema;
-    }
-
-    public void startDomDataBroker() {
-    }
-
-    public void startNewDataBroker() {
-        checkState(this.executor != null, "Executor needs to be set");
-        checkState(this.newDOMDataBroker != null, "DOM Data Broker must be set");
-        this.dataBroker = new BindingDOMDataBrokerAdapter(this.newDOMDataBroker, this.codec);
-    }
-
-    public void startNewDomDataBroker() {
-        checkState(this.executor != null, "Executor needs to be set");
-        final InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
-            MoreExecutors.newDirectExecutorService());
-        final InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
-            MoreExecutors.newDirectExecutorService());
-        this.newDatastores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
-                .put(LogicalDatastoreType.OPERATIONAL, operStore)
-                .put(LogicalDatastoreType.CONFIGURATION, configStore)
-                .build();
-
-        this.newDOMDataBroker = new SerializedDOMDataBroker(this.newDatastores, this.executor);
-
-        this.mockSchemaService.registerSchemaContextListener(configStore);
-        this.mockSchemaService.registerSchemaContextListener(operStore);
-    }
-
-    public void startBindingDataBroker() {
-
-    }
-
-    public void startBindingBroker() {
-        checkState(this.executor != null, "Executor needs to be set");
-        checkState(this.baNotifyImpl != null, "Notification Service must be started");
-
-        this.baConsumerRpc = new BindingDOMRpcServiceAdapter(getDomRpcInvoker(), this.codec);
-        this.baProviderRpc = new BindingDOMRpcProviderServiceAdapter(getDomRpcRegistry(), this.codec);
-
-        this.baBrokerImpl = new RootBindingAwareBroker("test");
-
-        final MountPointService mountService = new BindingDOMMountPointServiceAdapter(this.biMountImpl, this.codec);
-        this.baBrokerImpl.setMountService(mountService);
-        this.baBrokerImpl.setRpcBroker(new HeliumRpcProviderRegistry(this.baConsumerRpc, this.baProviderRpc));
-        this.baBrokerImpl.setNotificationBroker(this.baNotifyImpl);
-        this.baBrokerImpl.start();
-    }
-
-    public void startForwarding() {
-
-    }
-
-    public void startBindingToDomMappingService() {
-        checkState(this.classPool != null, "ClassPool needs to be present");
-
-        final DataObjectSerializerGenerator generator = StreamWriterGenerator.create(
-                JavassistUtils.forClassPool(this.classPool));
-        final BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(generator);
-        final GeneratedClassLoadingStrategy loading = GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy();
-        this.codec = new BindingToNormalizedNodeCodec(loading,  codecRegistry);
-        this.mockSchemaService.registerSchemaContextListener(this.codec);
-    }
-
-    private void updateYangSchema(final Set<YangModuleInfo> moduleInfos) {
-        this.mockSchemaService.changeSchema(getContext(moduleInfos));
-    }
-
-    private static SchemaContext getContext(final Set<YangModuleInfo> moduleInfos) {
-        final ModuleInfoBackedContext ctx = ModuleInfoBackedContext.create();
-        ctx.addModuleInfos(moduleInfos);
-        return ctx.tryToCreateSchemaContext().get();
-    }
-
-    public void start() {
-        startNewDomDataBroker();
-
-        startDomBroker();
-        startDomMountPoint();
-        startBindingToDomMappingService();
-        startNewDataBroker();
-        startBindingNotificationBroker();
-        startBindingBroker();
-
-        startForwarding();
-
-        if (schemaModuleInfos != null) {
-            updateYangSchema(schemaModuleInfos);
-        } else if (this.startWithSchema) {
-            loadYangSchemaFromClasspath();
-        }
-    }
-
-    private void startDomMountPoint() {
-        this.biMountImpl = new DOMMountPointServiceImpl();
-    }
-
-    private void startDomBroker() {
-        checkState(this.executor != null);
-
-        delegateDomRouter = org.opendaylight.mdsal.dom.broker.DOMRpcRouter.newInstance(mockSchemaService);
-        this.domRouter = new DOMRpcRouter(delegateDomRouter.getRpcService(), delegateDomRouter.getRpcProviderService());
-    }
-
-    public void startBindingNotificationBroker() {
-        checkState(this.executor != null);
-        final DOMNotificationRouter router = DOMNotificationRouter.create(16);
-        this.domPublishService = router;
-        this.domListenService = router;
-        this.publishService = new BindingDOMNotificationPublishServiceAdapter(this.codec, this.domPublishService);
-        this.listenService = new BindingDOMNotificationServiceAdapter(this.codec, this.domListenService);
-        this.baNotifyImpl = new HeliumNotificationProviderServiceAdapter(this.publishService,this.listenService);
-
-    }
-
-    public void loadYangSchemaFromClasspath() {
-        updateYangSchema(BindingReflections.loadModuleInfos());
-    }
-
-    public RpcProviderRegistry getBindingRpcRegistry() {
-        return this.baBrokerImpl.getRoot();
-    }
-
-    public DOMRpcProviderService getDomRpcRegistry() {
-        return this.domRouter;
-    }
-
-    public DOMRpcService getDomRpcInvoker() {
-        return this.domRouter;
-    }
-
-    public org.opendaylight.mdsal.dom.broker.DOMRpcRouter getDelegateDomRouter() {
-        return delegateDomRouter;
-    }
-
-    @Override
-    public void close() {
-
-    }
-
-    public MountPointService getBindingMountPointService() {
-        return this.baBrokerImpl.getMountService();
-    }
-
-    public DOMMountPointService getDomMountProviderService() {
-        return this.biMountImpl;
-    }
-
-    public DataBroker getDataBroker() {
-        return this.dataBroker;
-    }
-
-    public void setSchemaModuleInfos(Set<YangModuleInfo> moduleInfos) {
-        this.schemaModuleInfos = moduleInfos;
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/MockSchemaService.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/MockSchemaService.java
deleted file mode 100644
index 39a6db2..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.util;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.api.DOMSchemaServiceExtension;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.ListenerRegistry;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-
-public final class MockSchemaService implements DOMSchemaService, SchemaContextProvider {
-
-    private SchemaContext schemaContext;
-
-    ListenerRegistry<SchemaContextListener> listeners = ListenerRegistry.create();
-
-    @Override
-    public synchronized SchemaContext getGlobalContext() {
-        return schemaContext;
-    }
-
-    @Override
-    public synchronized SchemaContext getSessionContext() {
-        return schemaContext;
-    }
-
-    @Override
-    public ListenerRegistration<SchemaContextListener> registerSchemaContextListener(
-            final SchemaContextListener listener) {
-        return listeners.register(listener);
-    }
-
-    @Override
-    public synchronized SchemaContext getSchemaContext() {
-        return schemaContext;
-    }
-
-    @Override
-    public ClassToInstanceMap<DOMSchemaServiceExtension> getExtensions() {
-        return ImmutableClassToInstanceMap.of();
-    }
-
-    public synchronized void changeSchema(final SchemaContext newContext) {
-        schemaContext = newContext;
-        for (ListenerRegistration<SchemaContextListener> listener : listeners) {
-            listener.getInstance().onGlobalContextUpdated(schemaContext);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/pom.xml b/opendaylight/md-sal/sal-binding-dom-it/pom.xml
deleted file mode 100644
index f79e789..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.odlparent</groupId>
-    <artifactId>odlparent</artifactId>
-    <version>4.0.9</version>
-    <relativePath/>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-binding-dom-it</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>jar</packaging>
-
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>org.opendaylight.yangtools</groupId>
-        <artifactId>yangtools-artifacts</artifactId>
-        <version>2.1.8</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-      <dependency>
-        <groupId>org.opendaylight.mdsal</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>3.0.6</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-      <dependency>
-        <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-artifacts</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
-        <type>pom</type>
-        <scope>import</scope>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
-  <dependencies>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>yang-binding</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-broker-impl</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-broker-impl</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.ops4j.pax.exam</groupId>
-      <artifactId>pax-exam-container-native</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-test-model</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal.model</groupId>
-      <artifactId>opendaylight-l2-types</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.jacoco</groupId>
-        <artifactId>jacoco-maven-plugin</artifactId>
-        <configuration>
-          <includes>
-            <include>org.opendaylight.controller.*</include>
-          </includes>
-        </configuration>
-        <executions>
-          <execution>
-            <id>pre-test</id>
-            <goals>
-              <goal>prepare-agent</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>post-test</id>
-            <goals>
-              <goal>report</goal>
-            </goals>
-            <phase>test</phase>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/md/sal/binding/data/ConcurrentImplicitCreateTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/md/sal/binding/data/ConcurrentImplicitCreateTest.java
deleted file mode 100644
index 6959912..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.data;
-
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * FIXME: This test should be moved to sal-binding-broker and rewritten to use the new DataBroker API.
- */
-public class ConcurrentImplicitCreateTest extends AbstractDataServiceTest {
-
-    private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
-    private static final TopLevelListKey BAR_KEY = new TopLevelListKey("bar");
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
-    private static final InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, FOO_KEY);
-    private static final InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, BAR_KEY);
-
-    @Test
-    public void testConcurrentCreate() throws InterruptedException, ExecutionException, TimeoutException {
-
-        DataBroker dataBroker = testContext.getDataBroker();
-        WriteTransaction fooTx = dataBroker.newWriteOnlyTransaction();
-        WriteTransaction barTx = dataBroker.newWriteOnlyTransaction();
-
-        fooTx.put(LogicalDatastoreType.OPERATIONAL, FOO_PATH, new TopLevelListBuilder().withKey(FOO_KEY).build());
-        barTx.put(LogicalDatastoreType.OPERATIONAL, BAR_PATH, new TopLevelListBuilder().withKey(BAR_KEY).build());
-
-        fooTx.submit().get(5, TimeUnit.SECONDS);
-        barTx.submit().get(5, TimeUnit.SECONDS);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/md/sal/binding/data/WildcardedDataChangeListenerTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/md/sal/binding/data/WildcardedDataChangeListenerTest.java
deleted file mode 100644
index 4f11c50..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.data;
-
-import static org.junit.Assert.assertFalse;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Collections;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugmentBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUses;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUsesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUses;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUsesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ListViaUsesKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-/**
- * FIXME: This test should be moved to the compat test suite.
- */
-public class WildcardedDataChangeListenerTest extends AbstractDataTreeChangeListenerTest {
-
-    private static final TopLevelListKey TOP_LEVEL_LIST_0_KEY = new TopLevelListKey("test:0");
-    private static final TopLevelListKey TOP_LEVEL_LIST_1_KEY = new TopLevelListKey("test:1");
-
-    protected static final InstanceIdentifier<ListViaUses> DEEP_WILDCARDED_PATH = InstanceIdentifier
-            .builder(Top.class)
-            .child(TopLevelList.class)
-            .augmentation(TreeComplexUsesAugment.class)
-            .child(ListViaUses.class)
-            .build();
-
-    private static final InstanceIdentifier<TreeComplexUsesAugment> NODE_0_TCU_PATH = InstanceIdentifier
-            .builder(Top.class)
-            .child(TopLevelList.class, TOP_LEVEL_LIST_0_KEY)
-            .augmentation(TreeComplexUsesAugment.class)
-            .build();
-
-    private static final InstanceIdentifier<TreeComplexUsesAugment> NODE_1_TCU_PATH = InstanceIdentifier
-            .builder(Top.class)
-            .child(TopLevelList.class, TOP_LEVEL_LIST_1_KEY)
-            .augmentation(TreeComplexUsesAugment.class)
-            .build();
-
-
-    private static final ListViaUsesKey LIST_VIA_USES_KEY = new ListViaUsesKey("test");
-
-    private static final InstanceIdentifier<ListViaUses> NODE_0_LVU_PATH = NODE_0_TCU_PATH.child(ListViaUses.class,
-        LIST_VIA_USES_KEY);
-
-    private static final InstanceIdentifier<ListViaUses> NODE_1_LVU_PATH = NODE_1_TCU_PATH.child(ListViaUses.class,
-        LIST_VIA_USES_KEY);
-
-    private static final InstanceIdentifier<ContainerWithUses> NODE_0_CWU_PATH =
-            NODE_0_TCU_PATH.child(ContainerWithUses.class);
-
-    private static final ContainerWithUses CWU = new ContainerWithUsesBuilder()
-            .setLeafFromGrouping("some container value").build();
-
-    private static final ListViaUses LVU = new ListViaUsesBuilder()
-            .withKey(LIST_VIA_USES_KEY).setName("john").build();
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Top.class),
-                BindingReflections.getModuleInfo(TreeComplexUsesAugment.class));
-    }
-
-    @Test
-    public void testSeparateWrites() throws InterruptedException, TimeoutException, ExecutionException {
-
-        DataBroker dataBroker = getDataBroker();
-
-        final TestListener<ListViaUses> listener = createListener(OPERATIONAL, DEEP_WILDCARDED_PATH,
-            dataTreeModification -> NODE_0_LVU_PATH.equals(dataTreeModification.getRootPath().getRootIdentifier()),
-            dataTreeModification -> NODE_1_LVU_PATH.equals(dataTreeModification.getRootPath().getRootIdentifier()));
-
-        final WriteTransaction transaction = dataBroker.newWriteOnlyTransaction();
-        transaction.put(OPERATIONAL, NODE_0_CWU_PATH, CWU, true);
-        transaction.put(OPERATIONAL, NODE_0_LVU_PATH, LVU, true);
-        transaction.put(OPERATIONAL, NODE_1_LVU_PATH, LVU, true);
-        transaction.submit().get(5, TimeUnit.SECONDS);
-
-        listener.verify();
-    }
-
-    @Test
-    public void testWriteByReplace() throws InterruptedException, TimeoutException, ExecutionException {
-
-        DataBroker dataBroker = getDataBroker();
-
-        final TestListener<ListViaUses> listener = createListener(OPERATIONAL, DEEP_WILDCARDED_PATH,
-            dataTreeModification -> NODE_0_LVU_PATH.equals(dataTreeModification.getRootPath().getRootIdentifier()),
-            dataTreeModification -> NODE_1_LVU_PATH.equals(dataTreeModification.getRootPath().getRootIdentifier()));
-
-        final WriteTransaction cwuTx = dataBroker.newWriteOnlyTransaction();
-        cwuTx.put(OPERATIONAL, NODE_0_CWU_PATH, CWU, true);
-        cwuTx.submit().get(5, TimeUnit.SECONDS);
-
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        assertFalse(listener.hasChanges());
-
-        final WriteTransaction lvuTx = dataBroker.newWriteOnlyTransaction();
-
-        TreeComplexUsesAugment tcua = new TreeComplexUsesAugmentBuilder()
-                .setListViaUses(Collections.singletonList(LVU)).build();
-
-        lvuTx.put(OPERATIONAL, NODE_0_TCU_PATH, tcua, true);
-        lvuTx.put(OPERATIONAL, NODE_1_LVU_PATH, LVU, true);
-        lvuTx.submit().get(5, TimeUnit.SECONDS);
-
-        listener.verify();
-    }
-
-    @Test
-    public void testChangeOnReplaceWithSameValue() throws InterruptedException, TimeoutException, ExecutionException {
-
-        DataBroker dataBroker = getDataBroker();
-
-        // Write initial state to NODE_0_LVU_PATH
-        final WriteTransaction transaction = dataBroker.newWriteOnlyTransaction();
-        transaction.put(OPERATIONAL, NODE_0_LVU_PATH, LVU, true);
-        transaction.submit().get(5, TimeUnit.SECONDS);
-
-        final TestListener<ListViaUses> listener = createListener(OPERATIONAL, DEEP_WILDCARDED_PATH,
-            dataTreeModification -> NODE_1_LVU_PATH.equals(dataTreeModification.getRootPath().getRootIdentifier()),
-            dataTreeModification -> NODE_0_LVU_PATH.equals(dataTreeModification.getRootPath().getRootIdentifier()),
-            dataTreeModification -> NODE_1_LVU_PATH.equals(dataTreeModification.getRootPath().getRootIdentifier()));
-
-        final WriteTransaction secondTx = dataBroker.newWriteOnlyTransaction();
-        secondTx.put(OPERATIONAL, NODE_0_LVU_PATH, LVU, true);
-        secondTx.put(OPERATIONAL, NODE_1_LVU_PATH, LVU, true);
-        secondTx.submit().get(5, TimeUnit.SECONDS);
-
-        listener.verify();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/DeleteNestedAugmentationListenParentTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/DeleteNestedAugmentationListenParentTest.java
deleted file mode 100644 (file)
index 81d7e9d..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.bugfix;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.List11SimpleAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.List11SimpleAugmentBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Builder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-public class DeleteNestedAugmentationListenParentTest extends AbstractDataTreeChangeListenerTest {
-
-    private static final TopLevelListKey FOO_KEY = new TopLevelListKey("foo");
-
-    private static final List1Key LIST1_KEY = new List1Key("one");
-
-    private static final List11Key LIST11_KEY = new List11Key(100);
-
-    private static final InstanceIdentifier<TllComplexAugment> TLL_COMPLEX_AUGMENT_PATH = InstanceIdentifier
-            .builder(Top.class)
-            .child(TopLevelList.class, FOO_KEY)
-            .augmentation(TllComplexAugment.class)
-            .build();
-
-    private static final InstanceIdentifier<List11> LIST11_PATH = TLL_COMPLEX_AUGMENT_PATH.builder()
-            .child(List1.class, LIST1_KEY)
-            .child(List11.class, LIST11_KEY)
-            .build();
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Top.class),
-                BindingReflections.getModuleInfo(List11SimpleAugment.class));
-    }
-
-    @Test
-    public void deleteChildListenParent() throws InterruptedException, ExecutionException, TimeoutException {
-        DataBroker dataBroker = getDataBroker();
-        final WriteTransaction initTx = dataBroker.newWriteOnlyTransaction();
-
-        List11 list11Before = createList11();
-        initTx.put(LogicalDatastoreType.OPERATIONAL, LIST11_PATH, list11Before, true);
-        initTx.submit().get(5, TimeUnit.SECONDS);
-
-        List11 list11After = new List11Builder().withKey(LIST11_KEY).setAttrStr("good").build();
-
-        final TestListener<List11> listener = createListener(LogicalDatastoreType.OPERATIONAL, LIST11_PATH,
-                added(LIST11_PATH, list11Before), subtreeModified(LIST11_PATH, list11Before, list11After));
-
-        final WriteTransaction deleteTx = dataBroker.newWriteOnlyTransaction();
-        deleteTx.delete(LogicalDatastoreType.OPERATIONAL, LIST11_PATH.augmentation(List11SimpleAugment.class));
-        deleteTx.submit().get(5, TimeUnit.SECONDS);
-
-        listener.verify();
-    }
-
-    private static List11 createList11() {
-        List11Builder builder = new List11Builder()
-            .withKey(LIST11_KEY)
-            .addAugmentation(List11SimpleAugment.class, new List11SimpleAugmentBuilder()
-                    .setAttrStr2("bad").build())
-            .setAttrStr("good");
-        return builder.build();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/WriteParentListenAugmentTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/WriteParentListenAugmentTest.java
deleted file mode 100644 (file)
index 0983614..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.bugfix;
-
-import static org.junit.Assert.assertEquals;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
-import com.google.common.collect.ImmutableSet;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.binding.test.AbstractDataTreeChangeListenerTest;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugmentBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.complex.from.grouping.ContainerWithUsesBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-
-public class WriteParentListenAugmentTest extends AbstractDataTreeChangeListenerTest {
-
-    private static final String TLL_NAME = "foo";
-
-    private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
-    private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class)
-            .child(TopLevelList.class, TLL_KEY).build();
-
-    private static final InstanceIdentifier<TreeComplexUsesAugment> AUGMENT_WILDCARDED_PATH = InstanceIdentifier
-            .builder(Top.class).child(TopLevelList.class).augmentation(TreeComplexUsesAugment.class).build();
-
-    private static final InstanceIdentifier<TreeComplexUsesAugment> AUGMENT_TLL_PATH = InstanceIdentifier
-            .builder(Top.class).child(TopLevelList.class, TLL_KEY).augmentation(TreeComplexUsesAugment.class).build();
-
-    @Override
-    protected Set<YangModuleInfo> getModuleInfos() throws Exception {
-        return ImmutableSet.of(BindingReflections.getModuleInfo(Top.class),
-                BindingReflections.getModuleInfo(TreeComplexUsesAugment.class));
-    }
-
-    @Test
-    public void writeNodeListenAugment() throws Exception {
-
-        DataBroker dataBroker = getDataBroker();
-
-        final TreeComplexUsesAugment treeComplexUsesAugment = treeComplexUsesAugment("one");
-
-        final TestListener<TreeComplexUsesAugment> listener = createListener(OPERATIONAL, AUGMENT_WILDCARDED_PATH,
-                added(AUGMENT_TLL_PATH, treeComplexUsesAugment));
-
-        final WriteTransaction transaction = dataBroker.newWriteOnlyTransaction();
-
-        TopLevelList tll = new TopLevelListBuilder().withKey(TLL_KEY)
-                .addAugmentation(TreeComplexUsesAugment.class, treeComplexUsesAugment).build();
-        transaction.put(OPERATIONAL, TLL_INSTANCE_ID_BA, tll, true);
-        transaction.submit().get(5, TimeUnit.SECONDS);
-
-        listener.verify();
-
-        final WriteTransaction transaction2 = dataBroker.newWriteOnlyTransaction();
-        transaction2.put(OPERATIONAL, AUGMENT_TLL_PATH, treeComplexUsesAugment("two"));
-        transaction2.submit().get(5, TimeUnit.SECONDS);
-
-        TreeComplexUsesAugment readedAug = dataBroker.newReadOnlyTransaction().read(
-                OPERATIONAL, AUGMENT_TLL_PATH).get(5, TimeUnit.SECONDS).get();
-        assertEquals("two", readedAug.getContainerWithUses().getLeafFromGrouping());
-    }
-
-    private static TreeComplexUsesAugment treeComplexUsesAugment(final String value) {
-        return new TreeComplexUsesAugmentBuilder()
-                .setContainerWithUses(new ContainerWithUsesBuilder().setLeafFromGrouping(value).build())
-                .build();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/WriteParentReadChildTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/WriteParentReadChildTest.java
deleted file mode 100644 (file)
index ccedb89..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.sal.binding.test.bugfix;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableList;
-import java.util.concurrent.TimeUnit;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Builder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Builder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class WriteParentReadChildTest extends AbstractDataServiceTest {
-
-    private static final int LIST11_ID = 1234;
-    private static final String LIST1_NAME = "bar";
-    private static final String TLL_NAME = "foo";
-
-    private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
-    private static final List11Key LIST11_KEY = new List11Key(LIST11_ID);
-    private static final List1Key LIST1_KEY = new List1Key(LIST1_NAME);
-
-    private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class)
-            .child(TopLevelList.class, TLL_KEY).build();
-
-    private static final InstanceIdentifier<List1> LIST1_INSTANCE_ID_BA =
-            TLL_INSTANCE_ID_BA.builder()
-            .augmentation(TllComplexAugment.class).child(List1.class, LIST1_KEY).build();
-
-    private static final InstanceIdentifier<? extends DataObject> LIST11_INSTANCE_ID_BA =
-            LIST1_INSTANCE_ID_BA.child(List11.class, LIST11_KEY);
-
-    /**
-     * The scenario writes a parent node which also contains child items and
-     * then reads the child directly, by specifying the path to the child.
-     * The expected behaviour is that the child is returned.
-     */
-    @Test
-    public void writeParentReadChild() throws Exception {
-
-        DataBroker dataBroker = testContext.getDataBroker();
-        final WriteTransaction transaction = dataBroker.newWriteOnlyTransaction();
-
-        List11 list11 = new List11Builder().withKey(LIST11_KEY).setAttrStr("primary").build();
-        List1 list1 = new List1Builder().withKey(LIST1_KEY).setList11(ImmutableList.of(list11)).build();
-
-        transaction.put(LogicalDatastoreType.OPERATIONAL, LIST1_INSTANCE_ID_BA, list1, true);
-        transaction.submit().get(5, TimeUnit.SECONDS);
-
-        Optional<List1> readList1 = dataBroker.newReadOnlyTransaction().read(LogicalDatastoreType.OPERATIONAL,
-                LIST1_INSTANCE_ID_BA).get(1000, TimeUnit.MILLISECONDS);
-        assertTrue(readList1.isPresent());
-
-        Optional<? extends DataObject> readList11 = dataBroker.newReadOnlyTransaction().read(
-                LogicalDatastoreType.OPERATIONAL, LIST11_INSTANCE_ID_BA).get(5, TimeUnit.SECONDS);
-        assertNotNull("Readed flow should not be null.",readList11);
-        assertTrue(readList11.isPresent());
-        assertEquals(list11, readList11.get());
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/package-info.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/bugfix/package-info.java
deleted file mode 100644 (file)
index 84559d5..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-/**
- * This test suite exercises the Hydrogen-level API and provides regression coverage for most of it.
- *
- * FIXME: this test suite needs to be refactored to use the new DataBroker API,
- * be moved to sal-binding-broker-impl and use models only from sal-test-model.
- *
- */
-package org.opendaylight.controller.sal.binding.test.bugfix;
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/BrokerIntegrationTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/BrokerIntegrationTest.java
deleted file mode 100644 (file)
index 306cfcf..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.connect.dom;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.base.Optional;
-import java.util.concurrent.TimeUnit;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.sal.binding.test.AbstractDataServiceTest;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-public class BrokerIntegrationTest extends AbstractDataServiceTest {
-
-    private static final TopLevelListKey TLL_FOO_KEY = new TopLevelListKey("foo");
-    private static final TopLevelListKey TLL_BAR_KEY = new TopLevelListKey("bar");
-    private static final TopLevelListKey TLL_BAZ_KEY = new TopLevelListKey("baz");
-    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.builder(Top.class).build();
-    private static final InstanceIdentifier<TopLevelList> FOO_PATH = TOP_PATH.child(TopLevelList.class, TLL_FOO_KEY);
-    private static final InstanceIdentifier<TopLevelList> BAR_PATH = TOP_PATH.child(TopLevelList.class, TLL_BAR_KEY);
-    private static final InstanceIdentifier<TopLevelList> BAZ_PATH = TOP_PATH.child(TopLevelList.class, TLL_BAZ_KEY);
-
-    @Test
-    public void simpleModifyOperation() throws Exception {
-
-        DataBroker dataBroker = testContext.getDataBroker();
-        Optional<TopLevelList> tllFoo = dataBroker.newReadOnlyTransaction().read(
-                LogicalDatastoreType.CONFIGURATION, FOO_PATH).get(5, TimeUnit.SECONDS);
-        assertFalse(tllFoo.isPresent());
-
-        TopLevelList tllFooData = createTll(TLL_FOO_KEY);
-
-        final WriteTransaction transaction = dataBroker.newWriteOnlyTransaction();
-        transaction.put(LogicalDatastoreType.CONFIGURATION, FOO_PATH, tllFooData);
-        transaction.submit().get(5, TimeUnit.SECONDS);
-
-        Optional<TopLevelList> readedData = dataBroker.newReadOnlyTransaction().read(
-                LogicalDatastoreType.CONFIGURATION, FOO_PATH).get(5, TimeUnit.SECONDS);
-        assertTrue(readedData.isPresent());
-        assertEquals(tllFooData.key(), readedData.get().key());
-
-        TopLevelList nodeBarData = createTll(TLL_BAR_KEY);
-        TopLevelList nodeBazData = createTll(TLL_BAZ_KEY);
-
-        final WriteTransaction insertMoreTr = dataBroker.newWriteOnlyTransaction();
-        insertMoreTr.put(LogicalDatastoreType.CONFIGURATION, BAR_PATH, nodeBarData);
-        insertMoreTr.put(LogicalDatastoreType.CONFIGURATION, BAZ_PATH, nodeBazData);
-        insertMoreTr.submit().get(5, TimeUnit.SECONDS);
-
-        Optional<Top> top = dataBroker.newReadOnlyTransaction().read(LogicalDatastoreType.CONFIGURATION, TOP_PATH)
-                .get(5, TimeUnit.SECONDS);
-        assertTrue(top.isPresent());
-        assertEquals(3, top.get().getTopLevelList().size());
-
-        // Create a second transaction ...
-        final WriteTransaction removalTransaction = dataBroker.newWriteOnlyTransaction();
-
-        // ... remove the 'bar' entry ...
-        removalTransaction.delete(LogicalDatastoreType.CONFIGURATION, BAR_PATH);
-
-        // ... and commit it
-        removalTransaction.submit().get(5, TimeUnit.SECONDS);
-
-        Optional<TopLevelList> readedData2 = dataBroker.newReadOnlyTransaction().read(
-                LogicalDatastoreType.CONFIGURATION, BAR_PATH).get(5, TimeUnit.SECONDS);
-        assertFalse(readedData2.isPresent());
-    }
-
-    private static TopLevelList createTll(final TopLevelListKey key) {
-        return new TopLevelListBuilder().withKey(key).build();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/CrossBrokerMountPointTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/CrossBrokerMountPointTest.java
deleted file mode 100644 (file)
index b17c5d3..0000000
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.connect.dom;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.MountPoint;
-import org.opendaylight.controller.md.sal.binding.api.MountPointService;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.sal.binding.test.util.BindingBrokerTestFactory;
-import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.List11SimpleAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.TllComplexAugment;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.List1Key;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.aug.grouping.list1.List11Key;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.top.top.level.list.list1.list1._1.Cont;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-
-public class CrossBrokerMountPointTest {
-
-    private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
-    private static final String TLL_NAME = "foo:1";
-
-    private static final TopLevelListKey TLL_KEY = new TopLevelListKey(TLL_NAME);
-
-    private static final Map<QName, Object> TLL_KEY_BI = Collections.<QName, Object>singletonMap(TLL_NAME_QNAME,
-            TLL_NAME);
-
-    private static final InstanceIdentifier<TopLevelList> TLL_INSTANCE_ID_BA = InstanceIdentifier.builder(Top.class) //
-            .child(TopLevelList.class, TLL_KEY).build();
-
-    private static final List1Key LIST1_KEY = new List1Key("foo");
-    private static final List11Key LIST11_KEY = new List11Key(1);
-
-    private static final InstanceIdentifier<Cont> AUG_CONT_ID_BA = TLL_INSTANCE_ID_BA
-            .builder().augmentation(TllComplexAugment.class) //
-            .child(List1.class, LIST1_KEY) //
-            .child(List11.class, LIST11_KEY) //
-            .augmentation(List11SimpleAugment.class) //
-            .child(Cont.class) //
-            .build();
-
-    private static final QName AUG_CONT = QName.create(List11.QNAME,
-            Cont.QNAME.getLocalName());
-
-    private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier TLL_INSTANCE_ID_BI = //
-        YangInstanceIdentifier.builder() //
-            .node(Top.QNAME) //
-            .node(TopLevelList.QNAME) //
-            .nodeWithKey(TopLevelList.QNAME, TLL_KEY_BI) //
-            .build();
-
-    private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier GROUP_STATISTICS_ID_BI =
-        YangInstanceIdentifier
-            .builder(TLL_INSTANCE_ID_BI)
-            .nodeWithKey(QName.create(TllComplexAugment.QNAME, "list1"),
-                    QName.create(TllComplexAugment.QNAME, "attr-str"), LIST1_KEY.getAttrStr())
-            .nodeWithKey(QName.create(TllComplexAugment.QNAME, "list1-1"),
-                    QName.create(TllComplexAugment.QNAME, "attr-int"), LIST11_KEY.getAttrInt())
-            .node(AUG_CONT).build();
-
-    private BindingTestContext testContext;
-    private MountPointService bindingMountPointService;
-    private DOMMountPointService domMountPointService;
-
-    @Before
-    public void setup() {
-        final BindingBrokerTestFactory testFactory = new BindingBrokerTestFactory();
-        testFactory.setExecutor(MoreExecutors.newDirectExecutorService());
-        testFactory.setStartWithParsedSchema(true);
-        testContext = testFactory.getTestContext();
-
-        testContext.start();
-        bindingMountPointService = testContext.getBindingMountPointService();
-        domMountPointService = testContext.getDomMountProviderService();
-
-        // biRpcInvoker = testContext.getDomRpcInvoker();
-        assertNotNull(bindingMountPointService);
-        assertNotNull(domMountPointService);
-
-        // flowService = MessageCapturingFlowService.create(baRpcRegistry);
-    }
-
-    @Test
-    public void testMountPoint() throws ReadFailedException, TimeoutException {
-        final Integer attrIntValue = 500;
-        domMountPointService.createMountPoint(TLL_INSTANCE_ID_BI)
-            .addService(DOMDataBroker.class, new DOMDataBroker() {
-
-                @Override
-                public DOMDataWriteTransaction newWriteOnlyTransaction() {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
-                public DOMDataReadWriteTransaction newReadWriteTransaction() {
-                    return  new DOMDataReadWriteTransaction() {
-
-                        @Override
-                        public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(
-                                final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-                            if (store == LogicalDatastoreType.OPERATIONAL && path.getLastPathArgument()
-                                    .equals(GROUP_STATISTICS_ID_BI.getLastPathArgument())) {
-
-                                final ContainerNode data = Builders.containerBuilder()
-                                        .withNodeIdentifier(new NodeIdentifier(AUG_CONT))
-                                        .withChild(ImmutableNodes.leafNode(QName.create(AUG_CONT, "attr-int"),
-                                                attrIntValue))
-                                        .build();
-
-                                return Futures.immediateCheckedFuture(Optional.<NormalizedNode<?,?>>of(data));
-                            }
-                            return Futures.immediateFailedCheckedFuture(new ReadFailedException(TLL_NAME,
-                                    new Exception()));
-                        }
-
-                        @Override
-                        public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-                                final YangInstanceIdentifier path) {
-                            throw new UnsupportedOperationException();
-                        }
-
-                        @Override
-                        public Object getIdentifier() {
-                            return this;
-                        }
-
-                        @Override
-                        public boolean cancel() {
-                            return false;
-                        }
-
-                        @Override
-                        public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-                            throw new UnsupportedOperationException();
-                        }
-
-                        @Override
-                        public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                                final NormalizedNode<?, ?> data) {
-                            throw new UnsupportedOperationException();
-                        }
-
-                        @Override
-                        public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                                final NormalizedNode<?, ?> data) {
-                            throw new UnsupportedOperationException();
-                        }
-
-                        @Override
-                        public FluentFuture<? extends CommitInfo> commit() {
-                            throw new UnsupportedOperationException();
-                        }
-                    };
-                }
-
-                @Override
-                public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
-                public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
-                    throw new UnsupportedOperationException();
-                }
-
-                @Override
-                public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
-                    return Collections.emptyMap();
-                }
-            }).register();
-
-        final Optional<MountPoint> bindingMountPoint = bindingMountPointService.getMountPoint(TLL_INSTANCE_ID_BA);
-        assertTrue(bindingMountPoint.isPresent());
-
-        final Optional<DataBroker> dataBroker = bindingMountPoint.get().getService(DataBroker.class);
-        assertTrue(dataBroker.isPresent());
-
-        final Optional<Cont> data = dataBroker.get().newReadWriteTransaction().read(LogicalDatastoreType.OPERATIONAL,
-                AUG_CONT_ID_BA).checkedGet(5, TimeUnit.SECONDS);
-        assertTrue(data.isPresent());
-        assertEquals(attrIntValue, data.get().getAttrInt());
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/CrossBrokerRpcTest.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/CrossBrokerRpcTest.java
deleted file mode 100644 (file)
index eb5cf21..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.connect.dom;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Multimap;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.test.util.BindingBrokerTestFactory;
-import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
-import org.opendaylight.mdsal.binding.dom.adapter.BindingDOMRpcProviderServiceAdapter;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.OpendaylightOfMigrationTestModelService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.TestContext;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class CrossBrokerRpcTest {
-
-    protected RpcProviderRegistry providerRegistry;
-    protected DOMRpcProviderService provisionRegistry;
-    private BindingTestContext testContext;
-    private DOMRpcService biRpcInvoker;
-    private MessageCapturingFlowService knockService;
-
-    public static final TopLevelListKey NODE_A = new TopLevelListKey("a");
-    public static final TopLevelListKey NODE_B = new TopLevelListKey("b");
-    public static final TopLevelListKey NODE_C = new TopLevelListKey("c");
-
-    private static final QName NODE_ID_QNAME = QName.create(TopLevelList.QNAME, "name");
-    private static final QName KNOCK_KNOCK_QNAME = QName.create(KnockKnockOutput.QNAME, "knock-knock");
-    private static final SchemaPath KNOCK_KNOCK_PATH = SchemaPath.create(true, KNOCK_KNOCK_QNAME);
-
-    public static final InstanceIdentifier<Top> NODES_PATH = InstanceIdentifier.builder(Top.class).build();
-    public static final InstanceIdentifier<TopLevelList> BA_NODE_A_ID = NODES_PATH.child(TopLevelList.class, NODE_A);
-    public static final InstanceIdentifier<TopLevelList> BA_NODE_B_ID = NODES_PATH.child(TopLevelList.class, NODE_B);
-    public static final InstanceIdentifier<TopLevelList> BA_NODE_C_ID = NODES_PATH.child(TopLevelList.class, NODE_C);
-
-    public static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_NODE_C_ID =
-            createBINodeIdentifier(NODE_C);
-
-
-    @Before
-    public void setup() throws Exception {
-        BindingBrokerTestFactory testFactory = new BindingBrokerTestFactory();
-        testFactory.setExecutor(MoreExecutors.newDirectExecutorService());
-        testContext = testFactory.getTestContext();
-
-        testContext.setSchemaModuleInfos(ImmutableSet.of(
-                BindingReflections.getModuleInfo(OpendaylightOfMigrationTestModelService.class)));
-        testContext.start();
-        providerRegistry = testContext.getBindingRpcRegistry();
-        provisionRegistry = testContext.getDomRpcRegistry();
-        biRpcInvoker = testContext.getDomRpcInvoker();
-        assertNotNull(providerRegistry);
-        assertNotNull(provisionRegistry);
-
-        knockService = MessageCapturingFlowService.create(providerRegistry);
-
-    }
-
-    @After
-    public void teardown() {
-        testContext.close();
-    }
-
-    @Test
-    public void testBindingRpcShortcutRegisteredViaLegacyAPI()
-            throws InterruptedException, ExecutionException, TimeoutException {
-        final ListenableFuture<RpcResult<KnockKnockOutput>> knockResult = knockResult(true, "open");
-        knockService.registerPath(TestContext.class, BA_NODE_A_ID).setKnockKnockResult(knockResult);
-
-        OpendaylightOfMigrationTestModelService baKnockInvoker =
-                providerRegistry.getRpcService(OpendaylightOfMigrationTestModelService.class);
-
-        final KnockKnockInput knockInput = knockKnock(BA_NODE_A_ID).setQuestion("Who's there?").build();
-        ListenableFuture<RpcResult<KnockKnockOutput>> future = baKnockInvoker.knockKnock(knockInput);
-
-        final RpcResult<KnockKnockOutput> rpcResult = future.get(5, TimeUnit.SECONDS);
-
-        assertEquals(knockResult.get().getResult().getClass(), rpcResult.getResult().getClass());
-        assertSame(knockResult.get().getResult(), rpcResult.getResult());
-        assertSame(knockInput, knockService.getReceivedKnocks().get(BA_NODE_A_ID).iterator().next());
-    }
-
-    @Test
-    public void testBindingRpcShortcutRegisteredViaMdsalAPI()
-            throws InterruptedException, ExecutionException, TimeoutException {
-        final ListenableFuture<RpcResult<KnockKnockOutput>> knockResult = knockResult(true, "open");
-
-        BindingDOMRpcProviderServiceAdapter mdsalServiceRegistry = new BindingDOMRpcProviderServiceAdapter(
-                testContext.getDelegateDomRouter().getRpcProviderService(), testContext.getCodec());
-
-        final Multimap<InstanceIdentifier<?>, KnockKnockInput> receivedKnocks = HashMultimap.create();
-        mdsalServiceRegistry.registerRpcImplementation(OpendaylightOfMigrationTestModelService.class,
-            (OpendaylightOfMigrationTestModelService) input -> {
-                receivedKnocks.put(input.getKnockerId(), input);
-                return knockResult;
-            }, ImmutableSet.of(BA_NODE_A_ID));
-
-        OpendaylightOfMigrationTestModelService baKnockInvoker =
-                providerRegistry.getRpcService(OpendaylightOfMigrationTestModelService.class);
-
-        final KnockKnockInput knockInput = knockKnock(BA_NODE_A_ID).setQuestion("Who's there?").build();
-        Future<RpcResult<KnockKnockOutput>> future = baKnockInvoker.knockKnock(knockInput);
-
-        final RpcResult<KnockKnockOutput> rpcResult = future.get(5, TimeUnit.SECONDS);
-
-        assertEquals(knockResult.get().getResult().getClass(), rpcResult.getResult().getClass());
-        assertSame(knockResult.get().getResult(), rpcResult.getResult());
-        assertSame(knockInput, receivedKnocks.get(BA_NODE_A_ID).iterator().next());
-    }
-
-    @Test
-    public void bindingRoutedRpcProvider_DomInvokerTest() throws Exception {
-
-        knockService//
-                .registerPath(TestContext.class, BA_NODE_A_ID) //
-                .registerPath(TestContext.class, BA_NODE_B_ID) //
-                .setKnockKnockResult(knockResult(true, "open"));
-
-        OpendaylightOfMigrationTestModelService baKnockInvoker =
-                providerRegistry.getRpcService(OpendaylightOfMigrationTestModelService.class);
-        assertNotSame(knockService, baKnockInvoker);
-
-        KnockKnockInput knockKnockA = knockKnock(BA_NODE_A_ID) //
-                .setQuestion("who's there?").build();
-
-        ContainerNode knockKnockDom = toDomRpc(KNOCK_KNOCK_QNAME, knockKnockA);
-        assertNotNull(knockKnockDom);
-        DOMRpcResult domResult = biRpcInvoker.invokeRpc(KNOCK_KNOCK_PATH, knockKnockDom).get();
-        assertNotNull(domResult);
-        assertNotNull("DOM result is successful.", domResult.getResult());
-        assertTrue("Bidning Add Flow RPC was captured.", knockService.getReceivedKnocks().containsKey(BA_NODE_A_ID));
-        assertEquals(knockKnockA, knockService.getReceivedKnocks().get(BA_NODE_A_ID).iterator().next());
-    }
-
-    @Test
-    public void bindingRpcInvoker_DomRoutedProviderTest() throws Exception {
-        KnockKnockOutputBuilder builder = new KnockKnockOutputBuilder();
-        builder.setAnswer("open");
-        final KnockKnockOutput output = builder.build();
-
-        provisionRegistry.registerRpcImplementation((rpc, input) -> {
-            ContainerNode result = testContext.getCodec().getCodecFactory().toNormalizedNodeRpcData(output);
-            return Futures.immediateCheckedFuture(new DefaultDOMRpcResult(result));
-        }, DOMRpcIdentifier.create(KNOCK_KNOCK_PATH, BI_NODE_C_ID));
-
-        OpendaylightOfMigrationTestModelService baKnockInvoker =
-                providerRegistry.getRpcService(OpendaylightOfMigrationTestModelService.class);
-        Future<RpcResult<KnockKnockOutput>> baResult = baKnockInvoker.knockKnock(knockKnock(BA_NODE_C_ID)
-            .setQuestion("Who's there?").build());
-        assertNotNull(baResult);
-        assertEquals(output, baResult.get().getResult());
-    }
-
-    private ContainerNode toDomRpcInput(final DataObject addFlowA) {
-        return testContext.getCodec().getCodecFactory().toNormalizedNodeRpcData(addFlowA);
-    }
-
-    private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBINodeIdentifier(
-            final TopLevelListKey listKey) {
-        return org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.builder().node(Top.QNAME)
-                .node(TopLevelList.QNAME)
-                .nodeWithKey(TopLevelList.QNAME, NODE_ID_QNAME, listKey.getName()).build();
-    }
-
-    private static ListenableFuture<RpcResult<KnockKnockOutput>> knockResult(final boolean success,
-            final String answer) {
-        KnockKnockOutput output = new KnockKnockOutputBuilder().setAnswer(answer).build();
-        RpcResult<KnockKnockOutput> result = RpcResultBuilder.<KnockKnockOutput>status(success).withResult(output)
-                .build();
-        return Futures.immediateFuture(result);
-    }
-
-    private static KnockKnockInputBuilder knockKnock(final InstanceIdentifier<TopLevelList> listId) {
-        KnockKnockInputBuilder builder = new KnockKnockInputBuilder();
-        builder.setKnockerId(listId);
-        return builder;
-    }
-
-    private ContainerNode toDomRpc(final QName rpcName, final KnockKnockInput knockInput) {
-        return toDomRpcInput(knockInput);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/DOMRpcServiceTestBugfix560.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/DOMRpcServiceTestBugfix560.java
deleted file mode 100644 (file)
index 8117134..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.connect.dom;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.ExecutionException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.MountPoint;
-import org.opendaylight.controller.md.sal.binding.api.MountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.test.util.BindingBrokerTestFactory;
-import org.opendaylight.controller.sal.binding.test.util.BindingTestContext;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.OpendaylightTestRpcServiceService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.RockTheHouseInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.RockTheHouseOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.StatementParserMode;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
-import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
-
-/**
- * Test case for reported bug 560.
- *
- * @author Lukas Sedlak
- * @see <a
- *      href="https://bugs.opendaylight.org/show_bug.cgi?id=560">https://bugs.opendaylight.org/show_bug.cgi?id=560</a>
- */
-public class DOMRpcServiceTestBugfix560 {
-
-    private static final String RPC_SERVICE_NAMESPACE = "urn:opendaylight:params:xml:ns:yang:controller:md:sal:test:bi:ba:rpcservice";
-    private static final String REVISION_DATE = "2014-07-01";
-    private static final QName RPC_NAME = QName.create(RPC_SERVICE_NAMESPACE,
-            REVISION_DATE, "rock-the-house");
-
-    private static final String TLL_NAME = "id";
-    private static final QName TLL_NAME_QNAME = QName.create(TopLevelList.QNAME, "name");
-
-    private static final InstanceIdentifier<TopLevelList> BA_MOUNT_ID = createBATllIdentifier(TLL_NAME);
-    private static final org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier BI_MOUNT_ID = createBITllIdentifier(TLL_NAME);
-
-    private BindingTestContext testContext;
-    private DOMMountPointService domMountPointService;
-    private MountPointService bindingMountPointService;
-    private SchemaContext schemaContext;
-
-    /**
-     * @throws java.lang.Exception
-     */
-    @Before
-    public void setUp() throws Exception {
-        final BindingBrokerTestFactory testFactory = new BindingBrokerTestFactory();
-        testFactory.setExecutor(MoreExecutors.newDirectExecutorService());
-        testFactory.setStartWithParsedSchema(true);
-        testContext = testFactory.getTestContext();
-
-        testContext.start();
-        domMountPointService = testContext.getDomMountProviderService();
-        bindingMountPointService = testContext.getBindingMountPointService();
-        assertNotNull(domMountPointService);
-
-        final YangModuleInfo moduleInfo = BindingReflections.getModuleInfo(OpendaylightTestRpcServiceService.class);
-        assertNotNull(moduleInfo);
-
-        schemaContext = YangParserTestUtils.parseYangSources(StatementParserMode.DEFAULT_MODE, null,
-            YangTextSchemaSource.delegateForByteSource(RevisionSourceIdentifier.create(
-                    moduleInfo.getName().getLocalName(), moduleInfo.getName().getRevision()),
-                moduleInfo.getYangTextByteSource()));
-    }
-
-    private static org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier createBITllIdentifier(
-            final String mount) {
-        return org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier
-                .builder().node(Top.QNAME)
-                .node(TopLevelList.QNAME)
-                .nodeWithKey(TopLevelList.QNAME, TLL_NAME_QNAME, mount)
-                .build();
-    }
-
-    private static InstanceIdentifier<TopLevelList> createBATllIdentifier(
-            final String mount) {
-        return InstanceIdentifier.builder(Top.class)
-                .child(TopLevelList.class, new TopLevelListKey(mount)).build();
-    }
-
-    @Test
-    public void test() throws ExecutionException, InterruptedException {
-        // FIXME: This is made to only make sure instance identifier codec for path is instantiated.
-        domMountPointService
-                .createMountPoint(BI_MOUNT_ID).addService(DOMRpcService.class, new DOMRpcService() {
-
-                    @Override
-                    public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(final T arg0) {
-                        // TODO Auto-generated method stub
-                        return null;
-                    }
-
-                    @Override
-                    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath arg0, final NormalizedNode<?, ?> arg1) {
-                        final DOMRpcResult result = new DefaultDOMRpcResult((NormalizedNode<?, ?>) null);
-                        return Futures.immediateCheckedFuture(result);
-                    }
-                }).register();
-
-        final Optional<MountPoint> mountInstance = bindingMountPointService.getMountPoint(BA_MOUNT_ID);
-        assertTrue(mountInstance.isPresent());
-
-        final Optional<RpcConsumerRegistry> rpcRegistry = mountInstance.get().getService(RpcConsumerRegistry.class);
-        assertTrue(rpcRegistry.isPresent());
-        final OpendaylightTestRpcServiceService rpcService = rpcRegistry.get()
-                .getRpcService(OpendaylightTestRpcServiceService.class);
-        assertNotNull(rpcService);
-
-        try {
-            final ListenableFuture<RpcResult<RockTheHouseOutput>> result = rpcService
-                    .rockTheHouse(new RockTheHouseInputBuilder().build());
-            assertTrue(result.get().isSuccessful());
-        } catch (final IllegalStateException ex) {
-            fail("OpendaylightTestRpcServiceService class doesn't contain rockTheHouse method!");
-        }
-    }
-
-    /**
-     * @throws java.lang.Exception
-     */
-    @After
-    public void teardown() {
-        testContext.close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/MessageCapturingFlowService.java b/opendaylight/md-sal/sal-binding-dom-it/src/test/java/org/opendaylight/controller/sal/binding/test/connect/dom/MessageCapturingFlowService.java
deleted file mode 100644 (file)
index 9987bff..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.binding.test.connect.dom;
-
-import static org.junit.Assert.assertNotNull;
-
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.KnockKnockOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.of.migration.test.model.rev150210.OpendaylightOfMigrationTestModelService;
-import org.opendaylight.yangtools.yang.binding.BaseIdentity;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-
-public class MessageCapturingFlowService implements OpendaylightOfMigrationTestModelService, AutoCloseable {
-
-    private ListenableFuture<RpcResult<KnockKnockOutput>> knockKnockResult;
-
-    private final Multimap<InstanceIdentifier<?>, KnockKnockInput> receivedKnocks = HashMultimap.create();
-    private RoutedRpcRegistration<OpendaylightOfMigrationTestModelService> registration;
-
-    public ListenableFuture<RpcResult<KnockKnockOutput>> getKnockKnockResult() {
-        return knockKnockResult;
-    }
-
-    public MessageCapturingFlowService setKnockKnockResult(final ListenableFuture<RpcResult<KnockKnockOutput>> kkOutput) {
-        this.knockKnockResult = kkOutput;
-        return this;
-    }
-
-    public Multimap<InstanceIdentifier<?>, KnockKnockInput> getReceivedKnocks() {
-        return receivedKnocks;
-    }
-
-    public MessageCapturingFlowService registerTo(final RpcProviderRegistry registry) {
-        registration = registry.addRoutedRpcImplementation(OpendaylightOfMigrationTestModelService.class, this);
-        assertNotNull(registration);
-        return this;
-    }
-
-    @Override
-    public void close() {
-        registration.close();
-    }
-
-    public MessageCapturingFlowService registerPath(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
-        registration.registerPath(context, path);
-        return this;
-    }
-
-    public MessageCapturingFlowService unregisterPath(final Class<? extends BaseIdentity> context, final InstanceIdentifier<?> path) {
-        registration.unregisterPath(context, path);
-        return this;
-    }
-
-    public static MessageCapturingFlowService create() {
-        return new MessageCapturingFlowService();
-    }
-
-    public static MessageCapturingFlowService create(final RpcProviderRegistry registry) {
-        MessageCapturingFlowService ret = new MessageCapturingFlowService();
-        ret.registerTo(registry);
-        return ret;
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<KnockKnockOutput>> knockKnock(final KnockKnockInput input) {
-        receivedKnocks.put(input.getKnockerId(), input);
-        return knockKnockResult;
-    }
-
-
-}
index 7b0913c0968991706e4b0dea7b812d60cb1308aa..dd32609d307c32838291646b34e4efdcb5dbb6d7 100644 (file)
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-it-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../mdsal-it-parent</relativePath>
   </parent>
   <artifactId>sal-binding-it</artifactId>
@@ -12,7 +12,7 @@
   <dependencies>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>features-mdsal</artifactId>
+      <artifactId>features-controller</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
     </dependency>
index c073619ad16e9f22ff019c4ccadb1f7397e81af7..7a55549f4f67644f4b674af60c216ad95b450f73 100644 (file)
@@ -18,10 +18,12 @@ import org.ops4j.pax.exam.options.DefaultCompositeOption;
 import org.ops4j.pax.exam.util.PathUtils;
 
 /**
+ * Helper utility for running IT tests.
+ *
  * @deprecated Use config-it and/or mdsal-it instead.
  */
-@Deprecated
-public class TestHelper {
+@Deprecated(forRemoval = true)
+public final class TestHelper {
 
     public static final String CONTROLLER = "org.opendaylight.controller";
     public static final String MDSAL = "org.opendaylight.mdsal";
@@ -30,19 +32,23 @@ public class TestHelper {
     public static final String CONTROLLER_MODELS = "org.opendaylight.controller.model";
     public static final String MDSAL_MODELS = "org.opendaylight.mdsal.model";
 
+    private TestHelper() {
+
+    }
+
     public static Option mdSalCoreBundles() {
-        return new DefaultCompositeOption( //
-                mavenBundle(YANGTOOLS, "concepts").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "util").versionAsInProject(), // //
-                mavenBundle(MDSAL, "yang-binding").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "yang-common").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "object-cache-api").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "object-cache-guava").versionAsInProject(), // //
-                mavenBundle("tech.pantheon", "triemap").versionAsInProject(), // //
-                mavenBundle(CONTROLLER, "sal-common-api").versionAsInProject(), // //
-                mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(), // //
-
-                mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(), //
+        return new DefaultCompositeOption(
+                mavenBundle(YANGTOOLS, "concepts").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "util").versionAsInProject(),
+                mavenBundle(MDSAL, "yang-binding").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "yang-common").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "object-cache-api").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "object-cache-guava").versionAsInProject(),
+                mavenBundle("tech.pantheon", "triemap").versionAsInProject(),
+                mavenBundle(CONTROLLER, "sal-common-api").versionAsInProject(),
+                mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(),
+
+                mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(),
                 mavenBundle("com.google.guava", "guava").versionAsInProject(),
                 mavenBundle("com.github.romix", "java-concurrent-hash-trie-map").versionAsInProject()
         );
@@ -50,20 +56,20 @@ public class TestHelper {
 
     public static Option configMinumumBundles() {
         return new DefaultCompositeOption(
-                mavenBundle(CONTROLLER, "config-api").versionAsInProject(), // //
+                mavenBundle(CONTROLLER, "config-api").versionAsInProject(),
                 bindingAwareSalBundles(),
                 mavenBundle("commons-codec", "commons-codec").versionAsInProject(),
 
                 systemPackages("sun.nio.ch", "sun.misc"),
 
-                mavenBundle(CONTROLLER, "config-manager").versionAsInProject(), // //
-                mavenBundle(CONTROLLER, "config-util").versionAsInProject(), // //
-                mavenBundle("commons-io", "commons-io").versionAsInProject(), //
-                mavenBundle(CONTROLLER, "config-manager-facade-xml").versionAsInProject(), //
-                mavenBundle(CONTROLLER, "yang-jmx-generator").versionAsInProject(), //
-                mavenBundle(CONTROLLER, "config-persister-api").versionAsInProject(), //
+                mavenBundle(CONTROLLER, "config-manager").versionAsInProject(),
+                mavenBundle(CONTROLLER, "config-util").versionAsInProject(),
+                mavenBundle("commons-io", "commons-io").versionAsInProject(),
+                mavenBundle(CONTROLLER, "config-manager-facade-xml").versionAsInProject(),
+                mavenBundle(CONTROLLER, "yang-jmx-generator").versionAsInProject(),
+                mavenBundle(CONTROLLER, "config-persister-api").versionAsInProject(),
 
-                mavenBundle(CONTROLLER, "config-persister-impl").versionAsInProject(), //
+                mavenBundle(CONTROLLER, "config-persister-impl").versionAsInProject(),
 
                 mavenBundle("org.apache.servicemix.bundles", "org.apache.servicemix.bundles.xerces", "2.11.0_1"),
                 mavenBundle("org.eclipse.birt.runtime.3_7_1", "org.apache.xml.resolver", "1.2.0"),
@@ -75,93 +81,90 @@ public class TestHelper {
     }
 
     public static Option bindingAwareSalBundles() {
-        return new DefaultCompositeOption( //
+        return new DefaultCompositeOption(
                 mdSalCoreBundles(),
 
-                mavenBundle("org.javassist", "javassist").versionAsInProject(), // //
-
-                mavenBundle(YANGTOOLS, "yang-data-api").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "yang-data-util").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "yang-data-impl").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "yang-model-api").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "yang-model-util").versionAsInProject(), // //
-                mavenBundle(YANGTOOLS, "yang-parser-api").versionAsInProject(), //
-                mavenBundle(YANGTOOLS, "yang-parser-impl").versionAsInProject(), //
-                mavenBundle(MDSAL, "mdsal-binding-generator-api").versionAsInProject(), //
-                mavenBundle(MDSAL, "mdsal-binding-generator-util").versionAsInProject(), //
+                mavenBundle("org.javassist", "javassist").versionAsInProject(),
+
+                mavenBundle(YANGTOOLS, "yang-data-api").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "yang-data-util").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "yang-data-impl").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "yang-model-api").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "yang-model-util").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "yang-parser-api").versionAsInProject(),
+                mavenBundle(YANGTOOLS, "yang-parser-impl").versionAsInProject(),
+                mavenBundle(MDSAL, "mdsal-binding-generator-api").versionAsInProject(),
+                mavenBundle(MDSAL, "mdsal-binding-generator-util").versionAsInProject(),
                 mavenBundle(MDSAL, "mdsal-binding-generator-impl").versionAsInProject(),
                 mavenBundle(MDSAL, "mdsal-binding-dom-codec").versionAsInProject(),
-                mavenBundle("org.antlr", "antlr4-runtime").versionAsInProject(), // //
+                mavenBundle("org.antlr", "antlr4-runtime").versionAsInProject(),
 
-                mavenBundle(CONTROLLER, "sal-binding-util").versionAsInProject(), //
-                mavenBundle(CONTROLLER, "sal-common-util").versionAsInProject(), // //
+                mavenBundle(CONTROLLER, "sal-binding-util").versionAsInProject(),
+                mavenBundle(CONTROLLER, "sal-common-util").versionAsInProject(),
 
 
-                mavenBundle(CONTROLLER, "sal-core-api").versionAsInProject().update(), //
-                mavenBundle(CONTROLLER, "sal-binding-api").versionAsInProject(), // //
+                mavenBundle(CONTROLLER, "sal-core-api").versionAsInProject().update(),
+                mavenBundle(CONTROLLER, "sal-binding-api").versionAsInProject(),
 
-                mavenBundle("com.lmax", "disruptor").versionAsInProject(), //
-                mavenBundle(CONTROLLER, "sal-broker-impl").versionAsInProject(), // //
-                mavenBundle(CONTROLLER, "sal-dom-config").versionAsInProject(), // //
+                mavenBundle("com.lmax", "disruptor").versionAsInProject(),
+                mavenBundle(CONTROLLER, "sal-broker-impl").versionAsInProject(),
+                mavenBundle(CONTROLLER, "sal-dom-config").versionAsInProject(),
 
-                mavenBundle(CONTROLLER, "sal-inmemory-datastore").versionAsInProject(), //
-                mavenBundle(CONTROLLER, "sal-dom-broker-config").versionAsInProject(), // //
+                mavenBundle(CONTROLLER, "sal-inmemory-datastore").versionAsInProject(),
+                mavenBundle(CONTROLLER, "sal-dom-broker-config").versionAsInProject(),
 
-                mavenBundle(CONTROLLER, "sal-core-spi").versionAsInProject().update(), //
+                mavenBundle(CONTROLLER, "sal-core-spi").versionAsInProject().update(),
 
-                mavenBundle(CONTROLLER, "sal-binding-broker-impl").versionAsInProject(), // //
-                mavenBundle(CONTROLLER, "sal-binding-config").versionAsInProject(), //
+                mavenBundle(CONTROLLER, "sal-binding-broker-impl").versionAsInProject(),
+                mavenBundle(CONTROLLER, "sal-binding-config").versionAsInProject(),
 
-                systemProperty("netconf.config.persister.active").value("1"), //
+                systemProperty("netconf.config.persister.active").value("1"),
                 systemProperty("netconf.config.persister.1.storageAdapterClass").value(
-                        "org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter"), //
+                        "org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter"),
                 systemProperty("netconf.config.persister.1.properties.fileStorage")
-                        .value(PathUtils.getBaseDir() + "/src/test/resources/controller.xml"), //
-                systemProperty("netconf.config.persister.1.properties.numberOfBackups").value("1") //
+                        .value(PathUtils.getBaseDir() + "/src/test/resources/controller.xml"),
+                systemProperty("netconf.config.persister.1.properties.numberOfBackups").value("1")
 
         );
-
     }
 
     public static Option bindingIndependentSalBundles() {
         return new DefaultCompositeOption(
 
         );
-
     }
 
     public static Option protocolFrameworkBundles() {
         return new DefaultCompositeOption(
-            mavenBundle("io.netty", "netty-common").versionAsInProject(), //
-            mavenBundle("io.netty", "netty-buffer").versionAsInProject(), //
-            mavenBundle("io.netty", "netty-handler").versionAsInProject(), //
-            mavenBundle("io.netty", "netty-codec").versionAsInProject(), //
-            mavenBundle("io.netty", "netty-transport").versionAsInProject(), //
-            mavenBundle(CONTROLLER, "netty-config-api").versionAsInProject(), //
+            mavenBundle("io.netty", "netty-common").versionAsInProject(),
+            mavenBundle("io.netty", "netty-buffer").versionAsInProject(),
+            mavenBundle("io.netty", "netty-handler").versionAsInProject(),
+            mavenBundle("io.netty", "netty-codec").versionAsInProject(),
+            mavenBundle("io.netty", "netty-transport").versionAsInProject(),
+            mavenBundle(CONTROLLER, "netty-config-api").versionAsInProject(),
             mavenBundle(CONTROLLER, "protocol-framework").versionAsInProject()
         );
-
     }
 
     public static Option flowCapableModelBundles() {
-        return new DefaultCompositeOption( //
-                mavenBundle(CONTROLLER_MODELS, "model-inventory").versionAsInProject() //
+        return new DefaultCompositeOption(
+                mavenBundle(CONTROLLER_MODELS, "model-inventory").versionAsInProject()
         );
-
     }
 
     /**
+     * Return an option containing models for testing purposes.
+     *
      * @return option containing models for testing purposes
      */
     public static Option salTestModelBundles() {
-        return new DefaultCompositeOption( //
+        return new DefaultCompositeOption(
                 mavenBundle(CONTROLLER, "sal-test-model").versionAsInProject()
         );
-
     }
 
     public static Option baseModelBundles() {
-        return new DefaultCompositeOption( //
+        return new DefaultCompositeOption(
                 mavenBundle(MDSAL + ".model", "yang-ext").versionAsInProject(),
                 mavenBundle(MDSAL_MODELS, "ietf-type-util").versionAsInProject(),
                 mavenBundle(MDSAL_MODELS, "ietf-inet-types").versionAsInProject(),
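
For context, TestHelper's composite options are meant to be consumed from a Pax Exam @Configuration method. A minimal illustrative sketch (not part of this patch; the ExampleHelperIT class name is made up, and it assumes the pax-exam-junit4 driver, a test class in the same package as TestHelper, and bundle versions resolvable via versionAsInProject()):

import static org.ops4j.pax.exam.CoreOptions.options;

import org.junit.runner.RunWith;
import org.ops4j.pax.exam.Configuration;
import org.ops4j.pax.exam.Option;
import org.ops4j.pax.exam.junit.PaxExam;

@RunWith(PaxExam.class)
public class ExampleHelperIT {
    @Configuration
    public Option[] config() {
        // Compose the helper-provided bundle sets into one container configuration.
        return options(
            TestHelper.mdSalCoreBundles(),
            TestHelper.bindingAwareSalBundles(),
            TestHelper.salTestModelBundles());
    }
}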
index 3ecaf7eb91cca6d160239b6a9e3a5918954187f2..0996dae3b938a44d76314d3f7392128c28ffba6d 100644 (file)
@@ -20,7 +20,7 @@ import org.ops4j.pax.exam.options.MavenUrlReference;
 public abstract class AbstractIT extends AbstractMdsalTestBase {
     @Override
     public MavenUrlReference getFeatureRepo() {
-        return maven().groupId("org.opendaylight.controller").artifactId("features-mdsal").classifier("features")
+        return maven().groupId("org.opendaylight.controller").artifactId("features-controller").classifier("features")
                 .type("xml").versionAsInProject();
     }
 
@@ -33,7 +33,6 @@ public abstract class AbstractIT extends AbstractMdsalTestBase {
     protected Option[] getAdditionalOptions() {
         return new Option[] {
                 mavenBundle("org.opendaylight.controller", "sal-test-model").versionAsInProject(),
-                mavenBundle("net.bytebuddy", "byte-buddy").versionAsInProject(),
         };
     }
 }
diff --git a/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractTestProvider.java b/opendaylight/md-sal/sal-binding-it/src/test/java/org/opendaylight/controller/test/sal/binding/it/AbstractTestProvider.java
deleted file mode 100644 (file)
index 6871a0a..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.test.sal.binding.it;
-
-import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
-
-public abstract class AbstractTestProvider implements BindingAwareProvider {
-
-
-}
index e10eb0c3a668e52d0651aa20fd1c24f9de64654c..833c710e328b9dccdf09ff42f791e446ffd1bb49 100644 (file)
@@ -12,13 +12,13 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import com.google.common.base.Optional;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import javax.inject.Inject;
 import org.junit.Test;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.WriteTransaction;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.Lists;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.UnorderedContainer;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
@@ -28,7 +28,7 @@ import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.ops4j.pax.exam.util.Filter;
 
 /**
- * covers creating, reading and deleting of an item in dataStore
+ * Covers creating, reading and deleting of an item in dataStore.
  */
 public class DataServiceIT extends AbstractIT {
     @Inject
@@ -36,12 +36,8 @@ public class DataServiceIT extends AbstractIT {
     DataBroker dataBroker;
 
     /**
-     *
-     * Ignored this, because classes here are constructed from
-     * very different class loader as MD-SAL is run into,
+     * Ignored this, because classes here are constructed from a very different class loader than MD-SAL is run in,
      * so this code is run from a different classloader.
-     *
-     * @throws Exception
      */
     @Test
     public void test() throws Exception {
@@ -55,7 +51,7 @@ public class DataServiceIT extends AbstractIT {
         UnorderedList nodeData1 = createNode("0");
 
         transaction.put(LogicalDatastoreType.OPERATIONAL, node1, nodeData1);
-        transaction.submit().get(5, TimeUnit.SECONDS);
+        transaction.commit().get(5, TimeUnit.SECONDS);
 
         Optional<UnorderedList> readedData = dataBroker.newReadOnlyTransaction().read(LogicalDatastoreType.OPERATIONAL,
                 node1).get(5, TimeUnit.SECONDS);
@@ -67,14 +63,13 @@ public class DataServiceIT extends AbstractIT {
 
         transaction2.delete(LogicalDatastoreType.OPERATIONAL, node1);
 
-        transaction2.submit().get(5, TimeUnit.SECONDS);
+        transaction2.commit().get(5, TimeUnit.SECONDS);
 
         Optional<UnorderedList> readedData2 = dataBroker.newReadOnlyTransaction().read(LogicalDatastoreType.OPERATIONAL,
                 node1).get(5, TimeUnit.SECONDS);
         assertFalse(readedData2.isPresent());
     }
 
-
     private static InstanceIdentifier<UnorderedList> createNodeRef(final String string) {
         UnorderedListKey key = new UnorderedListKey(string);
         return InstanceIdentifier.builder(Lists.class).child(UnorderedContainer.class).child(UnorderedList.class, key)
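
The hunks above migrate DataServiceIT from the controller md.sal APIs to the mdsal binding APIs: java.util.Optional replaces Guava's Optional and WriteTransaction.commit() replaces submit(). A minimal sketch of that write-then-read pattern (the DatastoreSketch class and writeAndRead helper are illustrative, not part of this patch):

import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

final class DatastoreSketch {
    private DatastoreSketch() {
    }

    // Write data to the OPERATIONAL datastore, wait for the commit, then read it back.
    static <T extends DataObject> Optional<T> writeAndRead(final DataBroker broker,
            final InstanceIdentifier<T> path, final T data) throws Exception {
        final WriteTransaction tx = broker.newWriteOnlyTransaction();
        tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
        // commit() returns a future; block briefly, as the IT does, to keep the sketch simple
        tx.commit().get(5, TimeUnit.SECONDS);
        return broker.newReadOnlyTransaction()
            .read(LogicalDatastoreType.OPERATIONAL, path)
            .get(5, TimeUnit.SECONDS);
    }
}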
index 468fc1b1fc834c60f3e924bc0dca5142853f33d5..163163bf2878ced066a12c16672c47d83324e96c 100644 (file)
@@ -10,15 +10,13 @@ package org.opendaylight.controller.test.sal.binding.it;
 import static org.junit.Assert.assertEquals;
 
 import java.util.ArrayList;
-import java.util.List;
 import javax.inject.Inject;
 import org.junit.Test;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OpendaylightTestNotificationListener;
+import org.opendaylight.mdsal.binding.api.NotificationPublishService;
+import org.opendaylight.mdsal.binding.api.NotificationService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotificationBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.NotificationListener;
+import org.opendaylight.yangtools.yang.common.Uint16;
 import org.ops4j.pax.exam.util.Filter;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -27,117 +25,78 @@ import org.slf4j.LoggerFactory;
  * covers registering of notification listener, publishing of notification and receiving of notification.
  */
 public class NotificationIT extends AbstractIT {
+    private static final Logger LOG = LoggerFactory.getLogger(NotificationIT.class);
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(NotificationIT.class);
+    @Inject
+    @Filter(timeout = 120 * 1000)
+    NotificationService notificationService;
 
     @Inject
     @Filter(timeout = 120 * 1000)
-    NotificationProviderService notificationService;
+    NotificationPublishService notificationPublishService;
 
     /**
-     * test of delivering of notification
-     * @throws Exception
+     * Tests delivery of a notification.
      */
     @Test
     public void notificationTest() throws Exception {
-        NotificationTestListener listener1 = new NotificationTestListener();
-        ListenerRegistration<NotificationListener> listener1Reg =
-                notificationService.registerNotificationListener(listener1);
-
-        LOG.info("The notification of type FlowAdded with cookie ID 0 is created. The "
-                + "delay 100ms to make sure that the notification was delivered to "
-                + "listener.");
-        notificationService.publish(noDustNotification("rainy day", 42));
-        Thread.sleep(100);
-
-        /**
-         * Check that one notification was delivered and has correct cookie.
-         *
-         */
-        assertEquals(1, listener1.notificationBag.size());
-        assertEquals("rainy day", listener1.notificationBag.get(0).getReason());
-        assertEquals(42, listener1.notificationBag.get(0).getDaysTillNewDust().intValue());
-
-        LOG.info("The registration of the Consumer 2. SalFlowListener is registered "
+        final var bag1 = new ArrayList<OutOfPixieDustNotification>();
+        try (var reg1 = notificationService.registerListener(OutOfPixieDustNotification.class, bag1::add)) {
+            LOG.info("""
+                The notification of type FlowAdded with cookie ID 0 is created. The\s\
+                delay 100ms to make sure that the notification was delivered to\s\
+                listener.""");
+            notificationPublishService.putNotification(noDustNotification("rainy day", 42));
+            Thread.sleep(100);
+
+            // Check that one notification was delivered and has correct cookie.
+            assertEquals(1, bag1.size());
+            assertEquals("rainy day", bag1.get(0).getReason());
+            assertEquals(42, bag1.get(0).getDaysTillNewDust().intValue());
+
+            LOG.info("The registration of the Consumer 2. SalFlowListener is registered "
                 + "registered as notification listener.");
 
-        NotificationTestListener listener2 = new NotificationTestListener();
-        final ListenerRegistration<NotificationListener> listener2Reg =
-                notificationService.registerNotificationListener(listener2);
-
-        LOG.info("3 notifications are published");
-        notificationService.publish(noDustNotification("rainy day", 5));
-        notificationService.publish(noDustNotification("rainy day", 10));
-        notificationService.publish(noDustNotification("tax collector", 2));
-
-        /**
-         * The delay 100ms to make sure that the notifications were delivered to
-         * listeners.
-         */
-        Thread.sleep(100);
-
-        /**
-         * Check that 3 notification was delivered to both listeners (first one
-         * received 4 in total, second 3 in total).
-         *
-         */
-        assertEquals(4, listener1.notificationBag.size());
-        assertEquals(3, listener2.notificationBag.size());
-
-        /**
-         * The second listener is closed (unregistered)
-         *
-         */
-        listener2Reg.close();
-
-        LOG.info("The notification 5 is published");
-        notificationService.publish(noDustNotification("entomologist hunt", 10));
-
-        /**
-         * The delay 100ms to make sure that the notification was delivered to
-         * listener.
-         */
-        Thread.sleep(100);
-
-        /**
-         * Check that first consumer received 5 notifications in total, second
-         * consumer received only three. Last notification was never received by
-         * second consumer because its listener was unregistered.
-         *
-         */
-        assertEquals(5, listener1.notificationBag.size());
-        assertEquals(3, listener2.notificationBag.size());
+            final var bag2 = new ArrayList<OutOfPixieDustNotification>();
+            try (var reg2 = notificationService.registerListener(OutOfPixieDustNotification.class, bag2::add)) {
+                LOG.info("3 notifications are published");
+                notificationPublishService.putNotification(noDustNotification("rainy day", 5));
+                notificationPublishService.putNotification(noDustNotification("rainy day", 10));
+                notificationPublishService.putNotification(noDustNotification("tax collector", 2));
+
+                // The delay 100ms to make sure that the notifications were delivered to listeners.
+                Thread.sleep(100);
+
+                // Check that 3 notifications were delivered to both listeners (first one received 4 in total, second 3
+                // in total).
+                assertEquals(4, bag1.size());
+                assertEquals(3, bag2.size());
 
+                // The second listener is closed (unregistered)
+                reg2.close();
+
+                LOG.info("The notification 5 is published");
+                notificationPublishService.putNotification(noDustNotification("entomologist hunt", 10));
+
+                // The delay 100ms to make sure that the notification was delivered to listener.
+                Thread.sleep(100);
+
+                // Check that first consumer received 5 notifications in total, second consumer received only three.
+                // Last notification was never received by second consumer because its listener was unregistered.
+                assertEquals(5, bag1.size());
+                assertEquals(3, bag2.size());
+            }
+        }
     }
 
     /**
-     * Creates instance of the type OutOfPixieDustNotification. It is
-     * used only for testing purpose.
+     * Creates an instance of the type OutOfPixieDustNotification. It is used only for testing purposes.
      *
-     * @param reason
-     * @param days
      * @return instance of the type OutOfPixieDustNotification
      */
-    public static OutOfPixieDustNotification noDustNotification(String reason, int days) {
+    public static OutOfPixieDustNotification noDustNotification(final String reason, final int days) {
         OutOfPixieDustNotificationBuilder ret = new OutOfPixieDustNotificationBuilder();
-        ret.setReason(reason).setDaysTillNewDust(days);
+        ret.setReason(reason).setDaysTillNewDust(Uint16.valueOf(days));
         return ret.build();
     }
-
-    /**
-     * Implements
-     * {@link OpendaylightTestNotificationListener} and contains attributes which keep lists of objects of
-     * the type {@link OutOfFairyDustNotification}.
-     */
-    public static class NotificationTestListener implements OpendaylightTestNotificationListener {
-
-        List<OutOfPixieDustNotification> notificationBag = new ArrayList<>();
-
-        @Override
-        public void onOutOfPixieDustNotification(OutOfPixieDustNotification arg0) {
-            notificationBag.add(arg0);
-        }
-
-    }
 }
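
The rewrite above replaces NotificationProviderService with the mdsal NotificationService/NotificationPublishService pair: listeners are registered per notification type and withdrawn by closing their Registration. A minimal sketch of that register-publish-collect pattern (the NotificationSketch class and publishAndCollect helper are illustrative, not part of this patch):

import java.util.ArrayList;
import java.util.List;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.mdsal.binding.api.NotificationService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.notification.rev150205.OutOfPixieDustNotification;

final class NotificationSketch {
    private NotificationSketch() {
    }

    // Register a typed listener, publish one notification, and return what was delivered
    // while the registration was open.
    static List<OutOfPixieDustNotification> publishAndCollect(final NotificationService listenService,
            final NotificationPublishService publishService, final OutOfPixieDustNotification notification)
                throws InterruptedException {
        final var received = new ArrayList<OutOfPixieDustNotification>();
        try (var reg = listenService.registerListener(OutOfPixieDustNotification.class, received::add)) {
            publishService.putNotification(notification);
            // Give the dispatcher a moment to deliver, mirroring the IT above.
            Thread.sleep(100);
        }
        return received;
    }
}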
index 7bd28020eaea6190e8a9963bb7bafb08829f20bc..5cdde319cd49d736e812dde3be3cebaa2624b518 100644 (file)
@@ -9,27 +9,28 @@ package org.opendaylight.controller.test.sal.binding.it;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertSame;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 import com.google.common.util.concurrent.Futures;
+import java.util.Set;
 import javax.inject.Inject;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.OpendaylightTestRoutedRpcService;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRoute;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.TestContext;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.Lists;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.UnorderedContainer;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedListKey;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.ops4j.pax.exam.util.Filter;
@@ -37,148 +38,128 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * covers routed rpc creation, registration, invocation, unregistration
+ * Covers routed rpc creation, registration, invocation, unregistration.
  */
 public class RoutedServiceIT extends AbstractIT {
+    private static final Logger LOG = LoggerFactory.getLogger(RoutedServiceIT.class);
 
-    private static final Logger LOG = LoggerFactory
-            .getLogger(RoutedServiceIT.class);
+    protected RoutedSimpleRoute routedSimpleRouteRpc1;
+    protected RoutedSimpleRoute routedSimpleRouteRpc2;
 
-    protected OpendaylightTestRoutedRpcService odlRoutedService1;
-    protected OpendaylightTestRoutedRpcService odlRoutedService2;
+    @Inject
+    @Filter(timeout = 120 * 1000)
+    RpcProviderService rpcProviderService;
 
     @Inject
     @Filter(timeout = 120 * 1000)
-    RpcProviderRegistry rpcProviderRegistry;
+    RpcService rpcService;
 
     /**
-     * prepare mocks
+     * Prepare mocks.
      */
     @Before
     public void setUp() {
-        odlRoutedService1 = mock(OpendaylightTestRoutedRpcService.class, "First Flow Service");
-        odlRoutedService2 = mock(OpendaylightTestRoutedRpcService.class, "Second Flow Service");
-        Mockito.when(odlRoutedService1.routedSimpleRoute(Mockito.<RoutedSimpleRouteInput>any()))
-            .thenReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null));
-        Mockito.when(odlRoutedService2.routedSimpleRoute(Mockito.<RoutedSimpleRouteInput>any()))
-            .thenReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null));
+        routedSimpleRouteRpc1 = mock(RoutedSimpleRoute.class, "First Flow Rpc");
+        doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc1).implementedInterface();
+        doReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null)).when(routedSimpleRouteRpc1)
+            .invoke(any());
+
+        routedSimpleRouteRpc2 = mock(RoutedSimpleRoute.class, "Second Flow Rpc");
+        doReturn(RoutedSimpleRoute.class).when(routedSimpleRouteRpc2).implementedInterface();
+        doReturn(Futures.<RpcResult<RoutedSimpleRouteOutput>>immediateFuture(null)).when(routedSimpleRouteRpc2)
+            .invoke(any());
     }
 
     @Test
     public void testServiceRegistration() {
-        LOG.info("Register provider 1 with first implementation of routeSimpleService - service1");
+        LOG.info("Register provider 1 with first implementation of routeSimpleService - rpc1 of node 1");
+        final InstanceIdentifier<UnorderedList> nodeOnePath = createNodeRef("foo:node:1");
+        final InstanceIdentifier<UnorderedList> nodeTwo = createNodeRef("foo:node:2");
 
-        RoutedRpcRegistration<OpendaylightTestRoutedRpcService> firstReg = rpcProviderRegistry
-                .addRoutedRpcImplementation(OpendaylightTestRoutedRpcService.class, odlRoutedService1);
+        Registration firstReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc1,
+            Set.of(nodeOnePath));
         assertNotNull("Registration should not be null", firstReg);
-        assertSame(odlRoutedService1, firstReg.getInstance());
 
-        LOG.info("Register provider 2 with second implementation of routeSimpleService - service2");
+        LOG.info("Register provider 2 with second implementation of routeSimpleService - rpc2 of node 2");
 
-        RoutedRpcRegistration<OpendaylightTestRoutedRpcService> secondReg = rpcProviderRegistry
-                .addRoutedRpcImplementation(OpendaylightTestRoutedRpcService.class, odlRoutedService2);
+        Registration secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeTwo));
         assertNotNull("Registration should not be null", firstReg);
-        assertSame(odlRoutedService2, secondReg.getInstance());
         assertNotSame(secondReg, firstReg);
 
-        OpendaylightTestRoutedRpcService consumerService =
-                rpcProviderRegistry.getRpcService(OpendaylightTestRoutedRpcService.class);
+        RoutedSimpleRoute consumerService = rpcService.getRpc(RoutedSimpleRoute.class);
         assertNotNull("MD-SAL instance of test Service should be returned", consumerService);
-        assertNotSame("Provider instance and consumer instance should not be same.", odlRoutedService1,
+        assertNotSame("Provider instance and consumer instance should not be same.", routedSimpleRouteRpc1,
                 consumerService);
 
-        final InstanceIdentifier<UnorderedList> nodeOnePath = createNodeRef("foo:node:1");
-
-        LOG.info("Provider 1 registers path of node 1");
-        firstReg.registerPath(TestContext.class, nodeOnePath);
-
         /**
-         * Consumer creates addFlow message for node one and sends it to the
-         * MD-SAL
+         * Consumer creates addFlow message for node one and sends it to the MD-SAL.
          */
         final RoutedSimpleRouteInput simpleRouteFirstFoo = createSimpleRouteInput(nodeOnePath);
-        consumerService.routedSimpleRoute(simpleRouteFirstFoo);
+        consumerService.invoke(simpleRouteFirstFoo);
 
         /**
-         * Verifies that implementation of the first provider received the same
-         * message from MD-SAL.
+         * Verifies that implementation of the first instance received the same message from MD-SAL.
          */
-        verify(odlRoutedService1).routedSimpleRoute(simpleRouteFirstFoo);
+        verify(routedSimpleRouteRpc1).invoke(simpleRouteFirstFoo);
         /**
          * Verifies that second instance was not invoked with first message
          */
-        verify(odlRoutedService2, times(0)).routedSimpleRoute(simpleRouteFirstFoo);
-
-        LOG.info("Provider 2 registers path of node 2");
-        final InstanceIdentifier<UnorderedList> nodeTwo = createNodeRef("foo:node:2");
-        secondReg.registerPath(TestContext.class, nodeTwo);
+        verify(routedSimpleRouteRpc2, times(0)).invoke(simpleRouteFirstFoo);
 
         /**
-         * Consumer sends message to nodeTwo for three times. Should be
-         * processed by second instance.
+         * Consumer sends a message to nodeTwo three times. Should be processed by the second instance.
          */
         final RoutedSimpleRouteInput simpleRouteSecondFoo = createSimpleRouteInput(nodeTwo);
-        consumerService.routedSimpleRoute(simpleRouteSecondFoo);
-        consumerService.routedSimpleRoute(simpleRouteSecondFoo);
-        consumerService.routedSimpleRoute(simpleRouteSecondFoo);
+        consumerService.invoke(simpleRouteSecondFoo);
+        consumerService.invoke(simpleRouteSecondFoo);
+        consumerService.invoke(simpleRouteSecondFoo);
 
         /**
-         * Verifies that second instance was invoked 3 times with second message
-         * and first instance wasn't invoked.
-         *
+         * Verifies that second instance was invoked 3 times with second message and first instance wasn't invoked.
          */
-        verify(odlRoutedService2, times(3)).routedSimpleRoute(simpleRouteSecondFoo);
-        verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteSecondFoo);
+        verify(routedSimpleRouteRpc2, times(3)).invoke(simpleRouteSecondFoo);
+        verify(routedSimpleRouteRpc1, times(0)).invoke(simpleRouteSecondFoo);
 
         LOG.info("Unregistration of the path for the node one in the first provider");
-        firstReg.unregisterPath(TestContext.class, nodeOnePath);
+        firstReg.close();
 
         LOG.info("Provider 2 registers path of node 1");
-        secondReg.registerPath(TestContext.class, nodeOnePath);
+        secondReg.close();
+        secondReg = rpcProviderService.registerRpcImplementation(routedSimpleRouteRpc2, Set.of(nodeOnePath));
 
         /**
-         * A consumer sends third message to node 1
+         * A consumer sends third message to node 1.
          */
         final RoutedSimpleRouteInput simpleRouteThirdFoo = createSimpleRouteInput(nodeOnePath);
-        consumerService.routedSimpleRoute(simpleRouteThirdFoo);
+        consumerService.invoke(simpleRouteThirdFoo);
 
         /**
-         * Verifies that provider 1 wasn't invoked and provider 2 was invoked 1
-         * time.
+         * Verifies that provider 1 wasn't invoked and provider 2 was invoked 1 time.
          * TODO: fix unregister path
          */
-        //verify(odlRoutedService1, times(0)).routedSimpleRoute(simpleRouteThirdFoo);
-        verify(odlRoutedService2).routedSimpleRoute(simpleRouteThirdFoo);
-
+        verify(routedSimpleRouteRpc2).invoke(simpleRouteThirdFoo);
     }
 
     /**
-     * Returns node reference from string which represents path
+     * Returns node reference from string which represents path.
      *
-     * @param string
-     *            string with key(path)
+     * @param string string with key(path)
      * @return instance identifier to {@link UnorderedList}
      */
     private static InstanceIdentifier<UnorderedList> createNodeRef(final String string) {
-        final UnorderedListKey key = new UnorderedListKey(string);
-        final InstanceIdentifier<UnorderedList> path = InstanceIdentifier.builder(Lists.class)
+        return InstanceIdentifier.builder(Lists.class)
                 .child(UnorderedContainer.class)
-                .child(UnorderedList.class, key)
+                .child(UnorderedList.class, new UnorderedListKey(string))
                 .build();
-
-        return path;
     }
 
     /**
-     * Creates flow AddFlowInput for which only node and cookie are set
+     * Creates flow AddFlowInput for which only node and cookie are set.
      *
-     * @param node
-     *            NodeRef value
+     * @param node NodeRef value
      * @return simpleRouteInput instance
      */
     static RoutedSimpleRouteInput createSimpleRouteInput(final InstanceIdentifier<UnorderedList> node) {
-        final RoutedSimpleRouteInputBuilder ret = new RoutedSimpleRouteInputBuilder();
-        ret.setRoute(node);
-        return ret.build();
+        return new RoutedSimpleRouteInputBuilder().setRoute(node).build();
     }
 }
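
The migration above drops RoutedRpcRegistration.registerPath() in favour of mdsal's RpcProviderService, which scopes an implementation to a set of instance-identifier paths at registration time; consumers obtain a proxy via RpcService.getRpc(). A minimal sketch of that pattern (the RoutedRpcSketch class and its helper methods are illustrative, not part of this patch):

import java.util.Set;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.RpcService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRoute;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.rpc.routing.rev140701.RoutedSimpleRouteInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.store.rev140422.lists.unordered.container.UnorderedList;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

final class RoutedRpcSketch {
    private RoutedRpcSketch() {
    }

    // Scope an implementation to a single node path; closing the registration withdraws it.
    static Registration registerForNode(final RpcProviderService provider, final RoutedSimpleRoute impl,
            final InstanceIdentifier<UnorderedList> nodePath) {
        return provider.registerRpcImplementation(impl, Set.of(nodePath));
    }

    // Consumers look up a proxy and invoke it; routing selects the implementation
    // registered for the path carried inside the input.
    static void invoke(final RpcService rpcs, final RoutedSimpleRouteInput input) {
        rpcs.getRpc(RoutedSimpleRoute.class).invoke(input);
    }
}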
diff --git a/opendaylight/md-sal/sal-binding-util/pom.xml b/opendaylight/md-sal/sal-binding-util/pom.xml
deleted file mode 100644 (file)
index 439e1c1..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-binding-util</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/AbstractBindingSalConsumerInstance.java b/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/AbstractBindingSalConsumerInstance.java
deleted file mode 100644 (file)
index 61d1ec0..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.util;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.sal.binding.api.NotificationListener;
-import org.opendaylight.controller.sal.binding.api.NotificationService;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public abstract class AbstractBindingSalConsumerInstance<N extends NotificationService, R extends RpcConsumerRegistry>
-        implements RpcConsumerRegistry, NotificationService {
-
-    private final R rpcRegistry;
-    private final N notificationBroker;
-
-    protected final R getRpcRegistry() {
-        return rpcRegistry;
-    }
-
-    protected final N getNotificationBroker() {
-        return notificationBroker;
-    }
-
-    protected final R getRpcRegistryChecked() {
-        Preconditions.checkState(rpcRegistry != null,"Rpc Registry is not available.");
-        return rpcRegistry;
-    }
-
-    protected final N getNotificationBrokerChecked() {
-        Preconditions.checkState(notificationBroker != null,"Notification Broker is not available.");
-        return notificationBroker;
-    }
-
-    protected AbstractBindingSalConsumerInstance(R rpcRegistry, N notificationBroker) {
-        this.rpcRegistry = rpcRegistry;
-        this.notificationBroker = notificationBroker;
-    }
-
-    @Override
-    public <T extends RpcService> T getRpcService(Class<T> module) {
-        return getRpcRegistryChecked().getRpcService(module);
-    }
-
-    @Override
-    public <T extends Notification> ListenerRegistration<NotificationListener<T>> registerNotificationListener(
-            Class<T> notificationType, NotificationListener<T> listener) {
-        return getNotificationBrokerChecked().registerNotificationListener(notificationType, listener);
-    }
-
-    @Override
-    public ListenerRegistration<org.opendaylight.yangtools.yang.binding.NotificationListener>
-            registerNotificationListener(org.opendaylight.yangtools.yang.binding.NotificationListener listener) {
-        return getNotificationBrokerChecked().registerNotificationListener(listener);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/AbstractBindingSalProviderInstance.java b/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/AbstractBindingSalProviderInstance.java
deleted file mode 100644 (file)
index 5cc0340..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.util;
-
-import java.util.concurrent.ExecutorService;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.Notification;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public abstract class AbstractBindingSalProviderInstance<N extends NotificationProviderService,
-                                                         R extends RpcProviderRegistry>
-        extends AbstractBindingSalConsumerInstance<N, R>
-        implements RpcProviderRegistry, NotificationProviderService {
-
-    public AbstractBindingSalProviderInstance(final R rpcRegistry, final N notificationBroker) {
-        super(rpcRegistry, notificationBroker);
-    }
-
-    @Override
-    public <T extends RpcService> RpcRegistration<T> addRpcImplementation(final Class<T> type, final T implementation)
-            throws IllegalStateException {
-        return getRpcRegistryChecked().addRpcImplementation(type, implementation);
-    }
-
-    @Override
-    public <T extends RpcService> RoutedRpcRegistration<T> addRoutedRpcImplementation(final Class<T> type,
-            final T implementation) throws IllegalStateException {
-        return getRpcRegistryChecked().addRoutedRpcImplementation(type, implementation);
-    }
-
-    @Override
-    public void publish(final Notification notification) {
-        getNotificationBrokerChecked().publish(notification);
-    }
-
-    @Override
-    public void publish(final Notification notification, final ExecutorService service) {
-        getNotificationBrokerChecked().publish(notification, service);
-    }
-
-    @Override
-    public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L>
-            registerRouteChangeListener(final L listener) {
-        return getRpcRegistryChecked().registerRouteChangeListener(listener);
-    }
-
-    @Override
-    public ListenerRegistration<NotificationInterestListener> registerInterestListener(
-            final NotificationInterestListener interestListener) {
-        return getNotificationBrokerChecked().registerInterestListener(interestListener);
-    }
-}
diff --git a/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/BindingContextUtils.java b/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/BindingContextUtils.java
deleted file mode 100644 (file)
index 3d7e277..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.util;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.MutableClassToInstanceMap;
-import org.opendaylight.controller.md.sal.common.api.routing.RouteChangeListener;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ConsumerContext;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.ProviderContext;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RoutedRpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker.RpcRegistration;
-import org.opendaylight.controller.sal.binding.api.BindingAwareConsumer;
-import org.opendaylight.controller.sal.binding.api.BindingAwareProvider;
-import org.opendaylight.controller.sal.binding.api.BindingAwareService;
-import org.opendaylight.controller.sal.binding.api.RpcConsumerRegistry;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.controller.sal.binding.api.rpc.RpcContextIdentifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.binding.RpcService;
-
-public final class BindingContextUtils {
-    private BindingContextUtils() {
-    }
-
-    public static ConsumerContext createConsumerContext(BindingAwareConsumer consumer,
-            ClassToInstanceMap<BindingAwareService> serviceProvider) {
-        checkNotNull(consumer,"Consumer should not be null");
-        checkNotNull(serviceProvider,"Service map should not be null");
-        return new SingleConsumerContextImpl(serviceProvider);
-    }
-
-    public static ProviderContext createProviderContext(BindingAwareProvider provider,
-            ClassToInstanceMap<BindingAwareService> serviceProvider) {
-        checkNotNull(provider,"Provider should not be null");
-        checkNotNull(serviceProvider,"Service map should not be null");
-        return new SingleProviderContextImpl(serviceProvider);
-    }
-
-    public static ConsumerContext createConsumerContextAndInitialize(BindingAwareConsumer consumer,
-            ClassToInstanceMap<BindingAwareService> serviceProvider) {
-        ConsumerContext context = createConsumerContext(consumer, serviceProvider);
-        consumer.onSessionInitialized(context);
-        return context;
-    }
-
-    public static ProviderContext createProviderContextAndInitialize(BindingAwareProvider provider,
-            ClassToInstanceMap<BindingAwareService> serviceProvider) {
-        ProviderContext context = createProviderContext(provider, serviceProvider);
-        provider.onSessionInitiated(context);
-        return context;
-    }
-
-    public static <T extends BindingAwareService> T createContextProxyOrReturnService(Class<T> service, T instance) {
-        // FIXME: Create Proxy
-        return instance;
-    }
-
-    private static class SingleConsumerContextImpl implements ConsumerContext, AutoCloseable {
-
-        private ClassToInstanceMap<BindingAwareService> alreadyRetrievedServices;
-        private ClassToInstanceMap<BindingAwareService> serviceProvider;
-
-        SingleConsumerContextImpl(ClassToInstanceMap<BindingAwareService> serviceProvider) {
-            this.alreadyRetrievedServices = MutableClassToInstanceMap.create();
-            this.serviceProvider = serviceProvider;
-        }
-
-        @Override
-        public final <T extends RpcService> T getRpcService(Class<T> module) {
-            return getSALService(RpcConsumerRegistry.class).getRpcService(module);
-        }
-
-        @Override
-        public final <T extends BindingAwareService> T getSALService(Class<T> service) {
-            checkNotNull(service,"Service class should not be null.");
-            T potential = alreadyRetrievedServices.getInstance(service);
-            if (potential != null) {
-                return potential;
-            }
-            return tryToRetrieveSalService(service);
-        }
-
-        private synchronized <T extends BindingAwareService> T tryToRetrieveSalService(Class<T> service) {
-            final T potential = alreadyRetrievedServices.getInstance(service);
-            if (potential != null) {
-                return potential;
-            }
-            final T requested = serviceProvider.getInstance(service);
-            if (requested == null) {
-                throw new IllegalArgumentException("Requested service " + service.getName() + " is not available.");
-            }
-            final T retrieved = BindingContextUtils.createContextProxyOrReturnService(service,requested);
-            alreadyRetrievedServices.put(service, retrieved);
-            return retrieved;
-        }
-
-        @Override
-        public final void close() {
-            alreadyRetrievedServices = null;
-            serviceProvider = null;
-        }
-    }
-
-    private static class SingleProviderContextImpl extends SingleConsumerContextImpl implements ProviderContext {
-
-        SingleProviderContextImpl(ClassToInstanceMap<BindingAwareService> serviceProvider) {
-            super(serviceProvider);
-        }
-
-        @Override
-        public <L extends RouteChangeListener<RpcContextIdentifier, InstanceIdentifier<?>>> ListenerRegistration<L>
-                registerRouteChangeListener(L listener) {
-            return getSALService(RpcProviderRegistry.class).registerRouteChangeListener(listener);
-        }
-
-        @Override
-        public <T extends RpcService> RoutedRpcRegistration<T> addRoutedRpcImplementation(Class<T> type,
-                T implementation) throws IllegalStateException {
-            return getSALService(RpcProviderRegistry.class).addRoutedRpcImplementation(type, implementation);
-        }
-
-        @Override
-        public <T extends RpcService> RpcRegistration<T> addRpcImplementation(Class<T> type, T implementation)
-                throws IllegalStateException {
-            return getSALService(RpcProviderRegistry.class).addRpcImplementation(type, implementation);
-        }
-    }
-}
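
The SingleConsumerContextImpl removed above implemented its type-safe service lookup on top of Guava's ClassToInstanceMap: a service is resolved from the provider map on first request and cached for later calls. A minimal, self-contained sketch of just that pattern, with a made-up ServiceCache name that is not part of the controller APIs:

    import com.google.common.collect.ClassToInstanceMap;
    import com.google.common.collect.MutableClassToInstanceMap;

    // Minimal sketch of the lookup-and-cache pattern used by the deleted SingleConsumerContextImpl.
    // The ServiceCache name and the type parameter S (service supertype) are illustrative only.
    final class ServiceCache<S> {
        private final ClassToInstanceMap<S> provided;
        private final ClassToInstanceMap<S> cached = MutableClassToInstanceMap.create();

        ServiceCache(final ClassToInstanceMap<S> provided) {
            this.provided = provided;
        }

        synchronized <T extends S> T get(final Class<T> type) {
            final T existing = cached.getInstance(type);
            if (existing != null) {
                // Served from the cache on every lookup after the first one.
                return existing;
            }
            final T requested = provided.getInstance(type);
            if (requested == null) {
                throw new IllegalArgumentException("Requested service " + type.getName() + " is not available");
            }
            cached.putInstance(type, requested);
            return requested;
        }
    }
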
diff --git a/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/TypeSafeDataReader.java b/opendaylight/md-sal/sal-binding-util/src/main/java/org/opendaylight/controller/md/sal/binding/util/TypeSafeDataReader.java
deleted file mode 100644 (file)
index e71f646..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.binding.util;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataReader;
-import org.opendaylight.yangtools.yang.binding.DataObject;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-
-/**
- * Deprecated.
- *
- * @deprecated Use {@link org.opendaylight.controller.md.sal.binding.api.ReadTransaction#read(
- *     org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType, InstanceIdentifier)} instead.
- */
-@Deprecated
-public final class TypeSafeDataReader {
-
-    private final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate;
-
-    public DataReader<InstanceIdentifier<?>, DataObject> getDelegate() {
-        return delegate;
-    }
-
-    public TypeSafeDataReader(
-            final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate) {
-        this.delegate = delegate;
-    }
-
-    @SuppressWarnings("unchecked")
-    public <D extends DataObject> D readConfigurationData(
-            final InstanceIdentifier<D> path) {
-        return (D) delegate.readConfigurationData(path);
-    }
-
-    @SuppressWarnings("unchecked")
-    public <D extends DataObject> D readOperationalData(
-            final InstanceIdentifier<D> path) {
-        return (D) delegate.readOperationalData(path);
-    }
-
-    public static TypeSafeDataReader forReader(
-            final DataReader<InstanceIdentifier<? extends DataObject>, DataObject> delegate) {
-        return new TypeSafeDataReader(delegate);
-    }
-}
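
The deprecation note in the removed reader points callers at ReadTransaction#read, where the result type is carried by the InstanceIdentifier itself. A rough sketch of such a typed read, assuming the mdsal binding API shape; the TypedReads helper is illustrative only and the transaction is supplied by the caller:

    import java.util.Optional;
    import java.util.concurrent.ExecutionException;
    import org.opendaylight.mdsal.binding.api.ReadTransaction;
    import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
    import org.opendaylight.yangtools.yang.binding.DataObject;
    import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

    // Illustrative helper: the result type comes from the InstanceIdentifier, so no unchecked
    // cast is needed, unlike in the deleted wrapper above.
    final class TypedReads {
        private TypedReads() {
        }

        static <D extends DataObject> Optional<D> readConfiguration(final ReadTransaction tx,
                final InstanceIdentifier<D> path) throws ExecutionException, InterruptedException {
            return tx.read(LogicalDatastoreType.CONFIGURATION, path).get();
        }
    }
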
index 7342894118684651e58d9a804169a32fead6a7be..ad8d996edce0f3ad1e0c2c9b16a7fdfcbc5ae19a 100644 (file)
@@ -4,13 +4,25 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-cluster-admin-api</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
+  <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>cds-access-api</artifactId>
+    </dependency>
+  </dependencies>
 </project>
index a6ee3dd8043a611955988ca598457df6c86c6568..25c88475f6cfdff0f72bc45e83339137ddcd3d7c 100644 (file)
@@ -10,6 +10,8 @@ module cluster-admin {
         description "Initial revision.";
     }
 
+    import odl-controller-cds-types { prefix cds; }
+
     typedef data-store-type {
         type enumeration {
             enum config {
@@ -21,14 +23,29 @@ module cluster-admin {
         }
     }
 
-    grouping shard-operation-result {
+    grouping datastore-shard-id {
+        description "Grouping holding combined identifiers of a shard -- its name and datastore type";
+
         leaf shard-name {
-            type string;
+            description "The name of the shard.";
+            mandatory true;
+            type string {
+                length "1..max" {
+                    error-app-tag "odl-named-shards";
+                    error-message "Shard name must not be empty";
+                }
+            }
         }
 
         leaf data-store-type {
+            mandatory true;
             type data-store-type;
+            description "The type of the data store to which the shard belongs";
         }
+    }
+
+    grouping shard-operation-result {
+        uses datastore-shard-id;
 
         leaf succeeded {
             type boolean;
@@ -64,115 +81,42 @@ module cluster-admin {
 
     rpc add-shard-replica {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to create a replica.";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
+            uses datastore-shard-id;
         }
 
         description "Adds a replica of a shard to this node and joins it to an existing cluster. The shard must
-            already have a module configuration defined for it and there must already be a shard existing on
-            another node with a leader. This RPC first contacts peer member seed nodes searching for a shard.
-            When found, an AddServer message is sent to the shard leader and applied as described in the Raft
-            paper.";
+                     already have a module configuration defined for it and there must already be a shard existing on
+                     another node with a leader. This RPC first contacts peer member seed nodes searching for a shard.
+                     When found, an AddServer message is sent to the shard leader and applied as described in the Raft
+                     paper.";
     }
 
     rpc remove-shard-replica {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to remove the replica.";
-            }
+            uses datastore-shard-id;
 
             leaf member-name {
                 mandatory true;
                 type string;
                 description "The cluster member from which the shard replica should be removed";
             }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
         }
 
         description "Removes an existing replica of a shard from this node via the RemoveServer mechanism as
-            described in the Raft paper.";
+                     described in the Raft paper.";
     }
 
     rpc make-leader-local {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to move the leader to the local node";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the shard belongs";
-            }
+            uses datastore-shard-id;
         }
 
         description "Attempts to move the shard leader of the given module based shard to the local node.
-                The rpc returns a response after handling of the underlying MakeLeaderLocal message completes.
-                This operation fails if there is no current shard leader due to lack of network connectivity or
-                a cluster majority. In addition, if the local node is not up to date with the current leader,
-                an attempt is made to first sync the local node with the leader. If this cannot be achieved
-                within two election timeout periods the operation fails.";
-    }
-
-    rpc add-prefix-shard-replica {
-        input {
-            leaf shard-prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
-        }
-
-        description "Adds a replica of a shard to this node and joins it to an existing cluster. There must already be
-                    a shard existing on another node with a leader. This RPC first contacts peer member seed nodes
-                    searching for a shard. When found, an AddServer message is sent to the shard leader and applied as
-                    described in the Raft paper.";
-    }
-
-    rpc remove-prefix-shard-replica {
-        input {
-            leaf shard-prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-            leaf member-name {
-                mandatory true;
-                type string;
-                description "The cluster member from which the shard replica should be removed";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
-        }
-
-        description "Removes an existing replica of a prefix shard from this node via the RemoveServer mechanism as
-                    described in the Raft paper.";
+                     The rpc returns a response after handling of the underlying MakeLeaderLocal message completes.
+                     This operation fails if there is no current shard leader due to lack of network connectivity or
+                     a cluster majority. In addition, if the local node is not up to date with the current leader,
+                     an attempt is made to first sync the local node with the leader. If this cannot be achieved
+                     within two election timeout periods the operation fails.";
     }
 
     rpc add-replicas-for-all-shards {
@@ -181,7 +125,7 @@ module cluster-admin {
         }
 
         description "Adds replicas on this node for all currently defined shards. This is equivalent to issuing
-            an add-shard-replica RPC for all shards.";
+                     an add-shard-replica RPC for all shards.";
     }
 
     rpc remove-all-shard-replicas {
@@ -198,31 +142,20 @@ module cluster-admin {
         }
 
         description "Removes replicas for all shards on this node. This is equivalent to issuing
-            a remove-shard-replica for all shards and essentially removes this node from a cluster.";
+                     a remove-shard-replica for all shards and essentially removes this node from a cluster.";
     }
 
     rpc change-member-voting-states-for-shard {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to change voting state.";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the shard belongs";
-            }
-
+            uses datastore-shard-id;
             uses member-voting-states-input;
         }
 
         description "Changes the voting states, either voting or non-voting, of cluster members for a shard.
-            Non-voting members will no longer participate in leader elections and consensus but will be
-            replicated. This is useful for having a set of members serve as a backup cluster in case the
-            primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
-            and will be forwarded to the leader.";
+                     Non-voting members will no longer participate in leader elections and consensus but will be
+                     replicated. This is useful for having a set of members serve as a backup cluster in case the
+                     primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
+                     and will be forwarded to the leader.";
     }
 
     rpc change-member-voting-states-for-all-shards {
@@ -235,10 +168,10 @@ module cluster-admin {
         }
 
         description "Changes the voting states, either voting or non-voting, of cluster members for all shards.
-            Non-voting members will no longer participate in leader elections and consensus but will be
-            replicated. This is useful for having a set of members serve as a backup cluster in case the
-            primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
-            and will be forwarded to the leader.";
+                     Non-voting members will no longer participate in leader elections and consensus but will be
+                     replicated. This is useful for having a set of members serve as a backup cluster in case the
+                     primary voting cluster suffers catastrophic failure. This RPC can be issued to any cluster member
+                     and will be forwarded to the leader.";
     }
 
     rpc flip-member-voting-states-for-all-shards {
@@ -247,7 +180,7 @@ module cluster-admin {
         }
 
         description "Flips the voting states of all cluster members for all shards, such that if a member
-            was voting it becomes non-voting and vice versa.";
+                     was voting it becomes non-voting and vice versa.";
     }
 
     rpc backup-datastore {
@@ -256,6 +189,15 @@ module cluster-admin {
               type string;
               description "The path and name of the file in which to store the backup.";
             }
+
+            leaf timeout {
+              type uint32 {
+                range 1..max;
+              }
+              units "seconds";
+              description "Optional timeout in seconds for the backup operation which will override all the different
+                           timeouts that are being hit on the backend.";
+            }
         }
 
         description "Creates a backup file of the datastore state";
@@ -263,50 +205,74 @@ module cluster-admin {
 
     rpc get-shard-role {
         input {
-            leaf shard-name {
-                mandatory true;
-                type string;
-                description "The name of the shard for which to create a replica.";
-            }
-
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
-            }
+            uses datastore-shard-id;
         }
 
         output {
             leaf role {
                 type string;
-                description "Current role for the given shard, if not present the shard currently doesn't have a role";
+                description "Current role for the given shard, if not present the shard currently does not have a role";
             }
         }
 
         description "Returns the current role for the requested module shard.";
     }
 
-    rpc get-prefix-shard-role {
+    rpc locate-shard {
+        description "Return the transport-level information about where a shard has a home.";
         input {
-            leaf shard-prefix {
-                mandatory true;
-                type instance-identifier;
-            }
+            uses datastore-shard-id;
+        }
 
-            leaf data-store-type {
-                mandatory true;
-                type data-store-type;
-                description "The type of the data store to which the replica belongs";
+        output {
+            choice member-node {
+                description "Location of the hypothetical cluster member node. Relationship to the input parameters
+                             and the transport protocol.";
+
+                leaf local {
+                    description "Local node is the best node to talk to when it comes from efficiency perspective
+                                 of underlying implementation. The requester of this RPC is advised to contact
+                                 any services to the specified shard via the channel on which this RPC was invoked.";
+                    type empty;
+                }
+
+                leaf leader-actor-ref {
+                    description "Actor reference to the actor which is currently acting as the leader.";
+                    type string;
+                }
             }
         }
+    }
+
+    rpc get-known-clients-for-all-shards {
+        description "Request all shards to report their known frontend clients. This is useful for determining what
+                     generation a resurrected member node should use.";
 
         output {
-            leaf role {
-                type string;
-                description "Current role for the given shard, if not present the shard currently doesn't have a role";
+            uses shard-result-output {
+                augment shard-result {
+                    list known-clients {
+                        when "../succeeded = true";
+
+                        uses cds:client-identifier;
+                        key "member type";
+                    }
+                }
             }
         }
+    }
 
-        description "Returns the current role for the requested module shard.";
+    rpc activate-eos-datacenter {
+        description "Activates the datacenter that the node this rpc is called on belongs to. The caller must maintain
+                     only a single active datacenter at a time as the singleton components will interfere with each
+                     other otherwise. This only needs to be used if configuring multiple datacenters or if not using
+                     default datacenter.";
+    }
+
+    rpc deactivate-eos-datacenter {
+        description "Deactivates the datacenter that the node this rpc is called on belongs to. The caller must maintain
+                     only a single active datacenter at a time as the singleton components will interfere with each
+                     other otherwise. This only needs to be used if configuring multiple datacenters or if not using
+                     default datacenter.";
     }
-}
\ No newline at end of file
+}
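
The reshaped module folds the repeated shard-name and data-store-type leaves into the datastore-shard-id grouping and introduces locate-shard, whose output is a member-node choice. A hedged Java sketch of how a caller might interpret that choice, assuming the binding-generated case names Local and LeaderActorRef that match the builders imported by ClusterAdminRpcService further below; exact generated names depend on the codegen run:

    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutput;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.locate.shard.output.member.node.LeaderActorRef;
    import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.locate.shard.output.member.node.Local;

    // Sketch of a locate-shard consumer; case interface names mirror the builders used below.
    final class LocateShardResult {
        private LocateShardResult() {
        }

        static String describe(final LocateShardOutput output) {
            final var node = output.getMemberNode();
            if (node instanceof Local) {
                // Local home: keep talking over the channel this RPC was invoked on.
                return "local";
            }
            if (node instanceof LeaderActorRef leader) {
                // Remote leader: the serialized actor reference tells the caller where to go.
                return "leader at " + leader.getLeaderActorRef();
            }
            return "unknown";
        }
    }

A caller would feed the RPC output into describe() and either keep using the local channel or resolve the returned actor reference.
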
index d85857bd988132803d4eb79aea81b6f3eb016fe7..cb905343c8ebefd5852da7c7e3cbd4a0c388b8b8 100644 (file)
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-cluster-admin-impl</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-cluster-admin-api</artifactId>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
     </dependency>
-    <!-- Tests -->
     <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-dom-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>yang-binding</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-akka-raft</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
+      <artifactId>eos-dom-akka</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-distributed-datastore</artifactId>
-      <type>test-jar</type>
-      <version>1.10.0-SNAPSHOT</version>
-      <scope>test</scope>
+      <artifactId>cds-access-api</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>repackaged-akka</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller.samples</groupId>
-      <artifactId>clustering-it-model</artifactId>
-      <version>1.10.0-SNAPSHOT</version>
-      <scope>test</scope>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-akka-raft</artifactId>
     </dependency>
     <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-cluster-admin-api</artifactId>
     </dependency>
-
-    <!-- Akka -->
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-actor_2.12</artifactId>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-distributed-datastore</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-testkit_2.12</artifactId>
-      <scope>test</scope>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
     </dependency>
-
-    <!-- Scala -->
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-library</artifactId>
     </dependency>
 
-    <!-- OpenDaylight -->
+    <!-- Tests -->
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-testkit_2.13</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-akka-raft</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-distributed-datastore</artifactId>
+      <type>test-jar</type>
+      <version>${project.version}</version>
+      <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>yang-binding</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-api</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-common</artifactId>
+      <artifactId>yang-test-util</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-model</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-simple</artifactId>
+      <scope>test</scope>
     </dependency>
-
   </dependencies>
 
   <build>
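
With the reworked dependency set (OSGi component annotations, mdsal-binding-api, eos-dom-akka), the RPC implementation can be wired through Declarative Services and the package-private registerWith() visible in the Java diff below. The component here is a hypothetical sketch of such wiring, not code carried by this patch; the target filters on the two datastore references are assumptions:

    package org.opendaylight.controller.cluster.datastore.admin;

    import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
    import org.opendaylight.controller.eos.akka.DataCenterControl;
    import org.opendaylight.mdsal.binding.api.RpcProviderService;
    import org.opendaylight.yangtools.concepts.Registration;
    import org.osgi.service.component.annotations.Activate;
    import org.osgi.service.component.annotations.Component;
    import org.osgi.service.component.annotations.Deactivate;
    import org.osgi.service.component.annotations.Reference;

    // Hypothetical DS component, not part of the patch: shows one way the new package-private
    // registerWith() could be driven. Target filters are illustrative; real service properties may differ.
    @Component(service = { })
    public final class ClusterAdminRpcWiring {
        private final Registration reg;

        @Activate
        public ClusterAdminRpcWiring(@Reference final RpcProviderService rpcProvider,
                @Reference(target = "(type=distributed-config)") final DistributedDataStoreInterface configDataStore,
                @Reference(target = "(type=distributed-operational)") final DistributedDataStoreInterface operDataStore,
                @Reference final DataCenterControl dataCenterControl) {
            reg = new ClusterAdminRpcService(configDataStore, operDataStore, dataCenterControl)
                .registerWith(rpcProvider);
        }

        @Deactivate
        void deactivate() {
            reg.close();
        }
    }
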
index 4012f2c70914c618c705a92a340ef99dc448fef6..8ad1553dba2268daa9c7a86fcad319997d8fa22e 100644 (file)
@@ -8,13 +8,16 @@
 package org.opendaylight.controller.cluster.datastore.admin;
 
 import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
 import akka.actor.Status.Success;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.base.Function;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Strings;
 import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -30,106 +33,157 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.stream.Collectors;
 import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.controller.cluster.datastore.messages.AddPrefixShardReplica;
 import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
 import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
 import org.opendaylight.controller.cluster.datastore.messages.FlipShardMembersVotingStatus;
+import org.opendaylight.controller.cluster.datastore.messages.GetKnownClients;
+import org.opendaylight.controller.cluster.datastore.messages.GetKnownClientsReply;
 import org.opendaylight.controller.cluster.datastore.messages.GetShardRole;
 import org.opendaylight.controller.cluster.datastore.messages.GetShardRoleReply;
 import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
-import org.opendaylight.controller.cluster.datastore.messages.RemovePrefixShardReplica;
+import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshotList;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaOutputBuilder;
+import org.opendaylight.controller.eos.akka.DataCenterControl;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ClusterAdminService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.get.known.clients._for.all.shards.output.ShardResult1Builder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.get.known.clients._for.all.shards.output.shard.result.KnownClients;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.get.known.clients._for.all.shards.output.shard.result.KnownClientsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.locate.shard.output.member.node.LeaderActorRefBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.locate.shard.output.member.node.LocalBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.Uint32;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
 
 /**
  * Implements the yang RPCs defined in the generated ClusterAdminService interface.
  *
  * @author Thomas Pantelis
  */
-public class ClusterAdminRpcService implements ClusterAdminService {
+public final class ClusterAdminRpcService {
     private static final Timeout SHARD_MGR_TIMEOUT = new Timeout(1, TimeUnit.MINUTES);
 
     private static final Logger LOG = LoggerFactory.getLogger(ClusterAdminRpcService.class);
+    private static final @NonNull RpcResult<LocateShardOutput> LOCAL_SHARD_RESULT =
+            RpcResultBuilder.success(new LocateShardOutputBuilder()
+                .setMemberNode(new LocalBuilder().setLocal(Empty.value()).build())
+                .build())
+            .build();
 
     private final DistributedDataStoreInterface configDataStore;
     private final DistributedDataStoreInterface operDataStore;
-    private final BindingNormalizedNodeSerializer serializer;
     private final Timeout makeLeaderLocalTimeout;
+    private final DataCenterControl dataCenterControl;
 
     public ClusterAdminRpcService(final DistributedDataStoreInterface configDataStore,
-            final DistributedDataStoreInterface operDataStore,
-            final BindingNormalizedNodeSerializer serializer) {
+                                  final DistributedDataStoreInterface operDataStore,
+                                  final DataCenterControl dataCenterControl) {
         this.configDataStore = configDataStore;
         this.operDataStore = operDataStore;
-        this.serializer = serializer;
 
-        this.makeLeaderLocalTimeout =
+        makeLeaderLocalTimeout =
                 new Timeout(configDataStore.getActorUtils().getDatastoreContext()
                         .getShardLeaderElectionTimeout().duration().$times(2));
+
+        this.dataCenterControl = dataCenterControl;
+    }
+
+    Registration registerWith(final RpcProviderService rpcProviderService) {
+        return rpcProviderService.registerRpcImplementations(
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .AddShardReplica) this::addShardReplica,
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .RemoveShardReplica) this::removeShardReplica,
+            (LocateShard) this::locateShard,
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .MakeLeaderLocal) this::makeLeaderLocal,
+            (AddReplicasForAllShards) this::addReplicasForAllShards,
+            (RemoveAllShardReplicas) this::removeAllShardReplicas,
+            (ChangeMemberVotingStatesForShard) this::changeMemberVotingStatesForShard,
+            (ChangeMemberVotingStatesForAllShards) this::changeMemberVotingStatesForAllShards,
+            (FlipMemberVotingStatesForAllShards) this::flipMemberVotingStatesForAllShards,
+            (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013
+                .GetShardRole) this::getShardRole,
+            (BackupDatastore) this::backupDatastore,
+            (GetKnownClientsForAllShards) this::getKnownClientsForAllShards,
+            (ActivateEosDatacenter) this::activateEosDatacenter,
+            (DeactivateEosDatacenter) this::deactivateEosDatacenter);
     }
 
-    @Override
-    public ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
+    @VisibleForTesting
+    ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -142,28 +196,27 @@ public class ClusterAdminRpcService implements ClusterAdminService {
 
         LOG.info("Adding replica for shard {}", shardName);
 
-        final SettableFuture<RpcResult<AddShardReplicaOutput>> returnFuture = SettableFuture.create();
-        ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName));
-        Futures.addCallback(future, new FutureCallback<Success>() {
-            @Override
-            public void onSuccess(final Success success) {
-                LOG.info("Successfully added replica for shard {}", shardName);
-                returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build()));
-            }
+        final var returnFuture = SettableFuture.<RpcResult<AddShardReplicaOutput>>create();
+        Futures.addCallback(sendMessageToShardManager(dataStoreType, new AddShardReplica(shardName)),
+            new FutureCallback<Success>() {
+                @Override
+                public void onSuccess(final Success success) {
+                    LOG.info("Successfully added replica for shard {}", shardName);
+                    returnFuture.set(newSuccessfulResult(new AddShardReplicaOutputBuilder().build()));
+                }
 
-            @Override
-            public void onFailure(final Throwable failure) {
-                onMessageFailure(String.format("Failed to add replica for shard %s", shardName),
+                @Override
+                public void onFailure(final Throwable failure) {
+                    onMessageFailure(String.format("Failed to add replica for shard %s", shardName),
                         returnFuture, failure);
-            }
-        }, MoreExecutors.directExecutor());
+                }
+            }, MoreExecutors.directExecutor());
 
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
-            final RemoveShardReplicaInput input) {
+    @VisibleForTesting
+    ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(final RemoveShardReplicaInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -201,8 +254,49 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<MakeLeaderLocalOutput>> makeLeaderLocal(final MakeLeaderLocalInput input) {
+    private ListenableFuture<RpcResult<LocateShardOutput>> locateShard(final LocateShardInput input) {
+        final ActorUtils utils;
+        switch (input.getDataStoreType()) {
+            case Config:
+                utils = configDataStore.getActorUtils();
+                break;
+            case Operational:
+                utils = operDataStore.getActorUtils();
+                break;
+            default:
+                return newFailedRpcResultFuture("Unhandled datastore in " + input);
+        }
+
+        final SettableFuture<RpcResult<LocateShardOutput>> ret = SettableFuture.create();
+        utils.findPrimaryShardAsync(input.getShardName()).onComplete(new OnComplete<PrimaryShardInfo>() {
+            @Override
+            public void onComplete(final Throwable failure, final PrimaryShardInfo success) throws Throwable {
+                if (failure != null) {
+                    LOG.debug("Failed to find shard for {}", input, failure);
+                    ret.setException(failure);
+                    return;
+                }
+
+                // Data tree implies local leader
+                if (success.getLocalShardDataTree().isPresent()) {
+                    ret.set(LOCAL_SHARD_RESULT);
+                    return;
+                }
+
+                final ActorSelection actorPath = success.getPrimaryShardActor();
+                ret.set(newSuccessfulResult(new LocateShardOutputBuilder()
+                    .setMemberNode(new LeaderActorRefBuilder()
+                        .setLeaderActorRef(actorPath.toSerializationFormat())
+                        .build())
+                    .build()));
+            }
+        }, utils.getClientDispatcher());
+
+        return ret;
+    }
+
+    @VisibleForTesting
+    ListenableFuture<RpcResult<MakeLeaderLocalOutput>> makeLeaderLocal(final MakeLeaderLocalInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -219,8 +313,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         LOG.info("Moving leader to local node {} for shard {}, datastoreType {}",
                 actorUtils.getCurrentMemberName().getName(), shardName, dataStoreType);
 
-        final scala.concurrent.Future<ActorRef> localShardReply =
-                actorUtils.findLocalShardAsync(shardName);
+        final Future<ActorRef> localShardReply = actorUtils.findLocalShardAsync(shardName);
 
         final scala.concurrent.Promise<Object> makeLeaderLocalAsk = akka.dispatch.Futures.promise();
         localShardReply.onComplete(new OnComplete<ActorRef>() {
@@ -239,7 +332,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         }, actorUtils.getClientDispatcher());
 
         final SettableFuture<RpcResult<MakeLeaderLocalOutput>> future = SettableFuture.create();
-        makeLeaderLocalAsk.future().onComplete(new OnComplete<Object>() {
+        makeLeaderLocalAsk.future().onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object success) {
                 if (failure != null) {
@@ -257,104 +350,21 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return future;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<AddPrefixShardReplicaOutput>> addPrefixShardReplica(
-            final AddPrefixShardReplicaInput input) {
-
-        final InstanceIdentifier<?> identifier = input.getShardPrefix();
-        if (identifier == null) {
-            return newFailedRpcResultFuture("A valid shard identifier must be specified");
-        }
-
-        final DataStoreType dataStoreType = input.getDataStoreType();
-        if (dataStoreType == null) {
-            return newFailedRpcResultFuture("A valid DataStoreType must be specified");
-        }
-
-        LOG.info("Adding replica for shard {}, datastore type {}", identifier, dataStoreType);
-
-        final YangInstanceIdentifier prefix = serializer.toYangInstanceIdentifier(identifier);
-        final SettableFuture<RpcResult<AddPrefixShardReplicaOutput>> returnFuture = SettableFuture.create();
-        ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, new AddPrefixShardReplica(prefix));
-        Futures.addCallback(future, new FutureCallback<Success>() {
-            @Override
-            public void onSuccess(final Success success) {
-                LOG.info("Successfully added replica for shard {}", prefix);
-                returnFuture.set(newSuccessfulResult(new AddPrefixShardReplicaOutputBuilder().build()));
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                onMessageFailure(String.format("Failed to add replica for shard %s", prefix),
-                        returnFuture, failure);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return returnFuture;
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<RemovePrefixShardReplicaOutput>> removePrefixShardReplica(
-            final RemovePrefixShardReplicaInput input) {
-
-        final InstanceIdentifier<?> identifier = input.getShardPrefix();
-        if (identifier == null) {
-            return newFailedRpcResultFuture("A valid shard identifier must be specified");
-        }
-
-        final DataStoreType dataStoreType = input.getDataStoreType();
-        if (dataStoreType == null) {
-            return newFailedRpcResultFuture("A valid DataStoreType must be specified");
-        }
-
-        final String memberName = input.getMemberName();
-        if (Strings.isNullOrEmpty(memberName)) {
-            return newFailedRpcResultFuture("A valid member name must be specified");
-        }
-
-        LOG.info("Removing replica for shard {} memberName {}, datastoreType {}",
-                identifier, memberName, dataStoreType);
-        final YangInstanceIdentifier prefix = serializer.toYangInstanceIdentifier(identifier);
-
-        final SettableFuture<RpcResult<RemovePrefixShardReplicaOutput>> returnFuture = SettableFuture.create();
-        final ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType,
-                new RemovePrefixShardReplica(prefix, MemberName.forName(memberName)));
-        Futures.addCallback(future, new FutureCallback<Success>() {
-            @Override
-            public void onSuccess(final Success success) {
-                LOG.info("Successfully removed replica for shard {}", prefix);
-                returnFuture.set(newSuccessfulResult(new RemovePrefixShardReplicaOutputBuilder().build()));
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                onMessageFailure(String.format("Failed to remove replica for shard %s", prefix),
-                        returnFuture, failure);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return returnFuture;
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<AddReplicasForAllShardsOutput>> addReplicasForAllShards(
+    @VisibleForTesting ListenableFuture<RpcResult<AddReplicasForAllShardsOutput>> addReplicasForAllShards(
             final AddReplicasForAllShardsInput input) {
         LOG.info("Adding replicas for all shards");
 
         final List<Entry<ListenableFuture<Success>, ShardResultBuilder>> shardResultData = new ArrayList<>();
-        Function<String, Object> messageSupplier = AddShardReplica::new;
 
-        sendMessageToManagerForConfiguredShards(DataStoreType.Config, shardResultData, messageSupplier);
-        sendMessageToManagerForConfiguredShards(DataStoreType.Operational, shardResultData, messageSupplier);
+        sendMessageToManagerForConfiguredShards(DataStoreType.Config, shardResultData, AddShardReplica::new);
+        sendMessageToManagerForConfiguredShards(DataStoreType.Operational, shardResultData, AddShardReplica::new);
 
         return waitForShardResults(shardResultData, shardResults ->
                 new AddReplicasForAllShardsOutputBuilder().setShardResult(shardResults).build(),
                 "Failed to add replica");
     }
 
-
-    @Override
-    public ListenableFuture<RpcResult<RemoveAllShardReplicasOutput>> removeAllShardReplicas(
+    @VisibleForTesting ListenableFuture<RpcResult<RemoveAllShardReplicasOutput>> removeAllShardReplicas(
             final RemoveAllShardReplicasInput input) {
         LOG.info("Removing replicas for all shards");
 
@@ -370,56 +380,54 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         sendMessageToManagerForConfiguredShards(DataStoreType.Config, shardResultData, messageSupplier);
         sendMessageToManagerForConfiguredShards(DataStoreType.Operational, shardResultData, messageSupplier);
 
-        return waitForShardResults(shardResultData, shardResults ->
-                new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(),
-        "       Failed to remove replica");
+        return waitForShardResults(shardResultData,
+            shardResults -> new RemoveAllShardReplicasOutputBuilder().setShardResult(shardResults).build(),
+            "       Failed to remove replica");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> changeMemberVotingStatesForShard(
+    @VisibleForTesting
+    ListenableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> changeMemberVotingStatesForShard(
             final ChangeMemberVotingStatesForShardInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
         }
 
-        DataStoreType dataStoreType = input.getDataStoreType();
+        final var dataStoreType = input.getDataStoreType();
         if (dataStoreType == null) {
             return newFailedRpcResultFuture("A valid DataStoreType must be specified");
         }
 
-        List<MemberVotingState> memberVotingStates = input.getMemberVotingState();
+        final var memberVotingStates = input.getMemberVotingState();
         if (memberVotingStates == null || memberVotingStates.isEmpty()) {
             return newFailedRpcResultFuture("No member voting state input was specified");
         }
 
-        ChangeShardMembersVotingStatus changeVotingStatus = toChangeShardMembersVotingStatus(shardName,
-                memberVotingStates);
-
+        final var changeVotingStatus = toChangeShardMembersVotingStatus(shardName, memberVotingStates);
         LOG.info("Change member voting states for shard {}: {}", shardName,
                 changeVotingStatus.getMeberVotingStatusMap());
 
-        final SettableFuture<RpcResult<ChangeMemberVotingStatesForShardOutput>> returnFuture = SettableFuture.create();
-        ListenableFuture<Success> future = sendMessageToShardManager(dataStoreType, changeVotingStatus);
-        Futures.addCallback(future, new FutureCallback<Success>() {
-            @Override
-            public void onSuccess(final Success success) {
-                LOG.info("Successfully changed member voting states for shard {}", shardName);
-                returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build()));
-            }
+        final var returnFuture = SettableFuture.<RpcResult<ChangeMemberVotingStatesForShardOutput>>create();
+        Futures.addCallback(sendMessageToShardManager(dataStoreType, changeVotingStatus),
+            new FutureCallback<Success>() {
+                @Override
+                public void onSuccess(final Success success) {
+                    LOG.info("Successfully changed member voting states for shard {}", shardName);
+                    returnFuture.set(newSuccessfulResult(new ChangeMemberVotingStatesForShardOutputBuilder().build()));
+                }
 
-            @Override
-            public void onFailure(final Throwable failure) {
-                onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName),
+                @Override
+                public void onFailure(final Throwable failure) {
+                    onMessageFailure(String.format("Failed to change member voting states for shard %s", shardName),
                         returnFuture, failure);
-            }
-        }, MoreExecutors.directExecutor());
+                }
+            }, MoreExecutors.directExecutor());
 
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ChangeMemberVotingStatesForAllShardsOutput>> changeMemberVotingStatesForAllShards(
+    @VisibleForTesting
+    ListenableFuture<RpcResult<ChangeMemberVotingStatesForAllShardsOutput>> changeMemberVotingStatesForAllShards(
             final ChangeMemberVotingStatesForAllShardsInput input) {
         List<MemberVotingState> memberVotingStates = input.getMemberVotingState();
         if (memberVotingStates == null || memberVotingStates.isEmpty()) {
@@ -440,11 +448,11 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                 "Failed to change member voting states");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<FlipMemberVotingStatesForAllShardsOutput>> flipMemberVotingStatesForAllShards(
+    @VisibleForTesting
+    ListenableFuture<RpcResult<FlipMemberVotingStatesForAllShardsOutput>> flipMemberVotingStatesForAllShards(
             final FlipMemberVotingStatesForAllShardsInput input) {
-        final List<Entry<ListenableFuture<Success>, ShardResultBuilder>> shardResultData = new ArrayList<>();
-        Function<String, Object> messageSupplier = FlipShardMembersVotingStatus::new;
+        final var shardResultData = new ArrayList<Entry<ListenableFuture<Success>, ShardResultBuilder>>();
+        final Function<String, Object> messageSupplier = FlipShardMembersVotingStatus::new;
 
         LOG.info("Flip member voting states for all shards");
 
@@ -456,8 +464,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                 "Failed to change member voting states");
     }
 
-    @Override
-    public ListenableFuture<RpcResult<GetShardRoleOutput>> getShardRole(final GetShardRoleInput input) {
+    private ListenableFuture<RpcResult<GetShardRoleOutput>> getShardRole(final GetShardRoleInput input) {
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
             return newFailedRpcResultFuture("A valid shard name must be specified");
@@ -499,102 +506,152 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<GetPrefixShardRoleOutput>> getPrefixShardRole(
-            final GetPrefixShardRoleInput input) {
-        final InstanceIdentifier<?> identifier = input.getShardPrefix();
-        if (identifier == null) {
-            return newFailedRpcResultFuture("A valid shard identifier must be specified");
-        }
+    @VisibleForTesting
+    ListenableFuture<RpcResult<BackupDatastoreOutput>> backupDatastore(final BackupDatastoreInput input) {
+        LOG.debug("backupDatastore: {}", input);
 
-        final DataStoreType dataStoreType = input.getDataStoreType();
-        if (dataStoreType == null) {
-            return newFailedRpcResultFuture("A valid DataStoreType must be specified");
+        if (Strings.isNullOrEmpty(input.getFilePath())) {
+            return newFailedRpcResultFuture("A valid file path must be specified");
         }
 
-        LOG.info("Getting prefix shard role for shard: {}, datastore type {}", identifier, dataStoreType);
+        final Uint32 timeout = input.getTimeout();
+        final Timeout opTimeout = timeout != null ? Timeout.apply(timeout.longValue(), TimeUnit.SECONDS)
+                : SHARD_MGR_TIMEOUT;
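+        // Use the caller-supplied timeout (in seconds) for the GetSnapshot request, falling back to
+        // the default shard manager timeout when none is given.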
 
-        final YangInstanceIdentifier prefix = serializer.toYangInstanceIdentifier(identifier);
-        final String shardName = ClusterUtils.getCleanShardName(prefix);
-        final SettableFuture<RpcResult<GetPrefixShardRoleOutput>> returnFuture = SettableFuture.create();
-        ListenableFuture<GetShardRoleReply> future = sendMessageToShardManager(dataStoreType,
-                new GetShardRole(shardName));
-        Futures.addCallback(future, new FutureCallback<GetShardRoleReply>() {
+        final SettableFuture<RpcResult<BackupDatastoreOutput>> returnFuture = SettableFuture.create();
+        ListenableFuture<List<DatastoreSnapshot>> future = sendMessageToShardManagers(new GetSnapshot(opTimeout));
+        Futures.addCallback(future, new FutureCallback<>() {
             @Override
-            public void onSuccess(final GetShardRoleReply reply) {
-                if (reply == null) {
-                    returnFuture.set(ClusterAdminRpcService.<GetPrefixShardRoleOutput>newFailedRpcResultBuilder(
-                            "No Shard role present. Please retry..").build());
-                    return;
-                }
-
-                LOG.info("Successfully received role:{} for shard {}", reply.getRole(), shardName);
-                final GetPrefixShardRoleOutputBuilder builder = new GetPrefixShardRoleOutputBuilder();
-                if (reply.getRole() != null) {
-                    builder.setRole(reply.getRole());
-                }
-                returnFuture.set(newSuccessfulResult(builder.build()));
+            public void onSuccess(final List<DatastoreSnapshot> snapshots) {
+                saveSnapshotsToFile(new DatastoreSnapshotList(snapshots), input.getFilePath(), returnFuture);
             }
 
             @Override
             public void onFailure(final Throwable failure) {
-                returnFuture.set(ClusterAdminRpcService.<GetPrefixShardRoleOutput>newFailedRpcResultBuilder(
-                        "Failed to get shard role.", failure).build());
+                onDatastoreBackupFailure(input.getFilePath(), returnFuture, failure);
             }
         }, MoreExecutors.directExecutor());
 
         return returnFuture;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<BackupDatastoreOutput>> backupDatastore(final BackupDatastoreInput input) {
-        LOG.debug("backupDatastore: {}", input);
+    private ListenableFuture<RpcResult<GetKnownClientsForAllShardsOutput>> getKnownClientsForAllShards(
+            final GetKnownClientsForAllShardsInput input) {
+        final ImmutableMap<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> allShardReplies =
+                getAllShardLeadersClients();
+        return Futures.whenAllComplete(allShardReplies.values()).call(() -> processReplies(allShardReplies),
+            MoreExecutors.directExecutor());
+    }
 
-        if (Strings.isNullOrEmpty(input.getFilePath())) {
-            return newFailedRpcResultFuture("A valid file path must be specified");
-        }
+    private ListenableFuture<RpcResult<ActivateEosDatacenterOutput>> activateEosDatacenter(
+            final ActivateEosDatacenterInput input) {
+        LOG.debug("Activating EOS Datacenter");
+        final SettableFuture<RpcResult<ActivateEosDatacenterOutput>> future = SettableFuture.create();
+        Futures.addCallback(dataCenterControl.activateDataCenter(), new FutureCallback<>() {
+            @Override
+            public void onSuccess(final Empty result) {
+                LOG.debug("Successfully activated datacenter.");
+                future.set(RpcResultBuilder.<ActivateEosDatacenterOutput>success().build());
+            }
 
-        final SettableFuture<RpcResult<BackupDatastoreOutput>> returnFuture = SettableFuture.create();
-        ListenableFuture<List<DatastoreSnapshot>> future = sendMessageToShardManagers(GetSnapshot.INSTANCE);
-        Futures.addCallback(future, new FutureCallback<List<DatastoreSnapshot>>() {
             @Override
-            public void onSuccess(final List<DatastoreSnapshot> snapshots) {
-                saveSnapshotsToFile(new DatastoreSnapshotList(snapshots), input.getFilePath(), returnFuture);
+            public void onFailure(final Throwable failure) {
+                future.set(ClusterAdminRpcService.<ActivateEosDatacenterOutput>newFailedRpcResultBuilder(
+                        "Failed to activate datacenter.", failure).build());
+            }
+        }, MoreExecutors.directExecutor());
+
+        return future;
+    }
+
+    private ListenableFuture<RpcResult<DeactivateEosDatacenterOutput>> deactivateEosDatacenter(
+            final DeactivateEosDatacenterInput input) {
+        LOG.debug("Deactivating EOS Datacenter");
+        final SettableFuture<RpcResult<DeactivateEosDatacenterOutput>> future = SettableFuture.create();
+        Futures.addCallback(dataCenterControl.deactivateDataCenter(), new FutureCallback<>() {
+            @Override
+            public void onSuccess(final Empty result) {
+                LOG.debug("Successfully deactivated datacenter.");
+                future.set(RpcResultBuilder.<DeactivateEosDatacenterOutput>success().build());
             }
 
             @Override
             public void onFailure(final Throwable failure) {
-                onDatastoreBackupFailure(input.getFilePath(), returnFuture, failure);
+                future.set(ClusterAdminRpcService.<DeactivateEosDatacenterOutput>newFailedRpcResultBuilder(
+                        "Failed to deactivate datacenter.", failure).build());
             }
         }, MoreExecutors.directExecutor());
 
-        return returnFuture;
+        return future;
+    }
+
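+    // Collates the per-shard GetKnownClientsReply futures into a ShardResult map, recording an
+    // unsuccessful entry (with the underlying failure's message) for any shard whose reply failed.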
+    private static RpcResult<GetKnownClientsForAllShardsOutput> processReplies(
+            final ImmutableMap<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> allShardReplies) {
+        final Map<ShardResultKey, ShardResult> result = Maps.newHashMapWithExpectedSize(allShardReplies.size());
+        for (Entry<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> entry : allShardReplies.entrySet()) {
+            final ListenableFuture<GetKnownClientsReply> future = entry.getValue();
+            final ShardResultBuilder builder = new ShardResultBuilder()
+                    .setDataStoreType(entry.getKey().getDataStoreType())
+                    .setShardName(entry.getKey().getShardName());
+
+            final GetKnownClientsReply reply;
+            try {
+                reply = Futures.getDone(future);
+            } catch (ExecutionException e) {
+                LOG.debug("Shard {} failed to answer", entry.getKey(), e);
+                final ShardResult sr = builder
+                        .setSucceeded(Boolean.FALSE)
+                        .setErrorMessage(e.getCause().getMessage())
+                        .build();
+                result.put(sr.key(), sr);
+                continue;
+            }
+
+            final ShardResult sr = builder
+                    .setSucceeded(Boolean.TRUE)
+                    .addAugmentation(new ShardResult1Builder()
+                        .setKnownClients(reply.getClients().stream()
+                            .map(client -> new KnownClientsBuilder()
+                                .setMember(client.getFrontendId().getMemberName().toYang())
+                                .setType(client.getFrontendId().getClientType().toYang())
+                                .setGeneration(client.getYangGeneration())
+                                .build())
+                            .collect(Collectors.toMap(KnownClients::key, Function.identity())))
+                        .build())
+                    .build();
+
+            result.put(sr.key(), sr);
+        }
+
+        return RpcResultBuilder.success(new GetKnownClientsForAllShardsOutputBuilder().setShardResult(result).build())
+                .build();
     }
 
     private static ChangeShardMembersVotingStatus toChangeShardMembersVotingStatus(final String shardName,
             final List<MemberVotingState> memberVotingStatus) {
         Map<String, Boolean> serverVotingStatusMap = new HashMap<>();
         for (MemberVotingState memberStatus: memberVotingStatus) {
-            serverVotingStatusMap.put(memberStatus.getMemberName(), memberStatus.isVoting());
+            serverVotingStatusMap.put(memberStatus.getMemberName(), memberStatus.getVoting());
         }
         return new ChangeShardMembersVotingStatus(shardName, serverVotingStatusMap);
     }
 
     private static <T> SettableFuture<RpcResult<T>> waitForShardResults(
             final List<Entry<ListenableFuture<Success>, ShardResultBuilder>> shardResultData,
-            final Function<List<ShardResult>, T> resultDataSupplier,
+            final Function<Map<ShardResultKey, ShardResult>, T> resultDataSupplier,
             final String failureLogMsgPrefix) {
         final SettableFuture<RpcResult<T>> returnFuture = SettableFuture.create();
-        final List<ShardResult> shardResults = new ArrayList<>();
+        final Map<ShardResultKey, ShardResult> shardResults = new HashMap<>();
         for (final Entry<ListenableFuture<Success>, ShardResultBuilder> entry : shardResultData) {
             Futures.addCallback(entry.getKey(), new FutureCallback<Success>() {
                 @Override
                 public void onSuccess(final Success result) {
                     synchronized (shardResults) {
-                        ShardResultBuilder shardResult = entry.getValue();
-                        LOG.debug("onSuccess for shard {}, type {}", shardResult.getShardName(),
-                                shardResult.getDataStoreType());
-                        shardResults.add(shardResult.setSucceeded(true).build());
+                        final ShardResultBuilder builder = entry.getValue();
+                        LOG.debug("onSuccess for shard {}, type {}", builder.getShardName(),
+                            builder.getDataStoreType());
+                        final ShardResult sr = builder.setSucceeded(Boolean.TRUE).build();
+                        shardResults.put(sr.key(), sr);
                         checkIfComplete();
                     }
                 }
@@ -602,11 +659,14 @@ public class ClusterAdminRpcService implements ClusterAdminService {
                 @Override
                 public void onFailure(final Throwable failure) {
                     synchronized (shardResults) {
-                        ShardResultBuilder shardResult = entry.getValue();
-                        LOG.warn("{} for shard {}, type {}", failureLogMsgPrefix, shardResult.getShardName(),
-                                shardResult.getDataStoreType(), failure);
-                        shardResults.add(shardResult.setSucceeded(false).setErrorMessage(
-                                Throwables.getRootCause(failure).getMessage()).build());
+                        ShardResultBuilder builder = entry.getValue();
+                        LOG.warn("{} for shard {}, type {}", failureLogMsgPrefix, builder.getShardName(),
+                                builder.getDataStoreType(), failure);
+                        final ShardResult sr = builder
+                                .setSucceeded(Boolean.FALSE)
+                                .setErrorMessage(Throwables.getRootCause(failure).getMessage())
+                                .build();
+                        shardResults.put(sr.key(), sr);
                         checkIfComplete();
                     }
                 }
@@ -684,7 +744,7 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         final SettableFuture<T> returnFuture = SettableFuture.create();
 
         @SuppressWarnings("unchecked")
-        scala.concurrent.Future<T> askFuture = (scala.concurrent.Future<T>) Patterns.ask(actor, message, timeout);
+        Future<T> askFuture = (Future<T>) Patterns.ask(actor, message, timeout);
         askFuture.onComplete(new OnComplete<T>() {
             @Override
             public void onComplete(final Throwable failure, final T resp) {
@@ -699,6 +759,38 @@ public class ClusterAdminRpcService implements ClusterAdminService {
         return returnFuture;
     }
 
+    private ImmutableMap<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> getAllShardLeadersClients() {
+        final ImmutableMap.Builder<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> builder =
+                ImmutableMap.builder();
+
+        addAllShardsClients(builder, DataStoreType.Config, configDataStore.getActorUtils());
+        addAllShardsClients(builder, DataStoreType.Operational, operDataStore.getActorUtils());
+
+        return builder.build();
+    }
+
+    private static void addAllShardsClients(
+            final ImmutableMap.Builder<ShardIdentifier, ListenableFuture<GetKnownClientsReply>> builder,
+            final DataStoreType type, final ActorUtils utils) {
+        for (String shardName : utils.getConfiguration().getAllShardNames()) {
+            final SettableFuture<GetKnownClientsReply> future = SettableFuture.create();
+            builder.put(new ShardIdentifier(type, shardName), future);
+
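+            // Ask the shard's primary actor for its known frontend clients and bridge the Scala
+            // Future completion onto the SettableFuture handed back to the caller.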
+            utils.findPrimaryShardAsync(shardName).flatMap(
+                info -> Patterns.ask(info.getPrimaryShardActor(), GetKnownClients.INSTANCE, SHARD_MGR_TIMEOUT),
+                utils.getClientDispatcher()).onComplete(new OnComplete<>() {
+                    @Override
+                    public void onComplete(final Throwable failure, final Object success) {
+                        if (failure == null) {
+                            future.set((GetKnownClientsReply) success);
+                        } else {
+                            future.setException(failure);
+                        }
+                    }
+                }, utils.getClientDispatcher());
+        }
+    }
+
     private static <T> ListenableFuture<RpcResult<T>> newFailedRpcResultFuture(final String message) {
         return ClusterAdminRpcService.<T>newFailedRpcResultBuilder(message).buildFuture();
     }
diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java b/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/OSGiClusterAdmin.java
new file mode 100644 (file)
index 0000000..bcbf408
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin;
+
+import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
+import org.opendaylight.controller.eos.akka.DataCenterControl;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Component(service = { })
+public final class OSGiClusterAdmin {
+    private static final Logger LOG = LoggerFactory.getLogger(OSGiClusterAdmin.class);
+
+    private final Registration reg;
+
+    @Activate
+    public OSGiClusterAdmin(
+            @Reference(target = "(type=distributed-config)") final DistributedDataStoreInterface configDatastore,
+            @Reference(target = "(type=distributed-operational)") final DistributedDataStoreInterface operDatastore,
+            @Reference final RpcProviderService rpcProviderService,
+            @Reference final DataCenterControl dataCenterControl) {
+        reg = new ClusterAdminRpcService(configDatastore, operDatastore, dataCenterControl)
+            .registerWith(rpcProviderService);
+        LOG.info("Cluster Admin services started");
+    }
+
+    @Deactivate
+    void deactivate() {
+        reg.close();
+        LOG.info("Cluster Admin services stopped");
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ShardIdentifier.java b/opendaylight/md-sal/sal-cluster-admin-impl/src/main/java/org/opendaylight/controller/cluster/datastore/admin/ShardIdentifier.java
new file mode 100644 (file)
index 0000000..af3a111
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DatastoreShardId;
+import org.opendaylight.yangtools.concepts.Identifier;
+
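+/**
+ * Combination of a {@link DataStoreType} and a shard name, used as a map key when correlating
+ * per-shard requests and replies.
+ */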
+final class ShardIdentifier implements Identifier {
+    private static final long serialVersionUID = 1L;
+
+    private final @NonNull String shardName;
+    private final @NonNull DataStoreType type;
+
+    ShardIdentifier(final DataStoreType type, final String shardName) {
+        this.type = requireNonNull(type);
+        this.shardName = requireNonNull(shardName);
+    }
+
+    ShardIdentifier(final DatastoreShardId id) {
+        this(id.getDataStoreType(), id.getShardName());
+    }
+
+    public @NonNull String getShardName() {
+        return shardName;
+    }
+
+    public @NonNull DataStoreType getDataStoreType() {
+        return type;
+    }
+
+    @Override
+    public int hashCode() {
+        return type.hashCode() * 31 + shardName.hashCode();
+    }
+
+    @Override
+    public boolean equals(final Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (!(obj instanceof ShardIdentifier)) {
+            return false;
+        }
+        final ShardIdentifier other = (ShardIdentifier) obj;
+        return type.equals(other.type) && shardName.equals(other.shardName);
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).add("type", type).add("shardName", shardName).toString();
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-impl/src/main/resources/OSGI-INF/blueprint/cluster-admin.xml b/opendaylight/md-sal/sal-cluster-admin-impl/src/main/resources/OSGI-INF/blueprint/cluster-admin.xml
deleted file mode 100644 (file)
index 258bc09..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           odl:use-default-for-reference-types="true">
-
-  <!-- ClusterAdminRpcService -->
-
-  <reference id="configDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
-      odl:type="distributed-config"/>
-
-  <reference id="operationalDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
-      odl:type="distributed-operational"/>
-
-  <reference id="normalizedNodeSerializer" interface="org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer"/>
-
-  <bean id="clusterAdminService" class="org.opendaylight.controller.cluster.datastore.admin.ClusterAdminRpcService">
-    <argument ref="configDatastore"/>
-    <argument ref="operationalDatastore"/>
-    <argument ref="normalizedNodeSerializer"/>
-  </bean>
-
-  <odl:rpc-implementation ref="clusterAdminService"/>
-
-</blueprint>
\ No newline at end of file
index b87c07e186910fcd4c30a19e08a61f86360a04cf..2239908877b01d91b2e02b3894cabeb796e6f1a3 100644 (file)
@@ -9,13 +9,14 @@ package org.opendaylight.controller.cluster.datastore.admin;
 
 import static java.lang.Boolean.FALSE;
 import static java.lang.Boolean.TRUE;
+import static java.util.Objects.requireNonNull;
 import static org.hamcrest.CoreMatchers.anyOf;
 import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyNoShardPresent;
@@ -26,43 +27,32 @@ import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import akka.actor.Status.Success;
 import akka.cluster.Cluster;
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
 import java.io.File;
-import java.io.FileInputStream;
-import java.net.URI;
-import java.util.AbstractMap.SimpleEntry;
+import java.nio.file.Files;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.MemberNode;
 import org.opendaylight.controller.cluster.datastore.MemberNode.RaftStateVerifier;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.persisted.ServerConfigurationPayload;
 import org.opendaylight.controller.cluster.raft.persisted.ServerInfo;
@@ -70,54 +60,24 @@ import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEnt
 import org.opendaylight.controller.cluster.raft.persisted.UpdateElectionTerm;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddPrefixShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetPrefixShardRoleOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemovePrefixShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResult;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultBuilder;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.shard.result.output.ShardResultKey;
 import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 /**
  * Unit tests for ClusterAdminRpcService.
@@ -125,6 +85,12 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  * @author Thomas Pantelis
  */
 public class ClusterAdminRpcServiceTest {
+    record ExpState(String name, boolean voting) {
+        ExpState {
+            requireNonNull(name);
+        }
+    }
+
     private static final MemberName MEMBER_1 = MemberName.forName("member-1");
     private static final MemberName MEMBER_2 = MemberName.forName("member-2");
     private static final MemberName MEMBER_3 = MemberName.forName("member-3");
@@ -138,33 +104,38 @@ public class ClusterAdminRpcServiceTest {
 
     @After
     public void tearDown() {
-        for (MemberNode m : Lists.reverse(memberNodes)) {
-            m.cleanup();
+        for (var member : Lists.reverse(memberNodes)) {
+            member.cleanup();
         }
         memberNodes.clear();
     }
 
     @Test
     public void testBackupDatastore() throws Exception {
-        MemberNode node = MemberNode.builder(memberNodes).akkaConfig("Member1")
-                .moduleShardsConfig("module-shards-member1.conf").waitForShardLeader("cars", "people")
-                .testName("testBackupDatastore").build();
+        final var node = MemberNode.builder(memberNodes)
+            .akkaConfig("Member1")
+            .moduleShardsConfig("module-shards-member1.conf")
+            .waitForShardLeader("cars", "people")
+            .testName("testBackupDatastore")
+            .build();
 
-        String fileName = "target/testBackupDatastore";
-        new File(fileName).delete();
+        final var fileName = "target/testBackupDatastore";
+        final var file = new File(fileName);
+        file.delete();
 
-        ClusterAdminRpcService service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null);
+        final var service = new ClusterAdminRpcService(node.configDataStore(), node.operDataStore(), null);
 
-        RpcResult<BackupDatastoreOutput> rpcResult = service .backupDatastore(new BackupDatastoreInputBuilder()
-                .setFilePath(fileName).build()).get(5, TimeUnit.SECONDS);
+        var rpcResult = service.backupDatastore(new BackupDatastoreInputBuilder().setFilePath(fileName).build())
+            .get(5, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
-        try (FileInputStream fis = new FileInputStream(fileName)) {
-            List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
+        try (var fis = Files.newInputStream(file.toPath())) {
+            final List<DatastoreSnapshot> snapshots = SerializationUtils.deserialize(fis);
             assertEquals("DatastoreSnapshot size", 2, snapshots.size());
 
-            ImmutableMap<String, DatastoreSnapshot> map = ImmutableMap.of(snapshots.get(0).getType(), snapshots.get(0),
-                    snapshots.get(1).getType(), snapshots.get(1));
+            final var map = Map.of(
+                snapshots.get(0).getType(), snapshots.get(0),
+                snapshots.get(1).getType(), snapshots.get(1));
             verifyDatastoreSnapshot(node.configDataStore().getActorUtils().getDataStoreName(),
                     map.get(node.configDataStore().getActorUtils().getDataStoreName()), "cars", "people");
         } finally {
@@ -176,7 +147,7 @@ public class ClusterAdminRpcServiceTest {
         node.configDataStore().getActorUtils().getShardManager().tell(node.datastoreContextBuilder()
                 .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).build(), ActorRef.noSender());
 
-        ActorRef carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").get();
+        final var carsShardActor = node.configDataStore().getActorUtils().findLocalShard("cars").orElseThrow();
         node.kit().watch(carsShardActor);
         carsShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
         node.kit().expectTerminated(carsShardActor);
@@ -190,113 +161,12 @@ public class ClusterAdminRpcServiceTest {
     private static void verifyDatastoreSnapshot(final String type, final DatastoreSnapshot datastoreSnapshot,
             final String... expShardNames) {
         assertNotNull("Missing DatastoreSnapshot for type " + type, datastoreSnapshot);
-        Set<String> shardNames = new HashSet<>();
-        for (DatastoreSnapshot.ShardSnapshot s: datastoreSnapshot.getShardSnapshots()) {
-            shardNames.add(s.getName());
+        var shardNames = new HashSet<String>();
+        for (var snapshot : datastoreSnapshot.getShardSnapshots()) {
+            shardNames.add(snapshot.getName());
         }
 
-        assertEquals("DatastoreSnapshot shard names", Sets.newHashSet(expShardNames), shardNames);
-    }
-
-    @Test
-    public void testAddRemovePrefixShardReplica() throws Exception {
-        String name = "testAddPrefixShardReplica";
-        String moduleShardsConfig = "module-shards-default.conf";
-
-        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).build();
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).build();
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).build();
-
-        member1.waitForMembersUp("member-2", "member-3");
-        replicaNode2.kit().waitForMembersUp("member-1", "member-3");
-        replicaNode3.kit().waitForMembersUp("member-1", "member-2");
-
-        final ActorRef shardManager1 = member1.configDataStore().getActorUtils().getShardManager();
-
-        shardManager1.tell(new PrefixShardCreated(new PrefixShardConfiguration(
-                        new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH),
-                        "prefix", Collections.singleton(MEMBER_1))),
-                ActorRef.noSender());
-
-        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(),
-                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-
-        final InstanceIdentifier<Cars> identifier = InstanceIdentifier.create(Cars.class);
-        final BindingNormalizedNodeSerializer serializer = Mockito.mock(BindingNormalizedNodeSerializer.class);
-        Mockito.doReturn(CarsModel.BASE_PATH).when(serializer).toYangInstanceIdentifier(identifier);
-
-        addPrefixShardReplica(replicaNode2, identifier, serializer,
-                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-1");
-
-        addPrefixShardReplica(replicaNode3, identifier, serializer,
-                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-1", "member-2");
-
-        verifyRaftPeersPresent(member1.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH),
-                "member-2", "member-3");
-
-        removePrefixShardReplica(member1, identifier, "member-3", serializer,
-                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH), "member-2");
-
-        verifyNoShardPresent(replicaNode3.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-        verifyRaftPeersPresent(replicaNode2.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH),
-                "member-1");
-
-        removePrefixShardReplica(member1, identifier, "member-2", serializer,
-                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-
-        verifyNoShardPresent(replicaNode2.configDataStore(), ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-    }
-
-    @Test
-    public void testGetShardRole() throws Exception {
-        String name = "testGetShardRole";
-        String moduleShardsConfig = "module-shards-default-member-1.conf";
-
-        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).build();
-
-        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
-
-        final RpcResult<GetShardRoleOutput> successResult =
-                getShardRole(member1, Mockito.mock(BindingNormalizedNodeSerializer.class), "default");
-        verifySuccessfulRpcResult(successResult);
-        assertEquals("Leader", successResult.getResult().getRole());
-
-        final RpcResult<GetShardRoleOutput> failedResult =
-                getShardRole(member1, Mockito.mock(BindingNormalizedNodeSerializer.class), "cars");
-
-        verifyFailedRpcResult(failedResult);
-
-        final ActorRef shardManager1 = member1.configDataStore().getActorUtils().getShardManager();
-
-        shardManager1.tell(new PrefixShardCreated(new PrefixShardConfiguration(
-                        new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH),
-                        "prefix", Collections.singleton(MEMBER_1))),
-                ActorRef.noSender());
-
-        member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(),
-                ClusterUtils.getCleanShardName(CarsModel.BASE_PATH));
-
-        final InstanceIdentifier<Cars> identifier = InstanceIdentifier.create(Cars.class);
-        final BindingNormalizedNodeSerializer serializer = Mockito.mock(BindingNormalizedNodeSerializer.class);
-        Mockito.doReturn(CarsModel.BASE_PATH).when(serializer).toYangInstanceIdentifier(identifier);
-
-        final RpcResult<GetPrefixShardRoleOutput> prefixSuccessResult =
-                getPrefixShardRole(member1, identifier, serializer);
-
-        verifySuccessfulRpcResult(prefixSuccessResult);
-        assertEquals("Leader", prefixSuccessResult.getResult().getRole());
-
-        final InstanceIdentifier<People> peopleId = InstanceIdentifier.create(People.class);
-        Mockito.doReturn(PeopleModel.BASE_PATH).when(serializer).toYangInstanceIdentifier(peopleId);
-
-        final RpcResult<GetPrefixShardRoleOutput> prefixFail =
-                getPrefixShardRole(member1, peopleId, serializer);
-
-        verifyFailedRpcResult(prefixFail);
+        assertEquals("DatastoreSnapshot shard names", Set.of(expShardNames), shardNames);
     }
 
     @Test
@@ -304,12 +174,10 @@ public class ClusterAdminRpcServiceTest {
         String name = "testGetPrefixShardRole";
         String moduleShardsConfig = "module-shards-default-member-1.conf";
 
-        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         member1.kit().waitUntilLeader(member1.configDataStore().getActorUtils(), "default");
-
-
     }
 
     @Test
@@ -317,11 +185,11 @@ public class ClusterAdminRpcServiceTest {
         String name = "testModuleShardLeaderMovement";
         String moduleShardsConfig = "module-shards-member1.conf";
 
-        final MemberNode member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var member1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .waitForShardLeader("cars").moduleShardsConfig(moduleShardsConfig).build();
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         member1.waitForMembersUp("member-2", "member-3");
@@ -357,17 +225,17 @@ public class ClusterAdminRpcServiceTest {
     public void testAddShardReplica() throws Exception {
         String name = "testAddShardReplica";
         String moduleShardsConfig = "module-shards-cars-member-1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars").build();
 
-        MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.waitForMembersUp("member-2");
 
         doAddShardReplica(newReplicaNode2, "cars", "member-1");
 
-        MemberNode newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        var newReplicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.waitForMembersUp("member-3");
@@ -379,14 +247,14 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(newReplicaNode2.operDataStore(), "cars", "member-1", "member-3");
 
         // Write data to member-2's config datastore and read/verify via member-3
-        final NormalizedNode<?, ?> configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
+        final var configCarsNode = writeCarsNodeAndVerify(newReplicaNode2.configDataStore(),
                 newReplicaNode3.configDataStore());
 
         // Write data to member-3's oper datastore and read/verify via member-2
         writeCarsNodeAndVerify(newReplicaNode3.operDataStore(), newReplicaNode2.operDataStore());
 
         // Verify all data has been replicated. We expect 4 log entries and thus last applied index of 3 -
-        // 2 ServerConfigurationPayload entries,  the transaction payload entry plus a purge payload.
+        // 2 ServerConfigurationPayload entries, the transaction payload entry plus a purge payload.
 
         RaftStateVerifier verifier = raftState -> {
             assertEquals("Commit index", 3, raftState.getCommitIndex());
@@ -417,34 +285,36 @@ public class ClusterAdminRpcServiceTest {
     @Test
     public void testAddShardReplicaFailures() throws Exception {
         String name = "testAddShardReplicaFailures";
-        MemberNode memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var memberNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig("module-shards-cars-member-1.conf").build();
 
-        ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore(), null);
+        final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
 
-        RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
-                .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+        var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+                .setDataStoreType(DataStoreType.Config)
+                .build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
-        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars")
-                .build()).get(10, TimeUnit.SECONDS);
+        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("cars").build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
         rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName("people")
-                .setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+                .setDataStoreType(DataStoreType.Config)
+                .build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
     }
 
-    private static NormalizedNode<?, ?> writeCarsNodeAndVerify(final AbstractDataStore writeToStore,
+    private static ContainerNode writeCarsNodeAndVerify(final AbstractDataStore writeToStore,
             final AbstractDataStore readFromStore) throws Exception {
-        DOMStoreWriteTransaction writeTx = writeToStore.newWriteOnlyTransaction();
-        NormalizedNode<?, ?> carsNode = CarsModel.create();
+        final var writeTx = writeToStore.newWriteOnlyTransaction();
+        final var carsNode = CarsModel.create();
         writeTx.write(CarsModel.BASE_PATH, carsNode);
 
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-        Boolean canCommit = cohort.canCommit().get(7, TimeUnit.SECONDS);
-        assertEquals("canCommit", TRUE, canCommit);
+        final var cohort = writeTx.ready();
+        assertEquals("canCommit", TRUE, cohort.canCommit().get(7, TimeUnit.SECONDS));
         cohort.preCommit().get(5, TimeUnit.SECONDS);
         cohort.commit().get(5, TimeUnit.SECONDS);
 
@@ -453,99 +323,31 @@ public class ClusterAdminRpcServiceTest {
     }
 
     private static void readCarsNodeAndVerify(final AbstractDataStore readFromStore,
-            final NormalizedNode<?, ?> expCarsNode) throws Exception {
-        java.util.Optional<NormalizedNode<?, ?>> optional = readFromStore.newReadOnlyTransaction()
-                .read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-        assertEquals("Data node", expCarsNode, optional.get());
-    }
-
-    private static RpcResult<GetShardRoleOutput> getShardRole(final MemberNode memberNode,
-            final BindingNormalizedNodeSerializer serializer, final String shardName) throws Exception {
-
-        final GetShardRoleInput input = new GetShardRoleInputBuilder()
-                .setDataStoreType(DataStoreType.Config)
-                .setShardName(shardName)
-                .build();
-
-        final ClusterAdminRpcService service =
-                new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
-        return service.getShardRole(input).get(10, TimeUnit.SECONDS);
-    }
-
-    private static RpcResult<GetPrefixShardRoleOutput> getPrefixShardRole(
-            final MemberNode memberNode,
-            final InstanceIdentifier<?> identifier,
-            final BindingNormalizedNodeSerializer serializer) throws Exception {
-
-        final GetPrefixShardRoleInput input = new GetPrefixShardRoleInputBuilder()
-                .setDataStoreType(DataStoreType.Config)
-                .setShardPrefix(identifier)
-                .build();
-
-        final ClusterAdminRpcService service =
-                new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
-        return service.getPrefixShardRole(input).get(10, TimeUnit.SECONDS);
-    }
-
-    private static void addPrefixShardReplica(final MemberNode memberNode, final InstanceIdentifier<?> identifier,
-            final BindingNormalizedNodeSerializer serializer, final String shardName,
-            final String... peerMemberNames) throws Exception {
-
-        final AddPrefixShardReplicaInput input = new AddPrefixShardReplicaInputBuilder()
-                .setShardPrefix(identifier)
-                .setDataStoreType(DataStoreType.Config).build();
-
-        final ClusterAdminRpcService service =
-                new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
-        final RpcResult<AddPrefixShardReplicaOutput> rpcResult = service.addPrefixShardReplica(input)
-                .get(10, TimeUnit.SECONDS);
-        verifySuccessfulRpcResult(rpcResult);
-
-        verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
-        Optional<ActorRef> optional = memberNode.configDataStore().getActorUtils().findLocalShard(shardName);
-        assertTrue("Replica shard not present", optional.isPresent());
-    }
-
-    private static void removePrefixShardReplica(final MemberNode memberNode, final InstanceIdentifier<?> identifier,
-            final String removeFromMember, final BindingNormalizedNodeSerializer serializer, final String shardName,
-            final String... peerMemberNames) throws Exception {
-        final RemovePrefixShardReplicaInput input = new RemovePrefixShardReplicaInputBuilder()
-                .setDataStoreType(DataStoreType.Config)
-                .setShardPrefix(identifier)
-                .setMemberName(removeFromMember).build();
-
-        final ClusterAdminRpcService service =
-                new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), serializer);
-
-        final RpcResult<RemovePrefixShardReplicaOutput> rpcResult = service.removePrefixShardReplica(input)
-                .get(10, TimeUnit.SECONDS);
-        verifySuccessfulRpcResult(rpcResult);
-
-        verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
+            final ContainerNode expCarsNode) throws Exception {
+        assertEquals(Optional.of(expCarsNode),
+            readFromStore.newReadOnlyTransaction().read(CarsModel.BASE_PATH).get(15, TimeUnit.SECONDS));
     }
 
     private static void doAddShardReplica(final MemberNode memberNode, final String shardName,
             final String... peerMemberNames) throws Exception {
         memberNode.waitForMembersUp(peerMemberNames);
 
-        ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore(), null);
+        final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
 
-        RpcResult<AddShardReplicaOutput> rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
-            .setShardName(shardName).setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+        var rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+            .setShardName(shardName)
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(memberNode.configDataStore(), shardName, peerMemberNames);
 
-        Optional<ActorRef> optional = memberNode.operDataStore().getActorUtils().findLocalShard(shardName);
-        assertFalse("Oper shard present", optional.isPresent());
+        assertEquals(Optional.empty(), memberNode.operDataStore().getActorUtils().findLocalShard(shardName));
 
-        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder().setShardName(shardName)
-                .setDataStoreType(DataStoreType.Operational).build()).get(10, TimeUnit.SECONDS);
+        rpcResult = service.addShardReplica(new AddShardReplicaInputBuilder()
+            .setShardName(shardName)
+            .setDataStoreType(DataStoreType.Operational)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(memberNode.operDataStore(), shardName, peerMemberNames);
@@ -553,12 +355,12 @@ public class ClusterAdminRpcServiceTest {
 
     private static void doMakeShardLeaderLocal(final MemberNode memberNode, final String shardName,
             final String newLeader) throws Exception {
-        ClusterAdminRpcService service = new ClusterAdminRpcService(memberNode.configDataStore(),
-                memberNode.operDataStore(), null);
+        final var service = new ClusterAdminRpcService(memberNode.configDataStore(), memberNode.operDataStore(), null);
 
-        final RpcResult<MakeLeaderLocalOutput> rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
-                .setDataStoreType(DataStoreType.Config).setShardName(shardName).build())
-                .get(10, TimeUnit.SECONDS);
+        final var rpcResult = service.makeLeaderLocal(new MakeLeaderLocalInputBuilder()
+            .setDataStoreType(DataStoreType.Config)
+            .setShardName(shardName)
+            .build()).get(10, TimeUnit.SECONDS);
 
         verifySuccessfulRpcResult(rpcResult);
 
@@ -568,8 +370,9 @@ public class ClusterAdminRpcServiceTest {
 
     private static <T> T verifySuccessfulRpcResult(final RpcResult<T> rpcResult) {
         if (!rpcResult.isSuccessful()) {
-            if (rpcResult.getErrors().size() > 0) {
-                RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
+            final var errors = rpcResult.getErrors();
+            if (errors.size() > 0) {
+                final var error = errors.get(0);
                 throw new AssertionError("Rpc failed with error: " + error, error.getCause());
             }
 
@@ -581,8 +384,9 @@ public class ClusterAdminRpcServiceTest {
 
     private static void verifyFailedRpcResult(final RpcResult<?> rpcResult) {
         assertFalse("RpcResult", rpcResult.isSuccessful());
-        assertEquals("RpcResult errors size", 1, rpcResult.getErrors().size());
-        RpcError error = Iterables.getFirst(rpcResult.getErrors(), null);
+        final var errors = rpcResult.getErrors();
+        assertEquals("RpcResult errors size", 1, errors.size());
+        final var error = errors.get(0);
         assertNotNull("RpcResult error message null", error.getMessage());
     }
 
@@ -590,15 +394,15 @@ public class ClusterAdminRpcServiceTest {
     public void testRemoveShardReplica() throws Exception {
         String name = "testRemoveShardReplica";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -609,12 +413,13 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-3 to remove its local shard
 
-        ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<RemoveShardReplicaOutput> rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
-                .setShardName("cars").setMemberName("member-3").setDataStoreType(DataStoreType.Config).build())
-                .get(10, TimeUnit.SECONDS);
+        var rpcResult = service3.removeShardReplica(new RemoveShardReplicaInputBuilder()
+            .setShardName("cars").setMemberName("member-3")
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars", "member-2");
@@ -626,7 +431,7 @@ public class ClusterAdminRpcServiceTest {
         Cluster.get(leaderNode1.kit().getSystem()).down(Cluster.get(replicaNode2.kit().getSystem()).selfAddress());
         replicaNode2.cleanup();
 
-        MemberNode newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var newPeplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         newPeplicaNode2.configDataStore().waitTillReady();
@@ -634,11 +439,14 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-1 to remove member-2
 
-        ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore(), null);
+        final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            null);
 
-        rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder().setShardName("cars")
-                .setMemberName("member-2").setDataStoreType(DataStoreType.Config).build()).get(10, TimeUnit.SECONDS);
+        rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
+            .setShardName("cars")
+            .setMemberName("member-2")
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftPeersPresent(leaderNode1.configDataStore(), "cars");
@@ -649,15 +457,15 @@ public class ClusterAdminRpcServiceTest {
     public void testRemoveShardLeaderReplica() throws Exception {
         String name = "testRemoveShardLeaderReplica";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -670,12 +478,14 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on leader member-1 to remove its local shard
 
-        ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore(), null);
+        final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            null);
 
-        RpcResult<RemoveShardReplicaOutput> rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
-                .setShardName("cars").setMemberName("member-1").setDataStoreType(DataStoreType.Config).build())
-                .get(10, TimeUnit.SECONDS);
+        final var rpcResult = service1.removeShardReplica(new RemoveShardReplicaInputBuilder()
+            .setShardName("cars")
+            .setMemberName("member-1")
+            .setDataStoreType(DataStoreType.Config)
+            .build()).get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
         verifyRaftState(replicaNode2.configDataStore(), "cars", raftState ->
@@ -691,18 +501,17 @@ public class ClusterAdminRpcServiceTest {
     public void testAddReplicasForAllShards() throws Exception {
         String name = "testAddReplicasForAllShards";
         String moduleShardsConfig = "module-shards-member1.conf";
-        MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).waitForShardLeader("cars", "people").build();
 
-        ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
-                                                                                 "pets", null,
-                                                                                 Collections.singletonList(MEMBER_1));
+        final var petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets",
+            null, List.of(MEMBER_1));
         leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
         leaderNode1.kit().waitUntilLeader(leaderNode1.configDataStore().getActorUtils(), "pets");
 
-        MemberNode newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var newReplicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.waitForMembersUp("member-2");
@@ -712,20 +521,18 @@ public class ClusterAdminRpcServiceTest {
                 new CreateShard(petsModuleConfig, Shard.builder(), null), newReplicaNode2.kit().getRef());
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
-        newReplicaNode2.operDataStore().getActorUtils().getShardManager().tell(
-                new CreateShard(new ModuleShardConfiguration(URI.create("no-leader-ns"), "no-leader-module",
-                                                             "no-leader", null,
-                                                             Collections.singletonList(MEMBER_1)),
-                                Shard.builder(), null),
-                                newReplicaNode2.kit().getRef());
+        newReplicaNode2.operDataStore().getActorUtils().getShardManager()
+            .tell(new CreateShard(new ModuleShardConfiguration(XMLNamespace.of("no-leader-ns"), "no-leader-module",
+                "no-leader", null, List.of(MEMBER_1)),
+                Shard.builder(), null), newReplicaNode2.kit().getRef());
         newReplicaNode2.kit().expectMsgClass(Success.class);
 
-        ClusterAdminRpcService service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
-                newReplicaNode2.operDataStore(), null);
+        final var service = new ClusterAdminRpcService(newReplicaNode2.configDataStore(),
+            newReplicaNode2.operDataStore(), null);
 
-        RpcResult<AddReplicasForAllShardsOutput> rpcResult = service.addReplicasForAllShards(
-            new AddReplicasForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        AddReplicasForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        var rpcResult = service.addReplicasForAllShards(new AddReplicasForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("pets", DataStoreType.Config),
@@ -744,15 +551,15 @@ public class ClusterAdminRpcServiceTest {
     public void testRemoveAllShardReplicas() throws Exception {
         String name = "testRemoveAllShardReplicas";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -760,8 +567,8 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(replicaNode2.configDataStore(), "cars", "member-1", "member-3");
         verifyRaftPeersPresent(replicaNode3.configDataStore(), "cars", "member-1", "member-2");
 
-        ModuleShardConfiguration petsModuleConfig = new ModuleShardConfiguration(URI.create("pets-ns"), "pets-module",
-                "pets", null, Arrays.asList(MEMBER_1, MEMBER_2, MEMBER_3));
+        final var petsModuleConfig = new ModuleShardConfiguration(XMLNamespace.of("pets-ns"), "pets-module", "pets",
+            null, List.of(MEMBER_1, MEMBER_2, MEMBER_3));
         leaderNode1.configDataStore().getActorUtils().getShardManager().tell(
                 new CreateShard(petsModuleConfig, Shard.builder(), null), leaderNode1.kit().getRef());
         leaderNode1.kit().expectMsgClass(Success.class);
@@ -778,12 +585,13 @@ public class ClusterAdminRpcServiceTest {
         verifyRaftPeersPresent(replicaNode2.configDataStore(), "pets", "member-1", "member-3");
         verifyRaftPeersPresent(replicaNode3.configDataStore(), "pets", "member-1", "member-2");
 
-        ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<RemoveAllShardReplicasOutput> rpcResult = service3.removeAllShardReplicas(
-                new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build()).get(10, TimeUnit.SECONDS);
-        RemoveAllShardReplicasOutput result = verifySuccessfulRpcResult(rpcResult);
+        var rpcResult = service3.removeAllShardReplicas(
+                new RemoveAllShardReplicasInputBuilder().setMemberName("member-3").build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("pets", DataStoreType.Config),
@@ -805,15 +613,15 @@ public class ClusterAdminRpcServiceTest {
     public void testChangeMemberVotingStatesForShard() throws Exception {
         String name = "testChangeMemberVotingStatusForShard";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -824,32 +632,31 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-3 to change voting status
 
-        ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service3
-                .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
-                        .setShardName("cars").setDataStoreType(DataStoreType.Config)
-                        .setMemberVotingState(ImmutableList.of(
-                                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
-                                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
-                        .build())
-                .get(10, TimeUnit.SECONDS);
+        var rpcResult = service3.changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
+            .setShardName("cars").setDataStoreType(DataStoreType.Config)
+            .setMemberVotingState(List.of(
+                new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
+                new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
+            .build())
+            .get(10, TimeUnit.SECONDS);
         verifySuccessfulRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
-        verifyVotingStates(replicaNode2.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
-        verifyVotingStates(replicaNode3.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
+        verifyVotingStates(replicaNode2.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
+        verifyVotingStates(replicaNode3.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
     }
 
     @Test
     public void testChangeMemberVotingStatesForSingleNodeShard() throws Exception {
         String name = "testChangeMemberVotingStatesForSingleNodeShard";
         String moduleShardsConfig = "module-shards-member1.conf";
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
@@ -858,34 +665,39 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-1 to change voting status
 
-        ClusterAdminRpcService service = new ClusterAdminRpcService(leaderNode.configDataStore(),
-                leaderNode.operDataStore(), null);
-
-        RpcResult<ChangeMemberVotingStatesForShardOutput> rpcResult = service
-                .changeMemberVotingStatesForShard(new ChangeMemberVotingStatesForShardInputBuilder()
-                        .setShardName("cars").setDataStoreType(DataStoreType.Config)
-                        .setMemberVotingState(ImmutableList
-                                .of(new MemberVotingStateBuilder().setMemberName("member-1").setVoting(FALSE).build()))
-                        .build())
-                .get(10, TimeUnit.SECONDS);
+        final var service = new ClusterAdminRpcService(leaderNode.configDataStore(), leaderNode.operDataStore(), null);
+
+        final var rpcResult = service.changeMemberVotingStatesForShard(
+            new ChangeMemberVotingStatesForShardInputBuilder()
+                .setShardName("cars").setDataStoreType(DataStoreType.Config)
+                .setMemberVotingState(List.of(new MemberVotingStateBuilder()
+                    .setMemberName("member-1")
+                    .setVoting(FALSE)
+                    .build()))
+                .build())
+            .get(10, TimeUnit.SECONDS);
         verifyFailedRpcResult(rpcResult);
 
-        verifyVotingStates(leaderNode.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE));
+        verifyVotingStates(leaderNode.configDataStore(), "cars", new ExpState("member-1", true));
     }
 
     @Test
     public void testChangeMemberVotingStatesForAllShards() throws Exception {
         String name = "testChangeMemberVotingStatesForAllShards";
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
-                        DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
-                .build();
-
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes)
+            .akkaConfig("Member1")
+            .testName(name)
+            .moduleShardsConfig(moduleShardsConfig)
+            .datastoreContextBuilder(DatastoreContext.newBuilder()
+                .shardHeartbeatIntervalInMillis(300)
+                .shardElectionTimeoutFactor(1))
+            .build();
+
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
@@ -898,75 +710,78 @@ public class ClusterAdminRpcServiceTest {
 
         // Invoke RPC service on member-3 to change voting status
 
-        ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
                 replicaNode3.operDataStore(), null);
 
-        RpcResult<ChangeMemberVotingStatesForAllShardsOutput> rpcResult = service3.changeMemberVotingStatesForAllShards(
-                new ChangeMemberVotingStatesForAllShardsInputBuilder().setMemberVotingState(ImmutableList.of(
+        final var rpcResult = service3.changeMemberVotingStatesForAllShards(
+            new ChangeMemberVotingStatesForAllShardsInputBuilder()
+                .setMemberVotingState(List.of(
                         new MemberVotingStateBuilder().setMemberName("member-2").setVoting(FALSE).build(),
-                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build())).build())
+                        new MemberVotingStateBuilder().setMemberName("member-3").setVoting(FALSE).build()))
+                .build())
                 .get(10, TimeUnit.SECONDS);
-        ChangeMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"}, new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", true), new ExpState("member-2", false), new ExpState("member-3", false));
     }
 
     @Test
     public void testFlipMemberVotingStates() throws Exception {
         String name = "testFlipMemberVotingStates";
 
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
-                new ServerInfo("member-1", true), new ServerInfo("member-2", true),
-                new ServerInfo("member-3", false)));
+        final var persistedServerConfig = new ServerConfigurationPayload(List.of(
+            new ServerInfo("member-1", true), new ServerInfo("member-2", true), new ServerInfo("member-3", false)));
 
         setupPersistedServerConfigPayload(persistedServerConfig, "member-1", name, "cars", "people");
         setupPersistedServerConfigPayload(persistedServerConfig, "member-2", name, "cars", "people");
         setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
 
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(DatastoreContext.newBuilder()
                         .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
         leaderNode1.operDataStore().waitTillReady();
         replicaNode3.configDataStore().waitTillReady();
         replicaNode3.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false));
 
-        ClusterAdminRpcService service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(),
-                replicaNode3.operDataStore(), null);
+        final var service3 = new ClusterAdminRpcService(replicaNode3.configDataStore(), replicaNode3.operDataStore(),
+            null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service3.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        var rpcResult = service3.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
-                new SimpleEntry<>("member-3", TRUE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", true));
 
         // Leadership should have transferred to member 3 since it is the only remaining voting member.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
@@ -984,19 +799,20 @@ public class ClusterAdminRpcServiceTest {
         // Flip the voting states back to the original states.
 
         rpcResult = service3.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
         result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
-                new SimpleEntry<>("member-3", FALSE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", false));
 
         // Leadership should have transferred to member 1 or 2.
         verifyRaftState(leaderNode1.configDataStore(), "cars", raftState -> {
@@ -1012,7 +828,7 @@ public class ClusterAdminRpcServiceTest {
 
         // Members 1, 2, and 3 are initially started up as non-voting. Members 4, 5, and 6 are initially
         // voting and simulated as down by not starting them up.
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        final var persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("member-1", false), new ServerInfo("member-2", false),
                 new ServerInfo("member-3", false), new ServerInfo("member-4", true),
                 new ServerInfo("member-5", true), new ServerInfo("member-6", true)));
@@ -1022,47 +838,47 @@ public class ClusterAdminRpcServiceTest {
         setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
 
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var replicaNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         // Initially there won't be a leader b/c all the up nodes are non-voting.
 
         replicaNode1.waitForMembersUp("member-2", "member-3");
 
-        verifyVotingStates(replicaNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", FALSE),
-                new SimpleEntry<>("member-2", FALSE), new SimpleEntry<>("member-3", FALSE),
-                new SimpleEntry<>("member-4", TRUE), new SimpleEntry<>("member-5", TRUE),
-                new SimpleEntry<>("member-6", TRUE));
+        verifyVotingStates(replicaNode1.configDataStore(), "cars",
+            new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false),
+            new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true));
 
         verifyRaftState(replicaNode1.configDataStore(), "cars", raftState ->
             assertEquals("Expected raft state", RaftState.Follower.toString(), raftState.getRaftState()));
 
-        ClusterAdminRpcService service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(),
-                replicaNode1.operDataStore(), null);
+        final var service1 = new ClusterAdminRpcService(replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+            null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        final var rpcResult = service1.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
-        verifyVotingStates(new AbstractDataStore[]{replicaNode1.configDataStore(), replicaNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", TRUE), new SimpleEntry<>("member-2", TRUE),
-                new SimpleEntry<>("member-3", TRUE), new SimpleEntry<>("member-4", FALSE),
-                new SimpleEntry<>("member-5", FALSE), new SimpleEntry<>("member-6", FALSE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            replicaNode1.configDataStore(), replicaNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true),
+            new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false));
 
         // Since member 1 was changed to voting and there was no leader, it should've started an election
         // and become leader
@@ -1084,7 +900,7 @@ public class ClusterAdminRpcServiceTest {
         String name = "testFlipMemberVotingStatesWithVotingMembersDown";
 
         // Members 4, 5, and 6 are initially non-voting and simulated as down by not starting them up.
-        ServerConfigurationPayload persistedServerConfig = new ServerConfigurationPayload(Arrays.asList(
+        final var persistedServerConfig = new ServerConfigurationPayload(List.of(
                 new ServerInfo("member-1", true), new ServerInfo("member-2", true),
                 new ServerInfo("member-3", true), new ServerInfo("member-4", false),
                 new ServerInfo("member-5", false), new ServerInfo("member-6", false)));
@@ -1094,43 +910,43 @@ public class ClusterAdminRpcServiceTest {
         setupPersistedServerConfigPayload(persistedServerConfig, "member-3", name, "cars", "people");
 
         String moduleShardsConfig = "module-shards-member1-and-2-and-3.conf";
-        final MemberNode leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
+        final var leaderNode1 = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).datastoreContextBuilder(
                         DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(1))
                 .build();
 
-        final MemberNode replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
+        final var replicaNode2 = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
-        final MemberNode replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
+        final var replicaNode3 = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
                 .moduleShardsConfig(moduleShardsConfig).build();
 
         leaderNode1.configDataStore().waitTillReady();
         leaderNode1.operDataStore().waitTillReady();
-        verifyVotingStates(leaderNode1.configDataStore(), "cars", new SimpleEntry<>("member-1", TRUE),
-                new SimpleEntry<>("member-2", TRUE), new SimpleEntry<>("member-3", TRUE),
-                new SimpleEntry<>("member-4", FALSE), new SimpleEntry<>("member-5", FALSE),
-                new SimpleEntry<>("member-6", FALSE));
+        verifyVotingStates(leaderNode1.configDataStore(), "cars",
+            new ExpState("member-1", true), new ExpState("member-2", true), new ExpState("member-3", true),
+            new ExpState("member-4", false), new ExpState("member-5", false), new ExpState("member-6", false));
 
-        ClusterAdminRpcService service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(),
-                leaderNode1.operDataStore(), null);
+        final var service1 = new ClusterAdminRpcService(leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            null);
 
-        RpcResult<FlipMemberVotingStatesForAllShardsOutput> rpcResult = service1.flipMemberVotingStatesForAllShards(
-            new FlipMemberVotingStatesForAllShardsInputBuilder().build()).get(10, TimeUnit.SECONDS);
-        FlipMemberVotingStatesForAllShardsOutput result = verifySuccessfulRpcResult(rpcResult);
+        final var rpcResult = service1.flipMemberVotingStatesForAllShards(
+            new FlipMemberVotingStatesForAllShardsInputBuilder().build())
+            .get(10, TimeUnit.SECONDS);
+        final var result = verifySuccessfulRpcResult(rpcResult);
         verifyShardResults(result.getShardResult(), successShardResult("cars", DataStoreType.Config),
                 successShardResult("people", DataStoreType.Config),
                 successShardResult("cars", DataStoreType.Operational),
                 successShardResult("people", DataStoreType.Operational));
 
         // Members 2 and 3 are now non-voting but should get replicated with the new server config.
-        verifyVotingStates(new AbstractDataStore[]{leaderNode1.configDataStore(), leaderNode1.operDataStore(),
-                replicaNode2.configDataStore(), replicaNode2.operDataStore(),
-                replicaNode3.configDataStore(), replicaNode3.operDataStore()},
-                new String[]{"cars", "people"},
-                new SimpleEntry<>("member-1", FALSE), new SimpleEntry<>("member-2", FALSE),
-                new SimpleEntry<>("member-3", FALSE), new SimpleEntry<>("member-4", TRUE),
-                new SimpleEntry<>("member-5", TRUE), new SimpleEntry<>("member-6", TRUE));
+        verifyVotingStates(new ClientBackedDataStore[] {
+            leaderNode1.configDataStore(), leaderNode1.operDataStore(),
+            replicaNode2.configDataStore(), replicaNode2.operDataStore(),
+            replicaNode3.configDataStore(), replicaNode3.operDataStore()
+        }, new String[] { "cars", "people" },
+            new ExpState("member-1", false), new ExpState("member-2", false), new ExpState("member-3", false),
+            new ExpState("member-4", true), new ExpState("member-5", true), new ExpState("member-6", true));
 
         // The leader (member 1) was changed to non-voting but it shouldn't be able to step down as leader yet
         // b/c it can't get a majority consensus with all voting members down. So verify it remains the leader.
@@ -1142,16 +958,16 @@ public class ClusterAdminRpcServiceTest {
 
     private static void setupPersistedServerConfigPayload(final ServerConfigurationPayload serverConfig,
             final String member, final String datastoreTypeSuffix, final String... shards) {
-        String[] datastoreTypes = {"config_", "oper_"};
+        String[] datastoreTypes = { "config_", "oper_" };
         for (String type : datastoreTypes) {
             for (String shard : shards) {
-                List<ServerInfo> newServerInfo = new ArrayList<>(serverConfig.getServerConfig().size());
-                for (ServerInfo info : serverConfig.getServerConfig()) {
-                    newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.getId()),
+                final var newServerInfo = new ArrayList<ServerInfo>(serverConfig.getServerConfig().size());
+                for (var info : serverConfig.getServerConfig()) {
+                    newServerInfo.add(new ServerInfo(ShardIdentifier.create(shard, MemberName.forName(info.peerId()),
                             type + datastoreTypeSuffix).toString(), info.isVoting()));
                 }
 
-                String shardID = ShardIdentifier.create(shard, MemberName.forName(member),
+                final String shardID = ShardIdentifier.create(shard, MemberName.forName(member),
                         type + datastoreTypeSuffix).toString();
                 InMemoryJournal.addEntry(shardID, 1, new UpdateElectionTerm(1, null));
                 InMemoryJournal.addEntry(shardID, 2, new SimpleReplicatedLogEntry(0, 1,
@@ -1160,48 +976,47 @@ public class ClusterAdminRpcServiceTest {
         }
     }
 
-    @SafeVarargs
-    private static void verifyVotingStates(final AbstractDataStore[] datastores, final String[] shards,
-            final SimpleEntry<String, Boolean>... expStates) throws Exception {
-        for (AbstractDataStore datastore: datastores) {
-            for (String shard: shards) {
+    private static void verifyVotingStates(final ClientBackedDataStore[] datastores, final String[] shards,
+            final ExpState... expStates) throws Exception {
+        for (var datastore : datastores) {
+            for (String shard : shards) {
                 verifyVotingStates(datastore, shard, expStates);
             }
         }
     }
 
-    @SafeVarargs
-    private static void verifyVotingStates(final AbstractDataStore datastore, final String shardName,
-            final SimpleEntry<String, Boolean>... expStates) throws Exception {
+    private static void verifyVotingStates(final ClientBackedDataStore datastore, final String shardName,
+            final ExpState... expStates) throws Exception {
         String localMemberName = datastore.getActorUtils().getCurrentMemberName().getName();
-        Map<String, Boolean> expStateMap = new HashMap<>();
-        for (Entry<String, Boolean> e: expStates) {
-            expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(e.getKey()),
-                    datastore.getActorUtils().getDataStoreName()).toString(), e.getValue());
+        var expStateMap = new HashMap<String, Boolean>();
+        for (var expState : expStates) {
+            expStateMap.put(ShardIdentifier.create(shardName, MemberName.forName(expState.name),
+                datastore.getActorUtils().getDataStoreName()).toString(), expState.voting);
         }
 
         verifyRaftState(datastore, shardName, raftState -> {
             String localPeerId = ShardIdentifier.create(shardName, MemberName.forName(localMemberName),
                     datastore.getActorUtils().getDataStoreName()).toString();
             assertEquals("Voting state for " + localPeerId, expStateMap.get(localPeerId), raftState.isVoting());
-            for (Entry<String, Boolean> e: raftState.getPeerVotingStates().entrySet()) {
-                assertEquals("Voting state for " + e.getKey(), expStateMap.get(e.getKey()), e.getValue());
+            for (var entry : raftState.getPeerVotingStates().entrySet()) {
+                assertEquals("Voting state for " + entry.getKey(), expStateMap.get(entry.getKey()), entry.getValue());
             }
         });
     }
 
-    private static void verifyShardResults(final List<ShardResult> shardResults, final ShardResult... expShardResults) {
-        Map<String, ShardResult> expResultsMap = new HashMap<>();
-        for (ShardResult r: expShardResults) {
+    private static void verifyShardResults(final Map<ShardResultKey, ShardResult> shardResults,
+            final ShardResult... expShardResults) {
+        var expResultsMap = new HashMap<String, ShardResult>();
+        for (var r : expShardResults) {
             expResultsMap.put(r.getShardName() + "-" + r.getDataStoreType(), r);
         }
 
-        for (ShardResult result: shardResults) {
-            ShardResult exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
+        for (var result : shardResults.values()) {
+            var exp = expResultsMap.remove(result.getShardName() + "-" + result.getDataStoreType());
             assertNotNull(String.format("Unexpected result for shard %s, type %s", result.getShardName(),
                     result.getDataStoreType()), exp);
-            assertEquals("isSucceeded", exp.isSucceeded(), result.isSucceeded());
-            if (exp.isSucceeded()) {
+            assertEquals("isSucceeded", exp.getSucceeded(), result.getSucceeded());
+            if (exp.getSucceeded()) {
                 assertNull("Expected null error message", result.getErrorMessage());
             } else {
                 assertNotNull("Expected error message", result.getErrorMessage());
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/pom.xml
new file mode 100644 (file)
index 0000000..481adfd
--- /dev/null
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>mdsal-parent</artifactId>
+        <groupId>org.opendaylight.controller</groupId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../parent/pom.xml</relativePath>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>sal-cluster-admin-karaf-cli</artifactId>
+    <packaging>bundle</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.karaf.shell</groupId>
+            <artifactId>org.apache.karaf.shell.core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-cluster-admin-impl</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-api</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
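+            <!-- Generate Karaf shell service metadata for the @Service-annotated commands during
+                 process-classes, so the Karaf shell can discover them at runtime. -->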
+            <plugin>
+                <groupId>org.apache.karaf.tooling</groupId>
+                <artifactId>karaf-services-maven-plugin</artifactId>
+                <version>${karaf.version}</version>
+                <executions>
+                    <execution>
+                        <id>service-metadata-generate</id>
+                        <phase>process-classes</phase>
+                        <goals>
+                            <goal>service-metadata-generate</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AbstractRpcAction.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AbstractRpcAction.java
new file mode 100644 (file)
index 0000000..f16ac25
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import org.apache.karaf.shell.api.action.Action;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+/**
+ * Common base class for all commands which end up invoking an RPC.
+ */
+public abstract class AbstractRpcAction implements Action {
+    @Override
+    @SuppressWarnings("checkstyle:RegexpSinglelineJava")
+    public final Object execute() throws InterruptedException, ExecutionException {
+        final RpcResult<?> result = invokeRpc().get();
+        if (!result.isSuccessful()) {
+            // FIXME: is there a better way to report errors?
+            System.out.println("Invocation failed: " + result.getErrors());
+            return null;
+        } else {
+            return result.getResult();
+        }
+    }
+
+    protected abstract ListenableFuture<? extends RpcResult<?>> invokeRpc();
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ActivateEosDatacenterCommand.java
new file mode 100644 (file)
index 0000000..ec23a08
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ActivateEosDatacenterInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "activate-eos-datacenter", description = "Run an activate-eos-datacenter test")
+public class ActivateEosDatacenterCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(ActivateEosDatacenter.class)
+            .invoke(new ActivateEosDatacenterInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddReplicasForAllShardsCommand.java
new file mode 100644 (file)
index 0000000..51f086b
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddReplicasForAllShardsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "add-replicas-for-all-shards",
+        description = "Run an add-replicas-for-all-shards test")
+public class AddReplicasForAllShardsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(AddReplicasForAllShards.class)
+            .invoke(new AddReplicasForAllShardsInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/AddShardReplicaCommand.java
new file mode 100644 (file)
index 0000000..3639fbd
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplica;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.AddShardReplicaInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "add-shard-replica", description = "Run an add-shard-replica test")
+public class AddShardReplicaCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+    @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+    private String dataStoreType;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(AddShardReplica.class)
+                .invoke(new AddShardReplicaInputBuilder()
+                        .setShardName(shardName)
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/BackupDatastoreCommand.java
new file mode 100644 (file)
index 0000000..4f19d0f
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastore;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.BackupDatastoreInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "cluster-admin", name = "backup-datastore", description = "Run a backup-datastore test")
+public class BackupDatastoreCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "file-path", required = true)
+    private String filePath;
+    @Argument(index = 1, name = "timeout", required = true)
+    private long timeout;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(BackupDatastore.class)
+                .invoke(new BackupDatastoreInputBuilder()
+                        .setFilePath(filePath)
+                        .setTimeout(Uint32.valueOf(timeout))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForAllShardsCommand.java
new file mode 100644 (file)
index 0000000..cef7e9d
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForAllShardsInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+
+@Service
+@Command(scope = "cluster-admin", name = "change-member-voting-states-for-all-shards",
+        description = "Run a change-member-voting-states-for-all-shards test")
+public class ChangeMemberVotingStatesForAllShardsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "member-name", required = true)
+    private String memberName;
+    @Argument(index = 1, name = "voting", required = true)
+    private boolean voting;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        final MemberVotingState memberVotingState = new MemberVotingStateBuilder()
+                .setMemberName(memberName)
+                .setVoting(voting)
+                .build();
+
+        return rpcService.getRpc(ChangeMemberVotingStatesForAllShards.class)
+                .invoke(new ChangeMemberVotingStatesForAllShardsInputBuilder()
+                        .setMemberVotingState(List.of(memberVotingState))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/ChangeMemberVotingStatesForShardCommand.java
new file mode 100644 (file)
index 0000000..e98a309
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.List;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShard;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.ChangeMemberVotingStatesForShardInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingState;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.member.voting.states.input.MemberVotingStateBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "change-member-voting-states-for-shard",
+        description = "Run a change-member-voting-states-for-shard test")
+public class ChangeMemberVotingStatesForShardCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+    @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+    private String dataStoreType;
+    @Argument(index = 2, name = "member-name", required = true)
+    private String memberName;
+    @Argument(index = 3, name = "voting", required = true)
+    private boolean voting;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        final MemberVotingState memberVotingState = new MemberVotingStateBuilder()
+                .setMemberName(memberName)
+                .setVoting(voting)
+                .build();
+
+        return rpcService.getRpc(ChangeMemberVotingStatesForShard.class)
+                .invoke(new ChangeMemberVotingStatesForShardInputBuilder()
+                        .setShardName(shardName)
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
+                        .setMemberVotingState(List.of(memberVotingState))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/DeactivateEosDatacenterCommand.java
new file mode 100644 (file)
index 0000000..0ca0003
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenter;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DeactivateEosDatacenterInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "deactivate-eos-datacenter",
+        description = "Run a deactivate-eos-datacenter test")
+public class DeactivateEosDatacenterCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(DeactivateEosDatacenter.class)
+                .invoke(new DeactivateEosDatacenterInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/FlipMemberVotingStatesForAllShardsCommand.java
new file mode 100644 (file)
index 0000000..78c0b6f
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.FlipMemberVotingStatesForAllShardsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin",name = "flip-member-voting-states-for-all-shards",
+        description = "Run a flip-member-voting-states-for-all-shards test")
+public class FlipMemberVotingStatesForAllShardsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(FlipMemberVotingStatesForAllShards.class)
+                .invoke(new FlipMemberVotingStatesForAllShardsInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetKnownClientsForAllShardsCommand.java
new file mode 100644 (file)
index 0000000..c002846
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShards;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetKnownClientsForAllShardsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "get-known-clients-for-all-shards",
+        description = "Run a get-known-clients-for-all-shards test")
+public class GetKnownClientsForAllShardsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(GetKnownClientsForAllShards.class)
+                .invoke(new GetKnownClientsForAllShardsInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/GetShardRoleCommand.java
new file mode 100644 (file)
index 0000000..50a88e9
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRole;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.GetShardRoleInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "get-shard-role", description = "Run a get-shard-role test")
+public class GetShardRoleCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+    @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+    private String dataStoreType;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(GetShardRole.class)
+                .invoke(new GetShardRoleInputBuilder()
+                        .setShardName(shardName)
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/LocateShardCommand.java
new file mode 100644 (file)
index 0000000..6a1b173
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShard;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.LocateShardInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "locate-shard", description = "Run a locate-shard test")
+public class LocateShardCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+    @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+    private String dataStoreType;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(LocateShard.class)
+                .invoke(new LocateShardInputBuilder()
+                        .setShardName(shardName)
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/MakeLeaderLocalCommand.java
new file mode 100644 (file)
index 0000000..90aa8fc
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocal;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.MakeLeaderLocalInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "make-leader-local", description = "Run a make-leader-local test")
+public class MakeLeaderLocalCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+    @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+    private String dataStoreType;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(MakeLeaderLocal.class)
+                .invoke(new MakeLeaderLocalInputBuilder()
+                        .setShardName(shardName)
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveAllShardReplicasCommand.java
new file mode 100644 (file)
index 0000000..c33b663
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicas;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveAllShardReplicasInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "remove-all-shard-replicas",
+        description = "Run a remove-all-shard-replicas test")
+public class RemoveAllShardReplicasCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "member-name",required = true)
+    private String memberName;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RemoveAllShardReplicas.class)
+                .invoke(new RemoveAllShardReplicasInputBuilder()
+                        .setMemberName(memberName)
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java b/opendaylight/md-sal/sal-cluster-admin-karaf-cli/src/main/java/org/opendaylight/controller/cluster/datastore/admin/command/RemoveShardReplicaCommand.java
new file mode 100644 (file)
index 0000000..9738f4f
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.admin.command;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.DataStoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplica;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.cluster.admin.rev151013.RemoveShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "cluster-admin", name = "remove-shard-replica", description = "Run a remove-shard-replica")
+public class RemoveShardReplicaCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+    @Argument(index = 1, name = "data-store-type", required = true, description = "config / operational")
+    private String dataStoreType;
+    @Argument(index = 2, name = "member-name", required = true)
+    private String memberName;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RemoveShardReplica.class)
+                .invoke(new RemoveShardReplicaInputBuilder()
+                        .setShardName(shardName)
+                        .setDataStoreType(DataStoreType.forName(dataStoreType))
+                        .setMemberName(memberName)
+                        .build());
+    }
+}
index ad682904f59922b3c8579a00b9927dc4ddbef390..14056f32ca4989be1886020ad148e46cef208da1 100644 (file)
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
   <artifactId>sal-clustering-commons</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
-    <!-- Java -->
     <dependency>
-      <groupId>org.xmlunit</groupId>
-      <artifactId>xmlunit-legacy</artifactId>
+      <!-- Enforce Netty’s optional dependency on servlet API -->
+      <!-- FIXME: is this really needed ? -->
+      <groupId>javax.servlet</groupId>
+      <artifactId>javax.servlet-api</artifactId>
     </dependency>
 
     <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
     </dependency>
     <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <scope>provided</scope>
+      <optional>true</optional>
     </dependency>
-
-    <!-- Apache -->
     <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <scope>test</scope>
+      <groupId>com.typesafe</groupId>
+      <artifactId>config</artifactId>
     </dependency>
     <dependency>
-      <groupId>commons-io</groupId>
-      <artifactId>commons-io</artifactId>
-      <scope>test</scope>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
-      <scope>test</scope>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-graphite</artifactId>
     </dependency>
-
-    <!-- Akka -->
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-actor_2.12</artifactId>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-jmx</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-cluster_2.12</artifactId>
+      <groupId>org.checkerframework</groupId>
+      <artifactId>checker-qual</artifactId>
+      <optional>true</optional>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-osgi_2.12</artifactId>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-persistence_2.12</artifactId>
+      <groupId>org.kohsuke.metainf-services</groupId>
+      <artifactId>metainf-services</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-remote_2.12</artifactId>
+      <groupId>org.lz4</groupId>
+      <artifactId>lz4-java</artifactId>
+      <version>1.8.0</version>
     </dependency>
     <dependency>
-      <!-- Enforce Netty’s optional dependency on servlet API -->
-      <!-- FIXME: is this really needed ? -->
-      <groupId>javax.servlet</groupId>
-      <artifactId>javax.servlet-api</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-slf4j_2.12</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>util</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-testkit_2.12</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-common</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-persistence-tck_2.12</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-api</artifactId>
     </dependency>
-
-    <!-- Codahale -->
     <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-codec-binfmt</artifactId>
     </dependency>
     <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-graphite</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-impl</artifactId>
     </dependency>
     <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-jmx</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-api</artifactId>
     </dependency>
-
-    <!-- Google -->
     <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava-testlib</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-repo-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-repo-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>repackaged-akka</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
     </dependency>
-
-    <!-- Scala -->
     <dependency>
       <groupId>org.scala-lang</groupId>
       <artifactId>scala-library</artifactId>
     </dependency>
 
-    <!-- OpenDaylight -->
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>util</artifactId>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava-testlib</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-api</artifactId>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-persistence-tck_2.13</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-testkit_2.13</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-model-api</artifactId>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-model-util</artifactId>
+      <artifactId>yang-test-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatestplus</groupId>
+      <artifactId>junit-4-13_2.13</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-simple</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.xmlunit</groupId>
+      <artifactId>xmlunit-core</artifactId>
     </dependency>
   </dependencies>
 
         </configuration>
       </plugin>
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
           <execution>
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <artifactId>maven-source-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar-no-fork</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
 
index c655dcdb891488b52f1f42741046594e9651a5e6..44afa634ccfc0b812761e9bb9fe97c6be9061900 100644 (file)
@@ -9,7 +9,10 @@
 package org.opendaylight.controller.cluster;
 
 import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * DataPersistenceProvider provides methods to persist data and is an abstraction of the akka-persistence persistence
@@ -70,4 +73,20 @@ public interface DataPersistenceProvider {
      * @return the last sequence number
      */
     long getLastSequenceNumber();
+
+    /**
+     * Receive and potentially handle a {@link JournalProtocol} response.
+     *
+     * @param response A {@link JournalProtocol} response
+     * @return {@code true} if the response was handled
+     */
+    boolean handleJournalResponse(JournalProtocol.@NonNull Response response);
+
+    /**
+     * Receive and potentially handle a {@link SnapshotProtocol} response.
+     *
+     * @param response A {@link SnapshotProtocol} response
+     * @return {@code true} if the response was handled
+     */
+    boolean handleSnapshotResponse(SnapshotProtocol.@NonNull Response response);
 }
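
The two new methods let the actor owning a DataPersistenceProvider offer akka-persistence protocol responses to the provider first, falling back to its own handling only when the provider does not consume them. A minimal sketch of such a caller, assuming an actor-side helper; the class and field names below are illustrative and not part of this change:

import akka.persistence.JournalProtocol;
import akka.persistence.SnapshotProtocol;

// Illustrative helper: offers a persistence protocol response to the provider
// and reports whether the provider consumed it.
final class PersistenceResponseRouter {
    private final DataPersistenceProvider persistence;

    PersistenceResponseRouter(final DataPersistenceProvider persistence) {
        this.persistence = persistence;
    }

    boolean offer(final Object message) {
        if (message instanceof JournalProtocol.Response journalResponse) {
            return persistence.handleJournalResponse(journalResponse);
        }
        if (message instanceof SnapshotProtocol.Response snapshotResponse) {
            return persistence.handleSnapshotResponse(snapshotResponse);
        }
        return false;
    }
}

With the PersistentDataProvider changes further below, this consumes DeleteMessagesSuccess and DeleteSnapshotsSuccess acknowledgements, while NonPersistentDataProvider leaves every response to the caller.
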
index f1a20fcc8e54f4e2f4908a8ec032477a6bd89b0f..3210819225b11e7b349772b8fc6a2735800a5bee 100644 (file)
@@ -8,6 +8,8 @@
 package org.opendaylight.controller.cluster;
 
 import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
 
 /**
@@ -18,11 +20,11 @@ import akka.persistence.SnapshotSelectionCriteria;
 public class DelegatingPersistentDataProvider implements DataPersistenceProvider {
     private DataPersistenceProvider delegate;
 
-    public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
+    public DelegatingPersistentDataProvider(final DataPersistenceProvider delegate) {
         this.delegate = delegate;
     }
 
-    public void setDelegate(DataPersistenceProvider delegate) {
+    public void setDelegate(final DataPersistenceProvider delegate) {
         this.delegate = delegate;
     }
 
@@ -36,27 +38,27 @@ public class DelegatingPersistentDataProvider implements DataPersistenceProvider
     }
 
     @Override
-    public <T> void persist(T entry, Procedure<T> procedure) {
+    public <T> void persist(final T entry, final Procedure<T> procedure) {
         delegate.persist(entry, procedure);
     }
 
     @Override
-    public <T> void persistAsync(T entry, Procedure<T> procedure) {
+    public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
         delegate.persistAsync(entry, procedure);
     }
 
     @Override
-    public void saveSnapshot(Object entry) {
+    public void saveSnapshot(final Object entry) {
         delegate.saveSnapshot(entry);
     }
 
     @Override
-    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+    public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
         delegate.deleteSnapshots(criteria);
     }
 
     @Override
-    public void deleteMessages(long sequenceNumber) {
+    public void deleteMessages(final long sequenceNumber) {
         delegate.deleteMessages(sequenceNumber);
     }
 
@@ -64,4 +66,14 @@ public class DelegatingPersistentDataProvider implements DataPersistenceProvider
     public long getLastSequenceNumber() {
         return delegate.getLastSequenceNumber();
     }
+
+    @Override
+    public boolean handleJournalResponse(final JournalProtocol.Response response) {
+        return delegate.handleJournalResponse(response);
+    }
+
+    @Override
+    public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+        return delegate.handleSnapshotResponse(response);
+    }
 }
index 9a4a34cf596d00dbf1d1eca8dd1973b88f3d236d..5461689d2aebc84739b165657db6330e5feba59f 100644 (file)
@@ -10,6 +10,8 @@ package org.opendaylight.controller.cluster;
 import static java.util.Objects.requireNonNull;
 
 import akka.japi.Procedure;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
 import org.opendaylight.controller.cluster.common.actor.ExecuteInSelfActor;
 import org.slf4j.Logger;
@@ -70,4 +72,14 @@ public class NonPersistentDataProvider implements DataPersistenceProvider {
             LOG.error("An unexpected error occurred", e);
         }
     }
+
+    @Override
+    public boolean handleJournalResponse(final JournalProtocol.Response response) {
+        return false;
+    }
+
+    @Override
+    public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+        return false;
+    }
 }
index 1d676fa4a6577f7ff00fef4359fde56f485db3eb..1faee47f526ac119d384ab41a4b29362a236c89f 100644 (file)
@@ -7,20 +7,24 @@
  */
 package org.opendaylight.controller.cluster;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.japi.Procedure;
+import akka.persistence.AbstractPersistentActor;
+import akka.persistence.DeleteMessagesSuccess;
+import akka.persistence.DeleteSnapshotsSuccess;
+import akka.persistence.JournalProtocol;
+import akka.persistence.SnapshotProtocol;
 import akka.persistence.SnapshotSelectionCriteria;
-import akka.persistence.UntypedPersistentActor;
-import com.google.common.base.Preconditions;
 
 /**
  * A DataPersistenceProvider implementation with persistence enabled.
  */
 public class PersistentDataProvider implements DataPersistenceProvider {
+    private final AbstractPersistentActor persistentActor;
 
-    private final UntypedPersistentActor persistentActor;
-
-    public PersistentDataProvider(UntypedPersistentActor persistentActor) {
-        this.persistentActor = Preconditions.checkNotNull(persistentActor, "persistentActor can't be null");
+    public PersistentDataProvider(final AbstractPersistentActor persistentActor) {
+        this.persistentActor = requireNonNull(persistentActor, "persistentActor can't be null");
     }
 
     @Override
@@ -29,27 +33,27 @@ public class PersistentDataProvider implements DataPersistenceProvider {
     }
 
     @Override
-    public <T> void persist(T entry, Procedure<T> procedure) {
+    public <T> void persist(final T entry, final Procedure<T> procedure) {
         persistentActor.persist(entry, procedure);
     }
 
     @Override
-    public <T> void persistAsync(T entry, Procedure<T> procedure) {
+    public <T> void persistAsync(final T entry, final Procedure<T> procedure) {
         persistentActor.persistAsync(entry, procedure);
     }
 
     @Override
-    public void saveSnapshot(Object snapshot) {
+    public void saveSnapshot(final Object snapshot) {
         persistentActor.saveSnapshot(snapshot);
     }
 
     @Override
-    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+    public void deleteSnapshots(final SnapshotSelectionCriteria criteria) {
         persistentActor.deleteSnapshots(criteria);
     }
 
     @Override
-    public void deleteMessages(long sequenceNumber) {
+    public void deleteMessages(final long sequenceNumber) {
         persistentActor.deleteMessages(sequenceNumber);
     }
 
@@ -57,4 +61,14 @@ public class PersistentDataProvider implements DataPersistenceProvider {
     public long getLastSequenceNumber() {
         return persistentActor.lastSequenceNr();
     }
+
+    @Override
+    public boolean handleJournalResponse(final JournalProtocol.Response response) {
+        return response instanceof DeleteMessagesSuccess;
+    }
+
+    @Override
+    public boolean handleSnapshotResponse(final SnapshotProtocol.Response response) {
+        return response instanceof DeleteSnapshotsSuccess;
+    }
 }
index c0e260ae66a9f2dcc6c73e254449da1167f403ff..f66a77f66eeee8c4844c42e4406f330e05634a18 100644 (file)
@@ -8,19 +8,20 @@
 
 package org.opendaylight.controller.cluster.common.actor;
 
+import akka.actor.AbstractActor;
 import akka.actor.ActorRef;
-import akka.actor.UntypedActor;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.eclipse.jdt.annotation.NonNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public abstract class AbstractUntypedActor extends UntypedActor implements ExecuteInSelfActor {
+public abstract class AbstractUntypedActor extends AbstractActor implements ExecuteInSelfActor {
     // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     @SuppressWarnings("checkstyle:MemberName")
+    @SuppressFBWarnings(value = "SLF4J_LOGGER_SHOULD_BE_PRIVATE", justification = "Class identity is required")
     protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected AbstractUntypedActor() {
         LOG.debug("Actor created {}", getSelf());
         getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
@@ -33,12 +34,11 @@ public abstract class AbstractUntypedActor extends UntypedActor implements Execu
     }
 
     @Override
-    public final void onReceive(final Object message) {
-        if (message instanceof ExecuteInSelfMessage) {
-            ((ExecuteInSelfMessage) message).run();
-        } else {
-            handleReceive(message);
-        }
+    public Receive createReceive() {
+        return receiveBuilder()
+                .match(ExecuteInSelfMessage.class, ExecuteInSelfMessage::run)
+                .matchAny(this::handleReceive)
+                .build();
     }
 
     /**
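
With the move from UntypedActor to AbstractActor, dispatch is now described by createReceive(): ExecuteInSelfMessage is matched by the base class and everything else is passed to handleReceive(Object). A minimal hypothetical subclass under that assumption; the actor and its message handling are invented for illustration:

// Illustrative subclass: all non-ExecuteInSelfMessage traffic arrives in handleReceive().
public final class EchoActor extends AbstractUntypedActor {
    @Override
    protected void handleReceive(final Object message) {
        if (message instanceof String text) {
            // Reply to plain text messages.
            getSender().tell("echo: " + text, getSelf());
        } else {
            // The protected LOG field is provided by AbstractUntypedActor.
            LOG.debug("Ignoring unexpected message {}", message);
        }
    }
}
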
index 2124b24faf29ac92859b50834c8cbbe654a84bc3..d20ceb525224a2e91844d4e1be7f5dab140adcd6 100644 (file)
@@ -7,21 +7,24 @@
  */
 package org.opendaylight.controller.cluster.common.actor;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
 /**
  * Actor with its behaviour metered. Metering is enabled by configuration.
  */
 public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
-
-    //this is used in the metric name. Some transient actors do not have defined names
+    // this is used in the metric name. Some transient actors do not have defined names
     private String actorNameOverride;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     public AbstractUntypedActorWithMetering() {
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
         }
     }
 
-    public AbstractUntypedActorWithMetering(String actorNameOverride) {
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
+    public AbstractUntypedActorWithMetering(final String actorNameOverride) {
         this.actorNameOverride = actorNameOverride;
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
@@ -29,8 +32,7 @@ public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedAc
     }
 
     private boolean isMetricsCaptureEnabled() {
-        CommonConfig config = new CommonConfig(getContext().system().settings().config());
-        return config.isMetricCaptureEnabled();
+        return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled();
     }
 
     public String getActorNameOverride() {
index 5ee3c499881e5a54bc0d6a7e4858a24edad1604b..8bf657e134939dea8b9bb3284149ffd4e314bfdf 100644 (file)
@@ -5,23 +5,25 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.common.actor;
 
 import akka.actor.ActorRef;
-import akka.persistence.UntypedPersistentActor;
+import akka.persistence.AbstractPersistentActor;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.eclipse.jdt.annotation.NonNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor implements ExecuteInSelfActor {
+// FIXME: override getContext(), getSelf() and others to be final to get rid of
+//        SpotBugs MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR violation
+public abstract class AbstractUntypedPersistentActor extends AbstractPersistentActor implements ExecuteInSelfActor {
 
     // The member name should be lower case but it's referenced in many subclasses. Suppressing the CS warning for now.
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     @SuppressWarnings("checkstyle:MemberName")
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     protected final Logger LOG = LoggerFactory.getLogger(getClass());
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected AbstractUntypedPersistentActor() {
         LOG.trace("Actor created {}", getSelf());
         getContext().system().actorSelection("user/termination-monitor").tell(new Monitor(getSelf()), getSelf());
@@ -35,26 +37,16 @@ public abstract class AbstractUntypedPersistentActor extends UntypedPersistentAc
     }
 
     @Override
-    public final void onReceiveCommand(final Object message) throws Exception {
-        final String messageType = message.getClass().getSimpleName();
-        LOG.trace("Received message {}", messageType);
-
-        if (message instanceof ExecuteInSelfMessage) {
-            LOG.trace("Executing {}", message);
-            ((ExecuteInSelfMessage) message).run();
-        } else {
-            handleCommand(message);
-        }
-
-        LOG.trace("Done handling message {}", messageType);
+    public final Receive createReceive() {
+        return receiveBuilder()
+                .match(ExecuteInSelfMessage.class, ExecuteInSelfMessage::run)
+                .matchAny(this::handleCommand)
+                .build();
     }
 
     @Override
-    public final void onReceiveRecover(final Object message) throws Exception {
-        final String messageType = message.getClass().getSimpleName();
-        LOG.trace("Received message {}", messageType);
-        handleRecover(message);
-        LOG.trace("Done handling message {}", messageType);
+    public final Receive createReceiveRecover() {
+        return receiveBuilder().matchAny(this::handleRecover).build();
     }
 
     protected abstract void handleRecover(Object message) throws Exception;
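With createReceive() and createReceiveRecover() now final, the only extension points left to subclasses are the handleCommand()/handleRecover() hooks matched above. A minimal sketch of such a subclass, assuming handleCommand(Object) is the abstract command hook referenced in the builder (ExampleActor and its persistenceId are invented for illustration):

import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;

public class ExampleActor extends AbstractUntypedPersistentActor {
    @Override
    public String persistenceId() {
        // Illustrative identifier; real actors derive this from their configuration.
        return "example-actor";
    }

    @Override
    protected void handleCommand(final Object message) {
        // Delivered through the final createReceive() above.
        LOG.debug("command: {}", message);
    }

    @Override
    protected void handleRecover(final Object message) {
        // Delivered through the final createReceiveRecover() above during journal replay.
        LOG.debug("recovered: {}", message);
    }
}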
index ed03d334919ed7b2422a64802cf8dceef0cb52b3..760f0bd0fd7f92c321c6b26872022c51577051d6 100644 (file)
@@ -7,11 +7,13 @@
  */
 package org.opendaylight.controller.cluster.common.actor;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+
 /**
  * Actor with its behaviour metered. Metering is enabled by configuration.
  */
 public abstract class AbstractUntypedPersistentActorWithMetering extends AbstractUntypedPersistentActor {
-
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     public AbstractUntypedPersistentActorWithMetering() {
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
@@ -19,7 +21,6 @@ public abstract class AbstractUntypedPersistentActorWithMetering extends Abstrac
     }
 
     private boolean isMetricsCaptureEnabled() {
-        CommonConfig config = new CommonConfig(getContext().system().settings().config());
-        return config.isMetricCaptureEnabled();
+        return new CommonConfig(getContext().system().settings().config()).isMetricCaptureEnabled();
     }
 }
index aaa9ad6c81a57d917ed2db687be9880740bbf067..62043e24c34d446c9436566fcc6b78ed1dc256cb 100644 (file)
@@ -5,10 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.common.actor;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import scala.concurrent.ExecutionContext;
 
 public class Dispatchers {
@@ -50,8 +50,7 @@ public class Dispatchers {
     }
 
     public Dispatchers(final akka.dispatch.Dispatchers dispatchers) {
-        Preconditions.checkNotNull(dispatchers, "dispatchers should not be null");
-        this.dispatchers = dispatchers;
+        this.dispatchers = requireNonNull(dispatchers, "dispatchers should not be null");
     }
 
     public ExecutionContext getDispatcher(final DispatcherType dispatcherType) {
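The recurring substitution in this change set is Guava's Preconditions.checkNotNull giving way to JDK java.util.Objects.requireNonNull, which throws the same NullPointerException with the supplied message and needs no extra dependency. A tiny self-contained illustration (Holder is a made-up class, not part of this tree):

import static java.util.Objects.requireNonNull;

final class Holder {
    private final Object delegate;

    Holder(final Object delegate) {
        // Equivalent to Preconditions.checkNotNull(delegate, "delegate should not be null")
        this.delegate = requireNonNull(delegate, "delegate should not be null");
    }

    Object delegate() {
        return delegate;
    }
}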
index 883dbd7210a97f320c12e29d2a6353d4a9b7467d..7951f5d3cc887113adaf8d5120e96415af251087 100644 (file)
@@ -5,22 +5,33 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.common.actor;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkState;
+
 import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
 import java.io.File;
+import javax.inject.Singleton;
+import org.kohsuke.MetaInfServices;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+@Component(immediate = true)
+@MetaInfServices
+@Singleton
 public class FileAkkaConfigurationReader implements AkkaConfigurationReader {
+    private static final Logger LOG = LoggerFactory.getLogger(FileAkkaConfigurationReader.class);
     private static final String CUSTOM_AKKA_CONF_PATH = "./configuration/initial/akka.conf";
     private static final String FACTORY_AKKA_CONF_PATH = "./configuration/factory/akka.conf";
 
     @Override
     public Config read() {
         File customConfigFile = new File(CUSTOM_AKKA_CONF_PATH);
-        Preconditions.checkState(customConfigFile.exists(), "%s is missing", customConfigFile);
+        checkState(customConfigFile.exists(), "%s is missing", customConfigFile);
 
         File factoryConfigFile = new File(FACTORY_AKKA_CONF_PATH);
         if (factoryConfigFile.exists()) {
@@ -29,4 +40,14 @@ public class FileAkkaConfigurationReader implements AkkaConfigurationReader {
 
         return ConfigFactory.parseFile(customConfigFile);
     }
+
+    @Activate
+    void activate() {
+        LOG.info("File-based Akka configuration reader enabled");
+    }
+
+    @Deactivate
+    void deactivate() {
+        LOG.info("File-based Akka configuration reader disabled");
+    }
 }
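A hedged usage sketch for the reader above: the new @Component, @MetaInfServices and @Singleton annotations let it be discovered as an OSGi DS component or via ServiceLoader, but it can just as well be constructed directly. The actor-system name below is illustrative only, not taken from this change:

import akka.actor.ActorSystem;
import com.typesafe.config.Config;
import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
import org.opendaylight.controller.cluster.common.actor.FileAkkaConfigurationReader;

final class AkkaBootstrapSketch {
    static ActorSystem start() {
        AkkaConfigurationReader reader = new FileAkkaConfigurationReader();
        // Fails fast if ./configuration/initial/akka.conf is missing (checkState above).
        Config akkaConfig = reader.read();
        return ActorSystem.create("example-system", akkaConfig);
    }
}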
index 65cef56a2bb9624a7b30d847b5932d1c79f82d14..1ba5ac6bdeb485a53fee0948df88b824a4b32ba6 100644 (file)
@@ -5,22 +5,22 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.common.actor;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.NANOSECONDS;
 
 import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
 import com.google.common.base.Ticker;
 import com.google.common.collect.ImmutableList;
 import java.util.LinkedList;
 import java.util.List;
-import javax.annotation.concurrent.NotThreadSafe;
+import java.util.Optional;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -53,9 +53,11 @@ import org.slf4j.LoggerFactory;
  *     }
  *
  * </pre>
+ *
+ * <p>
+ * This class is NOT thread-safe.
  */
 @Beta
-@NotThreadSafe
 public final class MessageTracker {
     public abstract static class Context implements AutoCloseable {
         Context() {
@@ -82,7 +84,7 @@ public final class MessageTracker {
         private final long elapsedTimeInNanos;
 
         MessageProcessingTime(final Class<?> messageClass, final long elapsedTimeInNanos) {
-            this.messageClass = Preconditions.checkNotNull(messageClass);
+            this.messageClass = requireNonNull(messageClass);
             this.elapsedTimeInNanos = elapsedTimeInNanos;
         }
 
@@ -111,7 +113,7 @@ public final class MessageTracker {
 
         @Override
         public Optional<Error> error() {
-            return Optional.absent();
+            return Optional.empty();
         }
     };
 
@@ -132,10 +134,10 @@ public final class MessageTracker {
     @VisibleForTesting
     MessageTracker(final Class<?> expectedMessageClass, final long expectedArrivalIntervalInMillis,
             final Ticker ticker) {
-        Preconditions.checkArgument(expectedArrivalIntervalInMillis >= 0);
-        this.expectedMessageClass = Preconditions.checkNotNull(expectedMessageClass);
+        checkArgument(expectedArrivalIntervalInMillis >= 0);
+        this.expectedMessageClass = requireNonNull(expectedMessageClass);
         this.expectedArrivalInterval = MILLISECONDS.toNanos(expectedArrivalIntervalInMillis);
-        this.ticker = Preconditions.checkNotNull(ticker);
+        this.ticker = requireNonNull(ticker);
         this.expectedMessageWatch = Stopwatch.createUnstarted(ticker);
         this.currentMessageContext = new CurrentMessageContext();
     }
@@ -223,11 +225,11 @@ public final class MessageTracker {
 
         @Override
         public String toString() {
-            StringBuilder builder = new StringBuilder();
-            builder.append("\n> Last Expected Message = ").append(lastExpectedMessage);
-            builder.append("\n> Current Expected Message = ").append(currentExpectedMessage);
-            builder.append("\n> Expected time in between messages = ").append(expectedTimeInMillis);
-            builder.append("\n> Actual time in between messages = ").append(actualTimeInMillis);
+            StringBuilder builder = new StringBuilder()
+                    .append("\n> Last Expected Message = ").append(lastExpectedMessage)
+                    .append("\n> Current Expected Message = ").append(currentExpectedMessage)
+                    .append("\n> Expected time in between messages = ").append(expectedTimeInMillis)
+                    .append("\n> Actual time in between messages = ").append(actualTimeInMillis);
             for (MessageProcessingTime time : messagesSinceLastExpectedMessage) {
                 builder.append("\n\t> ").append(time);
             }
@@ -251,9 +253,9 @@ public final class MessageTracker {
         private Object message;
 
         void reset(final Object newMessage) {
-            this.message = Preconditions.checkNotNull(newMessage);
-            Preconditions.checkState(!stopwatch.isRunning(),
-                "Trying to reset a context that is not done (%s). currentMessage = %s", this, newMessage);
+            this.message = requireNonNull(newMessage);
+            checkState(!stopwatch.isRunning(), "Trying to reset a context that is not done (%s). currentMessage = %s",
+                this, newMessage);
             stopwatch.start();
         }
 
@@ -269,7 +271,7 @@ public final class MessageTracker {
 
         @Override
         public Optional<Error> error() {
-            return Optional.absent();
+            return Optional.empty();
         }
     }
 
@@ -279,8 +281,8 @@ public final class MessageTracker {
         private final Error error;
 
         ErrorContext(final Object message, final Error error) {
-            this.message = Preconditions.checkNotNull(message);
-            this.error = Preconditions.checkNotNull(error);
+            this.message = requireNonNull(message);
+            this.error = requireNonNull(error);
         }
 
         @Override
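Several hunks above also migrate from Guava's Optional to java.util.Optional, so Optional.absent() becomes Optional.empty(). A minimal stand-alone reminder of the JDK idioms (OptionalDemo is invented for illustration):

import java.util.Optional;

final class OptionalDemo {
    static Optional<String> lookup(final boolean found) {
        // Guava: Optional.absent()  ->  JDK: Optional.empty()
        return found ? Optional.of("value") : Optional.empty();
    }

    public static void main(final String[] args) {
        System.out.println(lookup(false).orElse("<absent>"));
    }
}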
index c3f61c66fef8389e7f06d6bfcbd38cc38aa1cb48..d234532751e94d871162a82b00d7d051e101ab6c 100644 (file)
@@ -74,7 +74,7 @@ public class MeteredBoundedMailbox implements MailboxType,
             registry.register(fullName, metric);
         } catch (IllegalArgumentException e) {
             // already registered - shouldn't happen here since we check above...
-            LOG.debug("Unable to register '{}' in metrics registry: {}", e);
+            LOG.debug("Unable to register '{}' in metrics registry", fullName);
         }
     }
 
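The one-line logging fix above addresses an SLF4J placeholder/argument mismatch: the old call declared two '{}' slots but passed only the exception, so neither the metric name nor the cause was rendered usefully. A small sketch of the corrected form (Slf4jPlaceholderDemo is invented; the variant keeping the cause is an alternative, not what the commit does):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class Slf4jPlaceholderDemo {
    private static final Logger LOG = LoggerFactory.getLogger(Slf4jPlaceholderDemo.class);

    static void report(final String fullName, final IllegalArgumentException e) {
        // Fixed form from the hunk above: one placeholder, one argument.
        LOG.debug("Unable to register '{}' in metrics registry", fullName);
        // Alternative that also keeps the exception as the trailing throwable.
        LOG.debug("Unable to register '{}' in metrics registry", fullName, e);
    }
}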
index 63958912c48e8832a4c4b144eab84822e1b24b7a..30331930d6b8f882a9fab12835c5ea20da0bc6fd 100644 (file)
@@ -7,16 +7,16 @@
  */
 package org.opendaylight.controller.cluster.common.actor;
 
-import akka.actor.UntypedActor;
-import akka.japi.Procedure;
+import akka.actor.AbstractActor;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
 import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import scala.PartialFunction;
+import scala.runtime.AbstractPartialFunction;
+import scala.runtime.BoxedUnit;
 
 /**
- * Represents behaviour that can be exhibited by actors of type {@link akka.actor.UntypedActor}
+ * Represents behaviour that can be exhibited by actors of type {@link AbstractActor}
  *
  * <p>
  * This behaviour meters actor's default behaviour. It captures 2 metrics:
@@ -26,17 +26,21 @@ import org.opendaylight.controller.cluster.reporting.MetricsReporter;
  * </ul>
  * The information is reported to {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}
  */
-public class MeteringBehavior implements Procedure<Object> {
+public class MeteringBehavior extends AbstractPartialFunction<Object, BoxedUnit> {
     public static final String DOMAIN = "org.opendaylight.controller.actor.metric";
 
     private static final String MSG_PROCESSING_RATE = "msg-rate";
 
-    private final UntypedActor meteredActor;
-
     private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DOMAIN).getMetricsRegistry();
-
-    private String actorQualifiedName;
-    private Timer msgProcessingTimer;
+    private final String actorQualifiedName;
+    private final Timer msgProcessingTimer;
+    private final PartialFunction<Object, BoxedUnit> receive;
+
+    private MeteringBehavior(final String actorName, final AbstractActor meteredActor) {
+        actorQualifiedName = meteredActor.getSelf().path().parent().toStringWithoutAddress() + "/" + actorName;
+        msgProcessingTimer = metricRegistry.timer(MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE));
+        receive = meteredActor.createReceive().onMessage();
+    }
 
     /**
      * Constructs an instance.
@@ -44,28 +48,17 @@ public class MeteringBehavior implements Procedure<Object> {
      * @param actor whose behaviour needs to be metered
      */
     public MeteringBehavior(final AbstractUntypedActorWithMetering actor) {
-        Preconditions.checkArgument(actor != null, "actor must not be null");
-        this.meteredActor = actor;
-
-        String actorName = actor.getActorNameOverride() != null ? actor.getActorNameOverride()
-                                                                : actor.getSelf().path().name();
-        init(actorName);
+        this(actor.getActorNameOverride() != null ? actor.getActorNameOverride() : actor.getSelf().path().name(),
+                actor);
     }
 
-    public MeteringBehavior(final UntypedActor actor) {
-        Preconditions.checkArgument(actor != null, "actor must not be null");
-        this.meteredActor = actor;
-
-        String actorName = actor.getSelf().path().name();
-        init(actorName);
+    public MeteringBehavior(final AbstractActor actor) {
+        this(actor.getSelf().path().name(), actor);
     }
 
-    private void init(final String actorName) {
-        actorQualifiedName = meteredActor.getSelf().path().parent().toStringWithoutAddress()
-                + "/" + actorName;
-
-        final String msgProcessingTime = MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE);
-        msgProcessingTimer = metricRegistry.timer(msgProcessingTime);
+    @Override
+    public boolean isDefinedAt(final Object obj) {
+        return receive.isDefinedAt(obj);
     }
 
     /**
@@ -84,16 +77,12 @@ public class MeteringBehavior implements Procedure<Object> {
      *     http://dropwizard.github.io/metrics/manual/core/#timers</a>
      *
      * @param message the message to process
-     * @throws Exception on message failure
      */
-    @SuppressWarnings("checkstyle:IllegalCatch")
     @Override
-    public void apply(final Object message) throws Exception {
+    public BoxedUnit apply(final Object message) {
         final String messageType = message.getClass().getSimpleName();
-
         final String msgProcessingTimeByMsgType =
                 MetricRegistry.name(actorQualifiedName, MSG_PROCESSING_RATE, messageType);
-
         final Timer msgProcessingTimerByMsgType = metricRegistry.timer(msgProcessingTimeByMsgType);
 
         //start timers
@@ -101,10 +90,7 @@ public class MeteringBehavior implements Procedure<Object> {
         final Timer.Context contextByMsgType = msgProcessingTimerByMsgType.time();
 
         try {
-            meteredActor.onReceive(message);
-        } catch (Throwable e) {
-            Throwables.propagateIfPossible(e, Exception.class);
-            throw new RuntimeException(e);
+            return receive.apply(message);
         } finally {
             //stop timers
             contextByMsgType.stop();
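The wiring for this behaviour is visible in the *WithMetering constructors earlier in the diff: an actor hands itself to MeteringBehavior and swaps its behaviour, so every message is timed before being forwarded to the original receive. A minimal stand-alone sketch with a hypothetical actor (not from this tree):

import akka.actor.AbstractActor;
import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;

public class MeteredEchoActor extends AbstractActor {
    public MeteredEchoActor() {
        // Same wiring as the *WithMetering base classes: MeteringBehavior wraps
        // createReceive() and records per-message timings before delegating to it.
        getContext().become(new MeteringBehavior(this));
    }

    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .matchAny(msg -> getSender().tell(msg, getSelf()))
                .build();
    }
}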
index 52df6ab7b388314879e72160890a624fc0104529..c5c19d8d37ebf81300e7021d84c1784064d12116 100644 (file)
@@ -8,10 +8,18 @@
 
 package org.opendaylight.controller.cluster.common.actor;
 
+import akka.actor.Address;
 import akka.actor.Props;
 import akka.actor.UntypedAbstractActor;
+import akka.cluster.Cluster;
+import akka.cluster.ClusterEvent;
 import akka.japi.Effect;
-import akka.remote.ThisActorSystemQuarantinedEvent;
+import akka.remote.AssociationErrorEvent;
+import akka.remote.RemotingLifecycleEvent;
+import akka.remote.artery.ThisActorSystemQuarantinedEvent;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.HashSet;
+import java.util.Set;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -24,20 +32,25 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class QuarantinedMonitorActor extends UntypedAbstractActor {
+    public static final String ADDRESS = "quarantined-monitor";
 
     private static final Logger LOG = LoggerFactory.getLogger(QuarantinedMonitorActor.class);
-
-    public static final String ADDRESS = "quarantined-monitor";
+    private static final Integer MESSAGE_THRESHOLD = 10;
 
     private final Effect callback;
     private boolean quarantined;
 
+    private final Set<Address> addressSet = new HashSet<>();
+    private int count = 0;
+
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     protected QuarantinedMonitorActor(final Effect callback) {
         this.callback = callback;
 
         LOG.debug("Created QuarantinedMonitorActor");
 
-        getContext().system().eventStream().subscribe(getSelf(), ThisActorSystemQuarantinedEvent.class);
+        getContext().system().eventStream().subscribe(getSelf(), RemotingLifecycleEvent.class);
+        getContext().system().eventStream().subscribe(getSelf(), ClusterEvent.MemberDowned.class);
     }
 
     @Override
@@ -55,13 +68,39 @@ public class QuarantinedMonitorActor extends UntypedAbstractActor {
             return;
         }
 
-        if (message instanceof ThisActorSystemQuarantinedEvent) {
-            final ThisActorSystemQuarantinedEvent event = (ThisActorSystemQuarantinedEvent) message;
+        if (message instanceof ThisActorSystemQuarantinedEvent event) {
             LOG.warn("Got quarantined by {}", event.remoteAddress());
             quarantined = true;
 
             // execute the callback
             callback.apply();
+        } else if (message instanceof AssociationErrorEvent event) {
+            final String errorMessage = message.toString();
+            LOG.trace("errorMessage:{}", errorMessage);
+            if (errorMessage.contains("The remote system has a UID that has been quarantined")) {
+                final Address address = event.getRemoteAddress();
+                addressSet.add(address);
+                count++;
+                LOG.trace("address:{} addressSet: {} count:{}", address, addressSet, count);
+                if (count >= MESSAGE_THRESHOLD && addressSet.size() > 1) {
+                    count = 0;
+                    addressSet.clear();
+                    LOG.warn("Got quarantined via AssociationEvent by {}", event.remoteAddress());
+                    quarantined = true;
+
+                    // execute the callback
+                    callback.apply();
+                }
+            } else if (errorMessage.contains("The remote system explicitly disassociated")) {
+                count = 0;
+                addressSet.clear();
+            }
+        } else if (message instanceof ClusterEvent.MemberDowned event) {
+            if (Cluster.get(getContext().system()).selfMember().equals(event.member())) {
+                LOG.warn("This member has been downed, restarting");
+
+                callback.apply();
+            }
         }
     }
 
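The new branches rely on pattern-matching instanceof ('message instanceof Foo foo'), which fuses the type check and the cast into one binding. A small sketch reusing the event accessor seen above (PatternMatchSketch and describe() are invented for illustration):

import akka.remote.AssociationErrorEvent;

final class PatternMatchSketch {
    static String describe(final Object message) {
        // Type test and cast in one step; 'event' is scoped to the true branch.
        if (message instanceof AssociationErrorEvent event) {
            return "association error from " + event.getRemoteAddress();
        }
        return String.valueOf(message);
    }
}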
index a4e0b70a0216abb66af500c45bcfaff90d5bd869..19df464f9e132c02a17e75294be460bad812887b 100644 (file)
@@ -5,12 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.node.utils;
 
-import com.google.common.base.Preconditions;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import static java.util.Objects.requireNonNull;
+
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MixinNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -20,64 +18,52 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
  * NormalizedNodeNavigator walks a {@link NormalizedNodeVisitor} through the NormalizedNode.
  */
 public class NormalizedNodeNavigator {
+    private final NormalizedNodeVisitor visitor;
 
-    private final org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeVisitor visitor;
-
-    public NormalizedNodeNavigator(
-            org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeVisitor visitor) {
-        Preconditions.checkNotNull(visitor, "visitor should not be null");
-        this.visitor = visitor;
+    public NormalizedNodeNavigator(final NormalizedNodeVisitor visitor) {
+        this.visitor = requireNonNull(visitor, "visitor should not be null");
     }
 
-    public void navigate(String parentPath, NormalizedNode<?, ?> normalizedNode) {
+    public void navigate(String parentPath, final NormalizedNode normalizedNode) {
         if (parentPath == null) {
             parentPath = "";
         }
         navigateNormalizedNode(0, parentPath, normalizedNode);
     }
 
-    private void navigateDataContainerNode(int level, final String parentPath,
-            final DataContainerNode<?> dataContainerNode) {
+    private void navigateDataContainerNode(final int level, final String parentPath,
+            final DataContainerNode dataContainerNode) {
         visitor.visitNode(level, parentPath, dataContainerNode);
 
-        String newParentPath = parentPath + "/" + dataContainerNode.getIdentifier().toString();
+        String newParentPath = parentPath + "/" + dataContainerNode.name().toString();
 
-        final Iterable<DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>> value = dataContainerNode
-                .getValue();
-        for (NormalizedNode<?, ?> node : value) {
-            if (node instanceof MixinNode && node instanceof NormalizedNodeContainer) {
-                navigateNormalizedNodeContainerMixin(level, newParentPath, (NormalizedNodeContainer<?, ?, ?>) node);
+        for (var node : dataContainerNode.body()) {
+            if (node instanceof MixinNode && node instanceof NormalizedNodeContainer<?> container) {
+                navigateNormalizedNodeContainerMixin(level, newParentPath, container);
             } else {
                 navigateNormalizedNode(level, newParentPath, node);
             }
         }
-
     }
 
-    private void navigateNormalizedNodeContainerMixin(int level, final String parentPath,
-            NormalizedNodeContainer<?, ?, ?> node) {
+    private void navigateNormalizedNodeContainerMixin(final int level, final String parentPath,
+            final NormalizedNodeContainer<?> node) {
         visitor.visitNode(level, parentPath, node);
 
-        String newParentPath = parentPath + "/" + node.getIdentifier().toString();
+        String newParentPath = parentPath + "/" + node.name().toString();
 
-        final Iterable<? extends NormalizedNode<?, ?>> value = node.getValue();
-        for (NormalizedNode<?, ?> normalizedNode : value) {
-            if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer) {
-                navigateNormalizedNodeContainerMixin(level + 1, newParentPath,
-                        (NormalizedNodeContainer<?, ?, ?>) normalizedNode);
+        for (var normalizedNode : node.body()) {
+            if (normalizedNode instanceof MixinNode && normalizedNode instanceof NormalizedNodeContainer<?> container) {
+                navigateNormalizedNodeContainerMixin(level + 1, newParentPath, container);
             } else {
                 navigateNormalizedNode(level, newParentPath, normalizedNode);
             }
         }
-
     }
 
-    private void navigateNormalizedNode(int level, String parentPath, NormalizedNode<?, ?> normalizedNode) {
-        if (normalizedNode instanceof DataContainerNode) {
-
-            final DataContainerNode<?> dataContainerNode = (DataContainerNode<?>) normalizedNode;
-
-            navigateDataContainerNode(level + 1, parentPath, dataContainerNode);
+    private void navigateNormalizedNode(final int level, final String parentPath, final NormalizedNode normalizedNode) {
+        if (normalizedNode instanceof DataContainerNode dataContainer) {
+            navigateDataContainerNode(level + 1, parentPath, dataContainer);
         } else {
             visitor.visitNode(level + 1, parentPath, normalizedNode);
         }
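Because NormalizedNodeVisitor (next hunk) declares a single method, it can be supplied as a lambda. A hedged usage sketch; how the NormalizedNode being walked is obtained is outside the scope of this change:

import org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeNavigator;
import org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeVisitor;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

final class NavigatorSketch {
    static void dump(final NormalizedNode root) {
        // Print each visited node, indented by its depth in the tree.
        NormalizedNodeVisitor printer = (level, parentPath, node) ->
            System.out.println("  ".repeat(level) + parentPath + " : " + node.getClass().getSimpleName());
        new NormalizedNodeNavigator(printer).navigate(null, root);
    }
}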
index c04f0ced5f2e44b007d9c59743cab1c25318ae7b..d455c41754001b4c88d7ce47adec7741bbde952f 100644 (file)
@@ -5,11 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.node.utils;
 
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 public interface NormalizedNodeVisitor {
-    void visitNode(int level, String parentPath, NormalizedNode<?, ?> normalizedNode);
+    void visitNode(int level, String parentPath, NormalizedNode normalizedNode);
 }
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactory.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactory.java
deleted file mode 100644 (file)
index 5c7f6d1..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import org.opendaylight.yangtools.yang.common.QName;
-
-public final class QNameFactory {
-
-    private static final int MAX_QNAME_CACHE_SIZE = 10000;
-
-    private QNameFactory() {
-    }
-
-    private static final LoadingCache<String, QName> CACHE = CacheBuilder.newBuilder().maximumSize(MAX_QNAME_CACHE_SIZE)
-            .softValues().build(new CacheLoader<String, QName>() {
-                @Override
-                public QName load(String key) {
-                    return QName.create(key);
-                }
-            });
-
-    public static QName create(String name) {
-        return CACHE.getUnchecked(name);
-    }
-}
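The removed factory was a soft-valued interning cache keyed by the QName's string form. Purely to illustrate that idiom (this is not the replacement the project adopted), the same cache can be expressed with a lambda-friendly CacheLoader:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.opendaylight.yangtools.yang.common.QName;

final class QNameCacheSketch {
    private static final LoadingCache<String, QName> CACHE = CacheBuilder.newBuilder()
            .maximumSize(10_000)
            .softValues()
            // QName.create(String) does the actual parsing, as in the removed class.
            .build(CacheLoader.from(QName::create));

    static QName lookup(final String name) {
        return CACHE.getUnchecked(name);
    }
}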
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataOutput.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/AbstractNormalizedNodeDataOutput.java
deleted file mode 100755 (executable)
index 02bebeb..0000000
+++ /dev/null
@@ -1,499 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import com.google.common.base.Preconditions;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.io.StringWriter;
-import java.nio.charset.StandardCharsets;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Set;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.TransformerFactoryConfigurationError;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-abstract class AbstractNormalizedNodeDataOutput implements NormalizedNodeDataOutput, NormalizedNodeStreamWriter {
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractNormalizedNodeDataOutput.class);
-
-    private final DataOutput output;
-
-    private NormalizedNodeWriter normalizedNodeWriter;
-    private boolean headerWritten;
-    private QName lastLeafSetQName;
-
-    AbstractNormalizedNodeDataOutput(final DataOutput output) {
-        this.output = Preconditions.checkNotNull(output);
-    }
-
-    private void ensureHeaderWritten() throws IOException {
-        if (!headerWritten) {
-            output.writeByte(TokenTypes.SIGNATURE_MARKER);
-            output.writeShort(streamVersion());
-            headerWritten = true;
-        }
-    }
-
-    protected abstract short streamVersion();
-
-    protected abstract void writeQName(QName qname) throws IOException;
-
-    protected abstract void writeString(String string) throws IOException;
-
-    @Override
-    public final void write(final int value) throws IOException {
-        ensureHeaderWritten();
-        output.write(value);
-    }
-
-    @Override
-    public final void write(final byte[] bytes) throws IOException {
-        ensureHeaderWritten();
-        output.write(bytes);
-    }
-
-    @Override
-    public final void write(final byte[] bytes, final int off, final int len) throws IOException {
-        ensureHeaderWritten();
-        output.write(bytes, off, len);
-    }
-
-    @Override
-    public final void writeBoolean(final boolean value) throws IOException {
-        ensureHeaderWritten();
-        output.writeBoolean(value);
-    }
-
-    @Override
-    public final void writeByte(final int value) throws IOException {
-        ensureHeaderWritten();
-        output.writeByte(value);
-    }
-
-    @Override
-    public final void writeShort(final int value) throws IOException {
-        ensureHeaderWritten();
-        output.writeShort(value);
-    }
-
-    @Override
-    public final void writeChar(final int value) throws IOException {
-        ensureHeaderWritten();
-        output.writeChar(value);
-    }
-
-    @Override
-    public final void writeInt(final int value) throws IOException {
-        ensureHeaderWritten();
-        output.writeInt(value);
-    }
-
-    @Override
-    public final void writeLong(final long value) throws IOException {
-        ensureHeaderWritten();
-        output.writeLong(value);
-    }
-
-    @Override
-    public final void writeFloat(final float value) throws IOException {
-        ensureHeaderWritten();
-        output.writeFloat(value);
-    }
-
-    @Override
-    public final void writeDouble(final double value) throws IOException {
-        ensureHeaderWritten();
-        output.writeDouble(value);
-    }
-
-    @Override
-    public final void writeBytes(final String str) throws IOException {
-        ensureHeaderWritten();
-        output.writeBytes(str);
-    }
-
-    @Override
-    public final void writeChars(final String str) throws IOException {
-        ensureHeaderWritten();
-        output.writeChars(str);
-    }
-
-    @Override
-    public final void writeUTF(final String str) throws IOException {
-        ensureHeaderWritten();
-        output.writeUTF(str);
-    }
-
-    private NormalizedNodeWriter normalizedNodeWriter() {
-        if (normalizedNodeWriter == null) {
-            normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(this);
-        }
-
-        return normalizedNodeWriter;
-    }
-
-    @Override
-    public void writeNormalizedNode(final NormalizedNode<?, ?> node) throws IOException {
-        ensureHeaderWritten();
-        normalizedNodeWriter().write(node);
-    }
-
-    @Override
-    public void leafNode(final NodeIdentifier name, final Object value) throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Writing a new leaf node");
-        startNode(name.getNodeType(), NodeTypes.LEAF_NODE);
-
-        writeObject(value);
-    }
-
-    @Override
-    public void startLeafSet(final NodeIdentifier name, final int childSizeHint)
-
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Starting a new leaf set");
-
-        lastLeafSetQName = name.getNodeType();
-        startNode(name.getNodeType(), NodeTypes.LEAF_SET);
-    }
-
-    @Override
-    public void startOrderedLeafSet(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Starting a new ordered leaf set");
-
-        lastLeafSetQName = name.getNodeType();
-        startNode(name.getNodeType(), NodeTypes.ORDERED_LEAF_SET);
-    }
-
-    @Override
-    public void leafSetEntryNode(final QName name, final Object value) throws IOException, IllegalArgumentException {
-        LOG.trace("Writing a new leaf set entry node");
-
-        output.writeByte(NodeTypes.LEAF_SET_ENTRY_NODE);
-
-        // lastLeafSetQName is set if the parent LeafSetNode was previously written. Otherwise this is a
-        // stand alone LeafSetEntryNode so write out it's name here.
-        if (lastLeafSetQName == null) {
-            writeQName(name);
-        }
-
-        writeObject(value);
-    }
-
-    @Override
-    public void startContainerNode(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-
-        LOG.trace("Starting a new container node");
-
-        startNode(name.getNodeType(), NodeTypes.CONTAINER_NODE);
-    }
-
-    @Override
-    public void startYangModeledAnyXmlNode(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-
-        LOG.trace("Starting a new yang modeled anyXml node");
-
-        startNode(name.getNodeType(), NodeTypes.YANG_MODELED_ANY_XML_NODE);
-    }
-
-    @Override
-    public void startUnkeyedList(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Starting a new unkeyed list");
-
-        startNode(name.getNodeType(), NodeTypes.UNKEYED_LIST);
-    }
-
-    @Override
-    public void startUnkeyedListItem(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalStateException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Starting a new unkeyed list item");
-
-        startNode(name.getNodeType(), NodeTypes.UNKEYED_LIST_ITEM);
-    }
-
-    @Override
-    public void startMapNode(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Starting a new map node");
-
-        startNode(name.getNodeType(), NodeTypes.MAP_NODE);
-    }
-
-    @Override
-    public void startMapEntryNode(final NodeIdentifierWithPredicates identifier, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(identifier, "Node identifier should not be null");
-        LOG.trace("Starting a new map entry node");
-        startNode(identifier.getNodeType(), NodeTypes.MAP_ENTRY_NODE);
-
-        writeKeyValueMap(identifier.getKeyValues());
-
-    }
-
-    @Override
-    public void startOrderedMapNode(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Starting a new ordered map node");
-
-        startNode(name.getNodeType(), NodeTypes.ORDERED_MAP_NODE);
-    }
-
-    @Override
-    public void startChoiceNode(final NodeIdentifier name, final int childSizeHint)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Starting a new choice node");
-
-        startNode(name.getNodeType(), NodeTypes.CHOICE_NODE);
-    }
-
-    @Override
-    public void startAugmentationNode(final AugmentationIdentifier identifier)
-            throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(identifier, "Node identifier should not be null");
-        LOG.trace("Starting a new augmentation node");
-
-        output.writeByte(NodeTypes.AUGMENTATION_NODE);
-        writeQNameSet(identifier.getPossibleChildNames());
-    }
-
-    @Override
-    public void anyxmlNode(final NodeIdentifier name, final Object value) throws IOException, IllegalArgumentException {
-        Preconditions.checkNotNull(name, "Node identifier should not be null");
-        LOG.trace("Writing any xml node");
-
-        startNode(name.getNodeType(), NodeTypes.ANY_XML_NODE);
-
-        try {
-            StreamResult xmlOutput = new StreamResult(new StringWriter());
-            TransformerFactory.newInstance().newTransformer().transform((DOMSource)value, xmlOutput);
-            writeObject(xmlOutput.getWriter().toString());
-        } catch (TransformerException | TransformerFactoryConfigurationError e) {
-            throw new IOException("Error writing anyXml", e);
-        }
-    }
-
-    @Override
-    public void endNode() throws IOException, IllegalStateException {
-        LOG.trace("Ending the node");
-        lastLeafSetQName = null;
-        output.writeByte(NodeTypes.END_NODE);
-    }
-
-    @Override
-    public void close() throws IOException {
-        flush();
-    }
-
-    @Override
-    public void flush() throws IOException {
-        if (output instanceof OutputStream) {
-            ((OutputStream)output).flush();
-        }
-    }
-
-    private void startNode(final QName qname, final byte nodeType) throws IOException {
-        Preconditions.checkNotNull(qname, "QName of node identifier should not be null.");
-
-        ensureHeaderWritten();
-
-        // First write the type of node
-        output.writeByte(nodeType);
-        // Write Start Tag
-        writeQName(qname);
-    }
-
-    private void writeObjSet(final Set<?> set) throws IOException {
-        output.writeInt(set.size());
-        for (Object o : set) {
-            Preconditions.checkArgument(o instanceof String, "Expected value type to be String but was %s (%s)",
-                o.getClass(), o);
-
-            writeString((String) o);
-        }
-    }
-
-    @Override
-    public void writeSchemaPath(final SchemaPath path) throws IOException {
-        ensureHeaderWritten();
-        output.writeBoolean(path.isAbsolute());
-
-        final Collection<QName> qnames = path.getPath();
-        output.writeInt(qnames.size());
-        for (QName qname : qnames) {
-            writeQName(qname);
-        }
-    }
-
-    @Override
-    public void writeYangInstanceIdentifier(final YangInstanceIdentifier identifier) throws IOException {
-        ensureHeaderWritten();
-        writeYangInstanceIdentifierInternal(identifier);
-    }
-
-    private void writeYangInstanceIdentifierInternal(final YangInstanceIdentifier identifier) throws IOException {
-        Collection<PathArgument> pathArguments = identifier.getPathArguments();
-        output.writeInt(pathArguments.size());
-
-        for (PathArgument pathArgument : pathArguments) {
-            writePathArgument(pathArgument);
-        }
-    }
-
-    @SuppressFBWarnings(value = "BC_UNCONFIRMED_CAST",
-            justification = "The casts in the switch clauses are indirectly confirmed via the determination of 'type'.")
-    @Override
-    public void writePathArgument(final PathArgument pathArgument) throws IOException {
-
-        byte type = PathArgumentTypes.getSerializablePathArgumentType(pathArgument);
-
-        output.writeByte(type);
-
-        switch (type) {
-            case PathArgumentTypes.NODE_IDENTIFIER:
-
-                NodeIdentifier nodeIdentifier = (NodeIdentifier) pathArgument;
-
-                writeQName(nodeIdentifier.getNodeType());
-                break;
-
-            case PathArgumentTypes.NODE_IDENTIFIER_WITH_PREDICATES:
-
-                NodeIdentifierWithPredicates nodeIdentifierWithPredicates =
-                    (NodeIdentifierWithPredicates) pathArgument;
-                writeQName(nodeIdentifierWithPredicates.getNodeType());
-
-                writeKeyValueMap(nodeIdentifierWithPredicates.getKeyValues());
-                break;
-
-            case PathArgumentTypes.NODE_IDENTIFIER_WITH_VALUE :
-
-                NodeWithValue<?> nodeWithValue = (NodeWithValue<?>) pathArgument;
-
-                writeQName(nodeWithValue.getNodeType());
-                writeObject(nodeWithValue.getValue());
-                break;
-
-            case PathArgumentTypes.AUGMENTATION_IDENTIFIER :
-
-                AugmentationIdentifier augmentationIdentifier = (AugmentationIdentifier) pathArgument;
-
-                // No Qname in augmentation identifier
-                writeQNameSet(augmentationIdentifier.getPossibleChildNames());
-                break;
-            default :
-                throw new IllegalStateException("Unknown node identifier type is found : "
-                        + pathArgument.getClass().toString());
-        }
-    }
-
-    private void writeKeyValueMap(final Map<QName, Object> keyValueMap) throws IOException {
-        if (keyValueMap != null && !keyValueMap.isEmpty()) {
-            output.writeInt(keyValueMap.size());
-
-            for (Map.Entry<QName, Object> entry : keyValueMap.entrySet()) {
-                writeQName(entry.getKey());
-                writeObject(entry.getValue());
-            }
-        } else {
-            output.writeInt(0);
-        }
-    }
-
-    private void writeQNameSet(final Set<QName> children) throws IOException {
-        // Write each child's qname separately, if list is empty send count as 0
-        if (children != null && !children.isEmpty()) {
-            output.writeInt(children.size());
-            for (QName qname : children) {
-                writeQName(qname);
-            }
-        } else {
-            LOG.debug("augmentation node does not have any child");
-            output.writeInt(0);
-        }
-    }
-
-    private void writeObject(final Object value) throws IOException {
-
-        byte type = ValueTypes.getSerializableType(value);
-        // Write object type first
-        output.writeByte(type);
-
-        switch (type) {
-            case ValueTypes.BOOL_TYPE:
-                output.writeBoolean((Boolean) value);
-                break;
-            case ValueTypes.QNAME_TYPE:
-                writeQName((QName) value);
-                break;
-            case ValueTypes.INT_TYPE:
-                output.writeInt((Integer) value);
-                break;
-            case ValueTypes.BYTE_TYPE:
-                output.writeByte((Byte) value);
-                break;
-            case ValueTypes.LONG_TYPE:
-                output.writeLong((Long) value);
-                break;
-            case ValueTypes.SHORT_TYPE:
-                output.writeShort((Short) value);
-                break;
-            case ValueTypes.BITS_TYPE:
-                writeObjSet((Set<?>) value);
-                break;
-            case ValueTypes.BINARY_TYPE:
-                byte[] bytes = (byte[]) value;
-                output.writeInt(bytes.length);
-                output.write(bytes);
-                break;
-            case ValueTypes.YANG_IDENTIFIER_TYPE:
-                writeYangInstanceIdentifierInternal((YangInstanceIdentifier) value);
-                break;
-            case ValueTypes.EMPTY_TYPE:
-                break;
-            case ValueTypes.STRING_BYTES_TYPE:
-                final byte[] valueBytes = value.toString().getBytes(StandardCharsets.UTF_8);
-                output.writeInt(valueBytes.length);
-                output.write(valueBytes);
-                break;
-            default:
-                output.writeUTF(value.toString());
-                break;
-        }
-    }
-}
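A pattern worth noting in the removed writer is the lazily emitted header: every public write funnels through ensureHeaderWritten(), so the signature marker and stream version go out exactly once, right before the first payload byte. A stripped-down sketch of that idiom (the marker value below is a placeholder, not the project's constant):

import java.io.DataOutput;
import java.io.IOException;

abstract class LazyHeaderOutput {
    private static final byte SIGNATURE_MARKER = (byte) 0xAB; // placeholder value

    private final DataOutput output;
    private boolean headerWritten;

    LazyHeaderOutput(final DataOutput output) {
        this.output = output;
    }

    abstract short streamVersion();

    final void writeInt(final int value) throws IOException {
        ensureHeaderWritten();
        output.writeInt(value);
    }

    private void ensureHeaderWritten() throws IOException {
        if (!headerWritten) {
            // Emitted once, before the first real write.
            output.writeByte(SIGNATURE_MARKER);
            output.writeShort(streamVersion());
            headerWritten = true;
        }
    }
}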
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/InvalidNormalizedNodeStreamException.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/InvalidNormalizedNodeStreamException.java
deleted file mode 100644 (file)
index da60496..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import java.io.IOException;
-
-/**
- * Exception thrown from NormalizedNodeInputStreamReader when the input stream does not contain
- * valid serialized data.
- *
- * @author Thomas Pantelis
- */
-public class InvalidNormalizedNodeStreamException extends IOException {
-    private static final long serialVersionUID = 1L;
-
-    public InvalidNormalizedNodeStreamException(String message) {
-        super(message);
-    }
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NodeTypes.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NodeTypes.java
deleted file mode 100644 (file)
index dc034cc..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-final class NodeTypes {
-    public static final byte LEAF_NODE = 1;
-    public static final byte LEAF_SET = 2;
-    public static final byte LEAF_SET_ENTRY_NODE = 3;
-    public static final byte CONTAINER_NODE = 4;
-    public static final byte UNKEYED_LIST = 5;
-    public static final byte UNKEYED_LIST_ITEM = 6;
-    public static final byte MAP_NODE = 7;
-    public static final byte MAP_ENTRY_NODE = 8;
-    public static final byte ORDERED_MAP_NODE = 9;
-    public static final byte CHOICE_NODE = 10;
-    public static final byte AUGMENTATION_NODE = 11;
-    public static final byte ANY_XML_NODE = 12;
-    public static final byte END_NODE = 13;
-    public static final byte ORDERED_LEAF_SET = 14;
-    public static final byte YANG_MODELED_ANY_XML_NODE = 15;
-
-    private NodeTypes() {
-        throw new UnsupportedOperationException("utility class");
-    }
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataInput.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataInput.java
deleted file mode 100644 (file)
index 7d793b1..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import com.google.common.annotations.Beta;
-import java.io.DataInput;
-import java.io.IOException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Interface for reading {@link NormalizedNode}s, {@link YangInstanceIdentifier}s, {@link PathArgument}s
- * and {@link SchemaPath}s.
- */
-@Beta
-public interface NormalizedNodeDataInput extends DataInput {
-    /**
-     * Read a normalized node from the reader.
-     *
-     * @return Next node from the stream, or null if end of stream has been reached.
-     * @throws IOException if an error occurs
-     * @throws IllegalStateException if the dictionary has been detached
-     */
-    NormalizedNode<?, ?> readNormalizedNode() throws IOException;
-
-    YangInstanceIdentifier readYangInstanceIdentifier() throws IOException;
-
-    PathArgument readPathArgument() throws IOException;
-
-    SchemaPath readSchemaPath() throws IOException;
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataOutput.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeDataOutput.java
deleted file mode 100644 (file)
index 7c42da1..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import com.google.common.annotations.Beta;
-import java.io.DataOutput;
-import java.io.IOException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Interface for emitting {@link NormalizedNode}s, {@link YangInstanceIdentifier}s, {@link PathArgument}s
- * and {@link SchemaPath}s.
- */
-@Beta
-public interface NormalizedNodeDataOutput extends AutoCloseable, DataOutput {
-    void writeNormalizedNode(NormalizedNode<?, ?> normalizedNode) throws IOException;
-
-    void writePathArgument(PathArgument pathArgument) throws IOException;
-
-    void writeYangInstanceIdentifier(YangInstanceIdentifier identifier) throws IOException;
-
-    void writeSchemaPath(SchemaPath path) throws IOException;
-
-    @Override
-    void close() throws IOException;
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputOutput.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputOutput.java
deleted file mode 100644 (file)
index 5ad195a..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import com.google.common.annotations.Beta;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import org.eclipse.jdt.annotation.NonNull;
-
-@Beta
-public final class NormalizedNodeInputOutput {
-    private NormalizedNodeInputOutput() {
-        throw new UnsupportedOperationException();
-    }
-
-    /**
-     * Creates a new {@link NormalizedNodeDataInput} instance that reads from the given input. This method first reads
-     * and validates that the input contains a valid NormalizedNode stream.
-     *
-     * @param input the DataInput to read from
-     * @return a new {@link NormalizedNodeDataInput} instance
-     * @throws IOException if an error occurs reading from the input
-     */
-    public static NormalizedNodeDataInput newDataInput(final @NonNull DataInput input) throws IOException {
-        final byte marker = input.readByte();
-        if (marker != TokenTypes.SIGNATURE_MARKER) {
-            throw new InvalidNormalizedNodeStreamException(String.format("Invalid signature marker: %d", marker));
-        }
-
-        final short version = input.readShort();
-        switch (version) {
-            case TokenTypes.LITHIUM_VERSION:
-                return new NormalizedNodeInputStreamReader(input, true);
-            default:
-                throw new InvalidNormalizedNodeStreamException(String.format("Unhandled stream version %s", version));
-        }
-    }
-
-    /**
-     * Creates a new {@link NormalizedNodeDataInput} instance that reads from the given input. This method does not
-     * perform any initial validation of the input stream.
-     *
-     * @param input the DataInput to read from
-     * @return a new {@link NormalizedNodeDataInput} instance
-     */
-    public static NormalizedNodeDataInput newDataInputWithoutValidation(final @NonNull DataInput input) {
-        return new NormalizedNodeInputStreamReader(input, false);
-    }
-
-    /**
-     * Creates a new {@link NormalizedNodeDataOutput} instance that writes to the given output.
-     *
-     * @param output the DataOutput to write to
-     * @return a new {@link NormalizedNodeDataOutput} instance
-     */
-    public static NormalizedNodeDataOutput newDataOutput(final @NonNull DataOutput output) {
-        return new NormalizedNodeOutputStreamWriter(output);
-    }
-}
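Before its removal, this factory paired with the NormalizedNodeDataOutput/NormalizedNodeDataInput interfaces above for simple byte-level round trips; the equivalent streams now come from elsewhere. A hedged sketch of how the removed API was used (RoundTripSketch is invented for illustration):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputOutput;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

final class RoundTripSketch {
    static NormalizedNode<?, ?> roundTrip(final NormalizedNode<?, ?> node) throws IOException {
        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (NormalizedNodeDataOutput out = NormalizedNodeInputOutput.newDataOutput(new DataOutputStream(bytes))) {
            out.writeNormalizedNode(node); // writes header + node
        }
        final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        // newDataInput() validates the signature marker and version before reading.
        return NormalizedNodeInputOutput.newDataInput(in).readNormalizedNode();
    }
}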
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputStreamReader.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeInputStreamReader.java
deleted file mode 100755 (executable)
index b042963..0000000
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import java.io.DataInput;
-import java.io.IOException;
-import java.io.StringReader;
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.transform.dom.DOMSource;
-import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
-import org.opendaylight.yangtools.util.ImmutableOffsetMapTemplate;
-import org.opendaylight.yangtools.yang.common.Empty;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeContainerBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Element;
-import org.xml.sax.InputSource;
-import org.xml.sax.SAXException;
-
-/**
- * NormalizedNodeInputStreamReader reads the byte stream and constructs the normalized node, including its child
- * nodes. The process is recursive: each NodeTypes value marks the start of a node, except END_NODE. If a node
- * can have children, that node's end is determined by the appearance of END_NODE.
- */
-public class NormalizedNodeInputStreamReader implements NormalizedNodeDataInput {
-
-    private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodeInputStreamReader.class);
-
-    private static final String REVISION_ARG = "?revision=";
-
-    private final DataInput input;
-
-    private final List<String> codedStringMap = new ArrayList<>();
-
-    private QName lastLeafSetQName;
-
-    private NormalizedNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier,
-                                      Object, LeafNode<Object>> leafBuilder;
-
-    @SuppressWarnings("rawtypes")
-    private NormalizedNodeAttrBuilder<NodeWithValue, Object, LeafSetEntryNode<Object>> leafSetEntryBuilder;
-
-    private final StringBuilder reusableStringBuilder = new StringBuilder(50);
-
-    private boolean readSignatureMarker = true;
-
-    NormalizedNodeInputStreamReader(final DataInput input, final boolean versionChecked) {
-        this.input = Preconditions.checkNotNull(input);
-        readSignatureMarker = !versionChecked;
-    }
-
-    @Override
-    public NormalizedNode<?, ?> readNormalizedNode() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return readNormalizedNodeInternal();
-    }
-
-    private void readSignatureMarkerAndVersionIfNeeded() throws IOException {
-        if (readSignatureMarker) {
-            readSignatureMarker = false;
-
-            final byte marker = input.readByte();
-            if (marker != TokenTypes.SIGNATURE_MARKER) {
-                throw new InvalidNormalizedNodeStreamException(String.format(
-                        "Invalid signature marker: %d", marker));
-            }
-
-            final short version = input.readShort();
-            if (version != TokenTypes.LITHIUM_VERSION) {
-                throw new InvalidNormalizedNodeStreamException(String.format("Unhandled stream version %s", version));
-            }
-        }
-    }
-
-    private NormalizedNode<?, ?> readNormalizedNodeInternal() throws IOException {
-        // each node should start with a byte
-        byte nodeType = input.readByte();
-
-        if (nodeType == NodeTypes.END_NODE) {
-            LOG.trace("End node reached. return");
-            lastLeafSetQName = null;
-            return null;
-        }
-
-        switch (nodeType) {
-            case NodeTypes.AUGMENTATION_NODE :
-                YangInstanceIdentifier.AugmentationIdentifier augIdentifier =
-                    new YangInstanceIdentifier.AugmentationIdentifier(readQNameSet());
-
-                LOG.trace("Reading augmentation node {} ", augIdentifier);
-
-                return addDataContainerChildren(Builders.augmentationBuilder()
-                        .withNodeIdentifier(augIdentifier)).build();
-
-            case NodeTypes.LEAF_SET_ENTRY_NODE :
-                QName name = lastLeafSetQName;
-                if (name == null) {
-                    name = readQName();
-                }
-
-                Object value = readObject();
-                NodeWithValue<Object> leafIdentifier = new NodeWithValue<>(name, value);
-
-                LOG.trace("Reading leaf set entry node {}, value {}", leafIdentifier, value);
-
-                return leafSetEntryBuilder().withNodeIdentifier(leafIdentifier).withValue(value).build();
-
-            case NodeTypes.MAP_ENTRY_NODE :
-                NodeIdentifierWithPredicates entryIdentifier = readNormalizedNodeWithPredicates();
-
-                LOG.trace("Reading map entry node {} ", entryIdentifier);
-
-                return addDataContainerChildren(Builders.mapEntryBuilder()
-                        .withNodeIdentifier(entryIdentifier)).build();
-
-            default :
-                return readNodeIdentifierDependentNode(nodeType, new NodeIdentifier(readQName()));
-        }
-    }
-
-    private NormalizedNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier,
-                                      Object, LeafNode<Object>> leafBuilder() {
-        if (leafBuilder == null) {
-            leafBuilder = Builders.leafBuilder();
-        }
-
-        return leafBuilder;
-    }
-
-    @SuppressWarnings("rawtypes")
-    private NormalizedNodeAttrBuilder<NodeWithValue, Object,
-                                      LeafSetEntryNode<Object>> leafSetEntryBuilder() {
-        if (leafSetEntryBuilder == null) {
-            leafSetEntryBuilder = Builders.leafSetEntryBuilder();
-        }
-
-        return leafSetEntryBuilder;
-    }
-
-    private NormalizedNode<?, ?> readNodeIdentifierDependentNode(final byte nodeType, final NodeIdentifier identifier)
-        throws IOException {
-
-        switch (nodeType) {
-            case NodeTypes.LEAF_NODE :
-                LOG.trace("Read leaf node {}", identifier);
-                // Read the object value
-                return leafBuilder().withNodeIdentifier(identifier).withValue(readObject()).build();
-
-            case NodeTypes.ANY_XML_NODE :
-                LOG.trace("Read xml node");
-                return Builders.anyXmlBuilder().withNodeIdentifier(identifier).withValue(readDOMSource()).build();
-
-            case NodeTypes.MAP_NODE :
-                LOG.trace("Read map node {}", identifier);
-                return addDataContainerChildren(Builders.mapBuilder().withNodeIdentifier(identifier)).build();
-
-            case NodeTypes.CHOICE_NODE:
-                LOG.trace("Read choice node {}", identifier);
-                return addDataContainerChildren(Builders.choiceBuilder().withNodeIdentifier(identifier)).build();
-
-            case NodeTypes.ORDERED_MAP_NODE:
-                LOG.trace("Reading ordered map node {}", identifier);
-                return addDataContainerChildren(Builders.orderedMapBuilder().withNodeIdentifier(identifier)).build();
-
-            case NodeTypes.UNKEYED_LIST:
-                LOG.trace("Read unkeyed list node {}", identifier);
-                return addDataContainerChildren(Builders.unkeyedListBuilder().withNodeIdentifier(identifier)).build();
-
-            case NodeTypes.UNKEYED_LIST_ITEM:
-                LOG.trace("Read unkeyed list item node {}", identifier);
-                return addDataContainerChildren(Builders.unkeyedListEntryBuilder()
-                        .withNodeIdentifier(identifier)).build();
-
-            case NodeTypes.CONTAINER_NODE:
-                LOG.trace("Read container node {}", identifier);
-                return addDataContainerChildren(Builders.containerBuilder().withNodeIdentifier(identifier)).build();
-
-            case NodeTypes.LEAF_SET :
-                LOG.trace("Read leaf set node {}", identifier);
-                return addLeafSetChildren(identifier.getNodeType(),
-                        Builders.leafSetBuilder().withNodeIdentifier(identifier)).build();
-
-            case NodeTypes.ORDERED_LEAF_SET:
-                LOG.trace("Read ordered leaf set node {}", identifier);
-                return addLeafSetChildren(identifier.getNodeType(),
-                        Builders.orderedLeafSetBuilder().withNodeIdentifier(identifier)).build();
-
-            default:
-                return null;
-        }
-    }
-
-    private DOMSource readDOMSource() throws IOException {
-        String xml = readObject().toString();
-        try {
-            DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
-            factory.setNamespaceAware(true);
-            Element node = factory.newDocumentBuilder().parse(
-                    new InputSource(new StringReader(xml))).getDocumentElement();
-            return new DOMSource(node);
-        } catch (SAXException | ParserConfigurationException e) {
-            throw new IOException("Error parsing XML: " + xml, e);
-        }
-    }
-
-    private QName readQName() throws IOException {
-        // Read in the same sequence of writing
-        String localName = readCodedString();
-        String namespace = readCodedString();
-        String revision = readCodedString();
-
-        String qname;
-        if (!Strings.isNullOrEmpty(revision)) {
-            qname = reusableStringBuilder.append('(').append(namespace).append(REVISION_ARG).append(revision)
-                    .append(')').append(localName).toString();
-        } else {
-            qname = reusableStringBuilder.append('(').append(namespace).append(')').append(localName).toString();
-        }
-
-        reusableStringBuilder.delete(0, reusableStringBuilder.length());
-        return QNameFactory.create(qname);
-    }
-
-
-    private String readCodedString() throws IOException {
-        final byte valueType = input.readByte();
-        switch (valueType) {
-            case TokenTypes.IS_NULL_VALUE:
-                return null;
-            case TokenTypes.IS_CODE_VALUE:
-                final int code = input.readInt();
-                try {
-                    return codedStringMap.get(code);
-                } catch (IndexOutOfBoundsException e) {
-                    throw new IOException("String code " + code + " was not found", e);
-                }
-            case TokenTypes.IS_STRING_VALUE:
-                final String value = input.readUTF().intern();
-                codedStringMap.add(value);
-                return value;
-            default:
-                throw new IOException("Unhandled string value type " + valueType);
-        }
-    }
-
-    private Set<QName> readQNameSet() throws IOException {
-        // Read the children count
-        int count = input.readInt();
-        Set<QName> children = new HashSet<>(count);
-        for (int i = 0; i < count; i++) {
-            children.add(readQName());
-        }
-        return children;
-    }
-
-    private NodeIdentifierWithPredicates readNormalizedNodeWithPredicates() throws IOException {
-        final QName qname = readQName();
-        final int count = input.readInt();
-        switch (count) {
-            case 0:
-                return new NodeIdentifierWithPredicates(qname);
-            case 1:
-                return new NodeIdentifierWithPredicates(qname, readQName(), readObject());
-            default:
-                // ImmutableList is used by ImmutableOffsetMapTemplate for lookups, hence we use that.
-                final Builder<QName> keys = ImmutableList.builderWithExpectedSize(count);
-                final Object[] values = new Object[count];
-                for (int i = 0; i < count; i++) {
-                    keys.add(readQName());
-                    values[i] = readObject();
-                }
-
-                return new NodeIdentifierWithPredicates(qname, ImmutableOffsetMapTemplate.ordered(keys.build())
-                    .instantiateWithValues(values));
-        }
-    }
-
-    private Object readObject() throws IOException {
-        byte objectType = input.readByte();
-        switch (objectType) {
-            case ValueTypes.BITS_TYPE:
-                return readObjSet();
-
-            case ValueTypes.BOOL_TYPE :
-                return input.readBoolean();
-
-            case ValueTypes.BYTE_TYPE :
-                return input.readByte();
-
-            case ValueTypes.INT_TYPE :
-                return input.readInt();
-
-            case ValueTypes.LONG_TYPE :
-                return input.readLong();
-
-            case ValueTypes.QNAME_TYPE :
-                return readQName();
-
-            case ValueTypes.SHORT_TYPE :
-                return input.readShort();
-
-            case ValueTypes.STRING_TYPE :
-                return input.readUTF();
-
-            case ValueTypes.STRING_BYTES_TYPE:
-                return readStringBytes();
-
-            case ValueTypes.BIG_DECIMAL_TYPE :
-                return new BigDecimal(input.readUTF());
-
-            case ValueTypes.BIG_INTEGER_TYPE :
-                return new BigInteger(input.readUTF());
-
-            case ValueTypes.BINARY_TYPE :
-                byte[] bytes = new byte[input.readInt()];
-                input.readFully(bytes);
-                return bytes;
-
-            case ValueTypes.YANG_IDENTIFIER_TYPE :
-                return readYangInstanceIdentifierInternal();
-
-            case ValueTypes.EMPTY_TYPE:
-            // Leaf nodes no longer allow null values and thus we no longer emit null values. Previously, the "empty"
-            // yang type was represented as null so we translate an incoming null value to Empty. It was possible for
-            // a BI user to set a string leaf to null and we're rolling the dice here but the chances for that are
-            // very low. We'd have to know the yang type but, even if we did, we can't let a null value pass upstream
-            // so we'd have to drop the leaf which might cause other issues.
-            case ValueTypes.NULL_TYPE:
-                return Empty.getInstance();
-
-            default :
-                return null;
-        }
-    }
-
-    private String readStringBytes() throws IOException {
-        byte[] bytes = new byte[input.readInt()];
-        input.readFully(bytes);
-        return new String(bytes, StandardCharsets.UTF_8);
-    }
-
-    @Override
-    public SchemaPath readSchemaPath() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-
-        final boolean absolute = input.readBoolean();
-        final int size = input.readInt();
-
-        final Builder<QName> qnames = ImmutableList.builderWithExpectedSize(size);
-        for (int i = 0; i < size; ++i) {
-            qnames.add(readQName());
-        }
-        return SchemaPath.create(qnames.build(), absolute);
-    }
-
-    @Override
-    public YangInstanceIdentifier readYangInstanceIdentifier() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return readYangInstanceIdentifierInternal();
-    }
-
-    private YangInstanceIdentifier readYangInstanceIdentifierInternal() throws IOException {
-        int size = input.readInt();
-        final Builder<PathArgument> pathArguments = ImmutableList.builderWithExpectedSize(size);
-        for (int i = 0; i < size; i++) {
-            pathArguments.add(readPathArgument());
-        }
-        return YangInstanceIdentifier.create(pathArguments.build());
-    }
-
-    private Set<String> readObjSet() throws IOException {
-        int count = input.readInt();
-        Set<String> children = new HashSet<>(count);
-        for (int i = 0; i < count; i++) {
-            children.add(readCodedString());
-        }
-        return children;
-    }
-
-    @Override
-    public PathArgument readPathArgument() throws IOException {
-        // read Type
-        int type = input.readByte();
-
-        switch (type) {
-
-            case PathArgumentTypes.AUGMENTATION_IDENTIFIER :
-                return new YangInstanceIdentifier.AugmentationIdentifier(readQNameSet());
-
-            case PathArgumentTypes.NODE_IDENTIFIER :
-                return new NodeIdentifier(readQName());
-
-            case PathArgumentTypes.NODE_IDENTIFIER_WITH_PREDICATES :
-                return readNormalizedNodeWithPredicates();
-
-            case PathArgumentTypes.NODE_IDENTIFIER_WITH_VALUE :
-                return new NodeWithValue<>(readQName(), readObject());
-
-            default :
-                return null;
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    private ListNodeBuilder<Object, LeafSetEntryNode<Object>> addLeafSetChildren(final QName nodeType,
-            final ListNodeBuilder<Object, LeafSetEntryNode<Object>> builder) throws IOException {
-
-        LOG.trace("Reading children of leaf set");
-
-        lastLeafSetQName = nodeType;
-
-        LeafSetEntryNode<Object> child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
-
-        while (child != null) {
-            builder.withChild(child);
-            child = (LeafSetEntryNode<Object>)readNormalizedNodeInternal();
-        }
-        return builder;
-    }
-
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    private NormalizedNodeContainerBuilder addDataContainerChildren(
-            final NormalizedNodeContainerBuilder builder) throws IOException {
-        LOG.trace("Reading data container (leaf nodes) nodes");
-
-        NormalizedNode<?, ?> child = readNormalizedNodeInternal();
-
-        while (child != null) {
-            builder.addChild(child);
-            child = readNormalizedNodeInternal();
-        }
-        return builder;
-    }
-
-    @Override
-    public void readFully(final byte[] value) throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        input.readFully(value);
-    }
-
-    @Override
-    public void readFully(final byte[] str, final int off, final int len) throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        input.readFully(str, off, len);
-    }
-
-    @Override
-    public int skipBytes(final int num) throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.skipBytes(num);
-    }
-
-    @Override
-    public boolean readBoolean() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readBoolean();
-    }
-
-    @Override
-    public byte readByte() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readByte();
-    }
-
-    @Override
-    public int readUnsignedByte() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readUnsignedByte();
-    }
-
-    @Override
-    public short readShort() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readShort();
-    }
-
-    @Override
-    public int readUnsignedShort() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readUnsignedShort();
-    }
-
-    @Override
-    public char readChar() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readChar();
-    }
-
-    @Override
-    public int readInt() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readInt();
-    }
-
-    @Override
-    public long readLong() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readLong();
-    }
-
-    @Override
-    public float readFloat() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readFloat();
-    }
-
-    @Override
-    public double readDouble() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readDouble();
-    }
-
-    @Override
-    public String readLine() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readLine();
-    }
-
-    @Override
-    public String readUTF() throws IOException {
-        readSignatureMarkerAndVersionIfNeeded();
-        return input.readUTF();
-    }
-}
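
The readQName() logic above is the decode side of the three-string QName encoding (local name, namespace, optional revision) emitted by the writer removed below. A minimal, JDK-only sketch of the string it reassembles before delegating to QNameFactory.create(); the namespace, revision and local name values are made up for illustration:

    // Rebuild the "(namespace?revision=...)localName" form parsed by QNameFactory.
    static String formatQName(final String localName, final String namespace, final String revision) {
        final StringBuilder sb = new StringBuilder().append('(').append(namespace);
        if (revision != null && !revision.isEmpty()) {
            sb.append("?revision=").append(revision);
        }
        return sb.append(')').append(localName).toString();
    }

    // formatQName("topology", "urn:example:topo", "2014-01-01") -> "(urn:example:topo?revision=2014-01-01)topology"
    // formatQName("topology", "urn:example:topo", null)         -> "(urn:example:topo)topology"
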
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeOutputStreamWriter.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeOutputStreamWriter.java
deleted file mode 100644 (file)
index 40ca4f2..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.Revision;
-
-/**
- * NormalizedNodeOutputStreamWriter is used by the distributed datastore to send a normalized node in
- * a stream.
- * A stream writer wrapper around this class writes node objects to the stream recursively.
- * For example, if you have a ContainerNode with two LeafNode children, then
- * you will first call
- * {@link #startContainerNode(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier, int)},
- * then call
- * {@link #leafNode(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier, Object)} twice
- * and then {@link #endNode()} to end the container node.
- *
- * <p>For each node, the node type is also written to the stream, which helps in reconstructing the object
- * while reading.
- */
-final class NormalizedNodeOutputStreamWriter extends AbstractNormalizedNodeDataOutput {
-    private final Map<String, Integer> stringCodeMap = new HashMap<>();
-
-    NormalizedNodeOutputStreamWriter(final DataOutput output) {
-        super(output);
-    }
-
-    @Override
-    protected short streamVersion() {
-        return TokenTypes.LITHIUM_VERSION;
-    }
-
-    @Override
-    protected void writeQName(final QName qname) throws IOException {
-        writeString(qname.getLocalName());
-        writeString(qname.getNamespace().toString());
-        writeString(qname.getRevision().map(Revision::toString).orElse(null));
-    }
-
-    @Override
-    protected void writeString(final String string) throws IOException {
-        if (string != null) {
-            final Integer value = stringCodeMap.get(string);
-            if (value == null) {
-                stringCodeMap.put(string, stringCodeMap.size());
-                writeByte(TokenTypes.IS_STRING_VALUE);
-                writeUTF(string);
-            } else {
-                writeByte(TokenTypes.IS_CODE_VALUE);
-                writeInt(value);
-            }
-        } else {
-            writeByte(TokenTypes.IS_NULL_VALUE);
-        }
-    }
-}
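
writeString() above and readCodedString() in the reader removed earlier form a per-stream string dictionary: the first occurrence of a string goes out as UTF, every repeat as a four-byte index. The following standalone, JDK-only model uses the same token values as TokenTypes (removed further below); the sample string is arbitrary:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class StringDictionarySketch {
        private static final byte IS_CODE_VALUE = 1;
        private static final byte IS_STRING_VALUE = 2;
        private static final byte IS_NULL_VALUE = 3;

        private final Map<String, Integer> writeCodes = new HashMap<>();
        private final List<String> readCodes = new ArrayList<>();

        void writeString(final DataOutput out, final String str) throws IOException {
            if (str == null) {
                out.writeByte(IS_NULL_VALUE);
            } else if (writeCodes.containsKey(str)) {
                // Repeated string: emit only its dictionary index
                out.writeByte(IS_CODE_VALUE);
                out.writeInt(writeCodes.get(str));
            } else {
                // First occurrence: remember its index and emit the literal
                writeCodes.put(str, writeCodes.size());
                out.writeByte(IS_STRING_VALUE);
                out.writeUTF(str);
            }
        }

        String readString(final DataInput in) throws IOException {
            final byte token = in.readByte();
            switch (token) {
                case IS_NULL_VALUE:
                    return null;
                case IS_CODE_VALUE:
                    return readCodes.get(in.readInt());
                case IS_STRING_VALUE:
                    final String value = in.readUTF();
                    readCodes.add(value);
                    return value;
                default:
                    throw new IOException("Unhandled string token " + token);
            }
        }

        public static void main(final String[] args) throws IOException {
            final StringDictionarySketch writer = new StringDictionarySketch();
            final ByteArrayOutputStream bos = new ByteArrayOutputStream();
            final DataOutputStream out = new DataOutputStream(bos);
            writer.writeString(out, "urn:example:namespace");   // first occurrence: UTF literal
            writer.writeString(out, "urn:example:namespace");   // repeat: four-byte code

            final StringDictionarySketch reader = new StringDictionarySketch();
            final DataInput in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
            System.out.println(reader.readString(in));
            System.out.println(reader.readString(in));
        }
    }
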
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/PathArgumentTypes.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/PathArgumentTypes.java
deleted file mode 100644 (file)
index b372d8f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import java.util.Map;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-final class PathArgumentTypes {
-    public static final byte AUGMENTATION_IDENTIFIER = 1;
-    public static final byte NODE_IDENTIFIER = 2;
-    public static final byte NODE_IDENTIFIER_WITH_VALUE = 3;
-    public static final byte NODE_IDENTIFIER_WITH_PREDICATES = 4;
-
-    private PathArgumentTypes() {
-        throw new UnsupportedOperationException("Utility class");
-    }
-
-    private static final Map<Class<?>, Byte> CLASS_TO_ENUM_MAP = ImmutableMap.<Class<?>, Byte>builder()
-            .put(YangInstanceIdentifier.AugmentationIdentifier.class, AUGMENTATION_IDENTIFIER)
-            .put(YangInstanceIdentifier.NodeIdentifier.class, NODE_IDENTIFIER)
-            .put(YangInstanceIdentifier.NodeIdentifierWithPredicates.class, NODE_IDENTIFIER_WITH_PREDICATES)
-            .put(YangInstanceIdentifier.NodeWithValue.class, NODE_IDENTIFIER_WITH_VALUE).build();
-
-    public static byte getSerializablePathArgumentType(YangInstanceIdentifier.PathArgument pathArgument) {
-        final Byte type = CLASS_TO_ENUM_MAP.get(pathArgument.getClass());
-        Preconditions.checkArgument(type != null, "Unknown type of PathArgument = %s", pathArgument);
-        return type;
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtils.java
index 3a04347a54faeef7fe74cfedea65ef55a0d8ad42..51e61ea47f3a75357f0bf3d56053353726602906 100644 (file)
@@ -7,16 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore.node.utils.stream;
 
-import com.google.common.base.Preconditions;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
-import java.io.DataInputStream;
 import java.io.DataOutput;
-import java.io.DataOutputStream;
 import java.io.IOException;
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 
 /**
  * Provides various utility methods for serialization and de-serialization.
@@ -24,120 +25,96 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  * @author Thomas Pantelis
  */
 public final class SerializationUtils {
-    public static final ThreadLocal<NormalizedNodeDataOutput> REUSABLE_WRITER_TL = new ThreadLocal<>();
-    public static final ThreadLocal<NormalizedNodeDataInput> REUSABLE_READER_TL = new ThreadLocal<>();
-
     private SerializationUtils() {
+
     }
 
+    @FunctionalInterface
     public interface Applier<T> {
-        void apply(T instance, YangInstanceIdentifier path, NormalizedNode<?, ?> node);
+        void apply(T instance, YangInstanceIdentifier path, NormalizedNode node);
     }
 
-    private static NormalizedNodeDataOutput streamWriter(DataOutput out) {
-        NormalizedNodeDataOutput streamWriter = REUSABLE_WRITER_TL.get();
-        if (streamWriter == null) {
-            streamWriter = NormalizedNodeInputOutput.newDataOutput(out);
+    public static Optional<NormalizedNode> readNormalizedNode(final DataInput in) throws IOException {
+        if (!in.readBoolean()) {
+            return Optional.empty();
         }
+        return Optional.of(NormalizedNodeDataInput.newDataInput(in).readNormalizedNode());
+    }
 
-        return streamWriter;
+    public static void writeNormalizedNode(final DataOutput out, final @Nullable NormalizedNode node)
+            throws IOException {
+        writeNormalizedNode(out, NormalizedNodeStreamVersion.POTASSIUM, node);
     }
 
-    private static NormalizedNodeDataInput streamReader(DataInput in) throws IOException {
-        NormalizedNodeDataInput streamReader = REUSABLE_READER_TL.get();
-        if (streamReader == null) {
-            streamReader = NormalizedNodeInputOutput.newDataInput(in);
-        }
+    public static void writeNormalizedNode(final DataOutput out, final NormalizedNodeStreamVersion version,
+            final @Nullable NormalizedNode node) throws IOException {
+        if (node != null) {
+            out.writeBoolean(true);
 
-        return streamReader;
+            try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
+                stream.writeNormalizedNode(node);
+            }
+        } else {
+            out.writeBoolean(false);
+        }
     }
 
-    public static void serializePathAndNode(YangInstanceIdentifier path, NormalizedNode<?, ?> node,
-            DataOutput out) {
-        Preconditions.checkNotNull(path);
-        Preconditions.checkNotNull(node);
-        try {
-            NormalizedNodeDataOutput streamWriter = streamWriter(out);
-            streamWriter.writeNormalizedNode(node);
-            streamWriter.writeYangInstanceIdentifier(path);
-        } catch (IOException e) {
-            throw new IllegalArgumentException(String.format("Error serializing path %s and Node %s",
-                    path, node), e);
-        }
+    public static YangInstanceIdentifier readPath(final DataInput in) throws IOException {
+        return NormalizedNodeDataInput.newDataInput(in).readYangInstanceIdentifier();
     }
 
-    public static <T> void deserializePathAndNode(DataInput in, T instance, Applier<T> applier) {
-        try {
-            NormalizedNodeDataInput streamReader = streamReader(in);
-            NormalizedNode<?, ?> node = streamReader.readNormalizedNode();
-            YangInstanceIdentifier path = streamReader.readYangInstanceIdentifier();
-            applier.apply(instance, path, node);
-        } catch (IOException e) {
-            throw new IllegalArgumentException("Error deserializing path and Node", e);
-        }
+    public static void writePath(final DataOutput out, final @NonNull YangInstanceIdentifier path)
+            throws IOException {
+        writePath(out, NormalizedNodeStreamVersion.POTASSIUM, path);
     }
 
-    private static NormalizedNode<?, ?> tryDeserializeNormalizedNode(DataInput in) throws IOException {
-        boolean present = in.readBoolean();
-        if (present) {
-            NormalizedNodeDataInput streamReader = streamReader(in);
-            return streamReader.readNormalizedNode();
+    public static void writePath(final DataOutput out, final NormalizedNodeStreamVersion version,
+            final @NonNull YangInstanceIdentifier path) throws IOException {
+        try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
+            stream.writeYangInstanceIdentifier(path);
         }
-
-        return null;
     }
 
-    public static NormalizedNode<?, ?> deserializeNormalizedNode(DataInput in) {
-        try {
-            return tryDeserializeNormalizedNode(in);
-        } catch (IOException e) {
-            throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
-        }
+    public static <T> void readNodeAndPath(final DataInput in, final T instance, final Applier<T> applier)
+            throws IOException {
+        final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
+        NormalizedNode node = stream.readNormalizedNode();
+        YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
+        applier.apply(instance, path, node);
     }
 
-    public static NormalizedNode<?, ?> deserializeNormalizedNode(byte [] bytes) {
-        try {
-            return tryDeserializeNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)));
-        } catch (IOException e) {
-            throw new IllegalArgumentException("Error deserializing NormalizedNode", e);
+    public static void writeNodeAndPath(final DataOutput out, final NormalizedNodeStreamVersion version,
+            final YangInstanceIdentifier path, final NormalizedNode node) throws IOException {
+        try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
+            stream.writeNormalizedNode(node);
+            stream.writeYangInstanceIdentifier(path);
         }
     }
 
-    public static void serializeNormalizedNode(NormalizedNode<?, ?> node, DataOutput out) {
-        try {
-            out.writeBoolean(node != null);
-            if (node != null) {
-                NormalizedNodeDataOutput streamWriter = streamWriter(out);
-                streamWriter.writeNormalizedNode(node);
-            }
-        } catch (IOException e) {
-            throw new IllegalArgumentException(String.format("Error serializing NormalizedNode %s",
-                    node), e);
-        }
+    public static void writeNodeAndPath(final DataOutput out, final YangInstanceIdentifier path,
+            final NormalizedNode node) throws IOException {
+        writeNodeAndPath(out, NormalizedNodeStreamVersion.POTASSIUM, path, node);
     }
 
-    public static byte [] serializeNormalizedNode(NormalizedNode<?, ?> node) {
-        ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        serializeNormalizedNode(node, new DataOutputStream(bos));
-        return bos.toByteArray();
+    public static <T> void readPathAndNode(final DataInput in, final T instance, final Applier<T> applier)
+            throws IOException {
+        final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
+        YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
+        NormalizedNode node = stream.readNormalizedNode();
+        applier.apply(instance, path, node);
     }
 
-    public static void serializePath(YangInstanceIdentifier path, DataOutput out) {
-        Preconditions.checkNotNull(path);
-        try {
-            NormalizedNodeDataOutput streamWriter = streamWriter(out);
-            streamWriter.writeYangInstanceIdentifier(path);
-        } catch (IOException e) {
-            throw new IllegalArgumentException(String.format("Error serializing path %s", path), e);
+    public static void writePathAndNode(final DataOutput out,
+            final org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion version,
+            final YangInstanceIdentifier path, final NormalizedNode node) throws IOException {
+        try (NormalizedNodeDataOutput stream = version.newDataOutput(out)) {
+            stream.writeYangInstanceIdentifier(path);
+            stream.writeNormalizedNode(node);
         }
     }
 
-    public static YangInstanceIdentifier deserializePath(DataInput in) {
-        try {
-            NormalizedNodeDataInput streamReader = streamReader(in);
-            return streamReader.readYangInstanceIdentifier();
-        } catch (IOException e) {
-            throw new IllegalArgumentException("Error deserializing path", e);
-        }
+    public static void writePathAndNode(final DataOutput out, final YangInstanceIdentifier path,
+            final NormalizedNode node) throws IOException {
+        writePathAndNode(out, NormalizedNodeStreamVersion.POTASSIUM, path, node);
     }
 }
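
A short usage sketch of the refactored helper above: writes go behind a presence flag and default to NormalizedNodeStreamVersion.POTASSIUM unless a version is supplied, while reads come back as an Optional. The path and node arguments are assumed to be produced elsewhere; constructing them is out of scope here:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Optional;
    import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class SerializationUtilsUsageSketch {
        private SerializationUtilsUsageSketch() {
            // utility class
        }

        // Serialize a path followed by an optional node into a byte array.
        static byte[] serialize(final YangInstanceIdentifier path, final NormalizedNode node) throws IOException {
            final ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bos)) {
                SerializationUtils.writePath(out, path);
                // Emits a 'false' marker and nothing else when node is null
                SerializationUtils.writeNormalizedNode(out, node);
            }
            return bos.toByteArray();
        }

        // Read the pair back in the same order it was written.
        static void deserialize(final byte[] bytes) throws IOException {
            final DataInput in = new DataInputStream(new ByteArrayInputStream(bytes));
            final YangInstanceIdentifier path = SerializationUtils.readPath(in);
            final Optional<NormalizedNode> node = SerializationUtils.readNormalizedNode(in);
            node.ifPresent(data -> System.out.println("Read " + data.getClass().getSimpleName() + " at " + path));
        }
    }
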
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/TokenTypes.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/TokenTypes.java
deleted file mode 100644 (file)
index 90f0472..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-final class TokenTypes {
-    private TokenTypes() {
-        throw new UnsupportedOperationException();
-    }
-
-    static final byte SIGNATURE_MARKER = (byte) 0xab;
-
-    /**
-     * Original stream version. Uses a per-stream dictionary for strings. QNames are serialized as three strings.
-     */
-    static final short LITHIUM_VERSION = 1;
-
-    // Tokens supported in LITHIUM_VERSION
-    static final byte IS_CODE_VALUE = 1;
-    static final byte IS_STRING_VALUE = 2;
-    static final byte IS_NULL_VALUE = 3;
-}
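
The constants above define the fixed framing every Lithium stream starts with: a one-byte 0xAB signature marker followed by a two-byte version. A standalone, JDK-only model of writing and validating that header (the real writer emits it via AbstractNormalizedNodeDataOutput; the reader checks it as shown earlier in this patch):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class StreamHeaderSketch {
        static final byte SIGNATURE_MARKER = (byte) 0xab;
        static final short LITHIUM_VERSION = 1;

        static byte[] writeHeader() throws IOException {
            final ByteArrayOutputStream bos = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bos)) {
                out.writeByte(SIGNATURE_MARKER);
                out.writeShort(LITHIUM_VERSION);
            }
            return bos.toByteArray();
        }

        static void validateHeader(final byte[] bytes) throws IOException {
            final DataInput in = new DataInputStream(new ByteArrayInputStream(bytes));
            if (in.readByte() != SIGNATURE_MARKER) {
                throw new IOException("Invalid signature marker");
            }
            if (in.readShort() != LITHIUM_VERSION) {
                throw new IOException("Unhandled stream version");
            }
        }

        public static void main(final String[] args) throws IOException {
            validateHeader(writeHeader());
            System.out.println("Header OK");
        }
    }
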
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ValueTypes.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ValueTypes.java
deleted file mode 100644 (file)
index 51ff8d3..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import org.opendaylight.yangtools.yang.common.Empty;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-final class ValueTypes {
-    // The String length threshold beyond which a String should be encoded as bytes
-    public static final int STRING_BYTES_LENGTH_THRESHOLD = Short.MAX_VALUE / 4;
-
-    public static final byte SHORT_TYPE = 1;
-    public static final byte BYTE_TYPE = 2;
-    public static final byte INT_TYPE = 3;
-    public static final byte LONG_TYPE = 4;
-    public static final byte BOOL_TYPE = 5;
-    public static final byte QNAME_TYPE = 6;
-    public static final byte BITS_TYPE = 7;
-    public static final byte YANG_IDENTIFIER_TYPE = 8;
-    public static final byte STRING_TYPE = 9;
-    public static final byte BIG_INTEGER_TYPE = 10;
-    public static final byte BIG_DECIMAL_TYPE = 11;
-    public static final byte BINARY_TYPE = 12;
-    // Leaf nodes no longer allow null values. The "empty" type is now represented as
-    // org.opendaylight.yangtools.yang.common.Empty. This is kept for backwards compatibility.
-    @Deprecated
-    public static final byte NULL_TYPE = 13;
-    public static final byte STRING_BYTES_TYPE = 14;
-    public static final byte EMPTY_TYPE = 15;
-
-    private static final Map<Class<?>, Byte> TYPES;
-
-    static {
-        final Builder<Class<?>, Byte> b = ImmutableMap.builder();
-
-        b.put(String.class, STRING_TYPE);
-        b.put(Byte.class, BYTE_TYPE);
-        b.put(Integer.class, INT_TYPE);
-        b.put(Long.class, LONG_TYPE);
-        b.put(Boolean.class, BOOL_TYPE);
-        b.put(QName.class, QNAME_TYPE);
-        b.put(Short.class, SHORT_TYPE);
-        b.put(BigInteger.class, BIG_INTEGER_TYPE);
-        b.put(BigDecimal.class, BIG_DECIMAL_TYPE);
-        b.put(byte[].class, BINARY_TYPE);
-        b.put(Empty.class, EMPTY_TYPE);
-
-        TYPES = b.build();
-    }
-
-    private ValueTypes() {
-        throw new UnsupportedOperationException("Utility class");
-    }
-
-    public static byte getSerializableType(Object node) {
-        Objects.requireNonNull(node);
-
-        final Byte type = TYPES.get(node.getClass());
-        if (type != null) {
-            if (type == STRING_TYPE && ((String) node).length() >= STRING_BYTES_LENGTH_THRESHOLD) {
-                return STRING_BYTES_TYPE;
-            }
-            return type;
-        }
-
-        if (node instanceof Set) {
-            return BITS_TYPE;
-        }
-
-        if (node instanceof YangInstanceIdentifier) {
-            return YANG_IDENTIFIER_TYPE;
-        }
-
-        throw new IllegalArgumentException("Unknown value type " + node.getClass().getSimpleName());
-    }
-}
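
One detail of getSerializableType() above that is easy to miss: strings of STRING_BYTES_LENGTH_THRESHOLD (Short.MAX_VALUE / 4) characters or more are tagged STRING_BYTES_TYPE, i.e. written as length-prefixed UTF-8 bytes rather than via DataOutput.writeUTF(). A small sketch of the cutover, assuming same-package access to the package-private ValueTypes class; the sample strings are arbitrary:

    // Below the threshold a plain String is tagged STRING_TYPE, at or above it STRING_BYTES_TYPE.
    static void demonstrateStringCutover() {
        final String small = "x";
        final String large = "y".repeat(ValueTypes.STRING_BYTES_LENGTH_THRESHOLD);

        System.out.println(ValueTypes.getSerializableType(small) == ValueTypes.STRING_TYPE);        // true
        System.out.println(ValueTypes.getSerializableType(large) == ValueTypes.STRING_BYTES_TYPE);  // true
    }
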
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/AbstractNormalizedNodePruner.java
new file mode 100644 (file)
index 0000000..cee5a03
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
+
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.NoSuchElementException;
+import java.util.Optional;
+import javax.xml.transform.dom.DOMSource;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContext;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Base class for NormalizedNode pruners, which remove all nodes from the input NormalizedNode that do not have
+ * a corresponding schema element in the DataSchemaContextTree supplied at construction.
+ */
+abstract class AbstractNormalizedNodePruner implements NormalizedNodeStreamWriter {
+    enum State {
+        UNITIALIZED,
+        OPEN,
+        CLOSED;
+    }
+
+    @FunctionalInterface
+    interface WriterMethod<T extends PathArgument> {
+
+        void apply(ReusableImmutableNormalizedNodeStreamWriter writer, T name) throws IOException;
+    }
+
+    @FunctionalInterface
+    interface SizedWriterMethod<T extends PathArgument> {
+
+        void apply(ReusableImmutableNormalizedNodeStreamWriter writer, T name, int childSizeHint) throws IOException;
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractNormalizedNodePruner.class);
+
+    private final Deque<DataSchemaContext> stack = new ArrayDeque<>();
+    private final ReusableImmutableNormalizedNodeStreamWriter delegate =
+            ReusableImmutableNormalizedNodeStreamWriter.create();
+    private final DataSchemaContextTree tree;
+
+    private DataSchemaContext nodePathSchemaNode;
+    private NormalizedNode normalizedNode;
+    private State state = State.UNITIALIZED;
+    private int unknown;
+
+    AbstractNormalizedNodePruner(final DataSchemaContextTree tree) {
+        this.tree = requireNonNull(tree);
+    }
+
+    AbstractNormalizedNodePruner(final EffectiveModelContext schemaContext) {
+        this(DataSchemaContextTree.from(schemaContext));
+    }
+
+    final DataSchemaContextTree getTree() {
+        return tree;
+    }
+
+    final void initialize(final YangInstanceIdentifier nodePath) {
+        nodePathSchemaNode = tree.findChild(nodePath).orElse(null);
+        unknown = 0;
+        normalizedNode = null;
+        stack.clear();
+        delegate.reset();
+        state = State.OPEN;
+    }
+
+    @Override
+    public final void startLeafNode(final NodeIdentifier name) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startLeafNode, name);
+    }
+
+    @Override
+    public final void startLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startLeafSet, name, childSizeHint);
+    }
+
+    @Override
+    public final void startOrderedLeafSet(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startOrderedLeafSet, name, childSizeHint);
+    }
+
+    @Override
+    public void startLeafSetEntryNode(final NodeWithValue<?> name) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startLeafSetEntryNode, name);
+    }
+
+    @Override
+    public final void startContainerNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startContainerNode, name, childSizeHint);
+    }
+
+    @Override
+    public final void startUnkeyedList(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startUnkeyedList, name, childSizeHint);
+    }
+
+    @Override
+    public final void startUnkeyedListItem(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startUnkeyedListItem, name, childSizeHint);
+    }
+
+    @Override
+    public final void startMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startMapNode, name, childSizeHint);
+    }
+
+    @Override
+    public void startMapEntryNode(final NodeIdentifierWithPredicates identifier, final int childSizeHint)
+            throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startMapEntryNode, identifier, childSizeHint);
+    }
+
+    @Override
+    public final void startOrderedMapNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startOrderedMapNode, name, childSizeHint);
+    }
+
+    @Override
+    public final void startChoiceNode(final NodeIdentifier name, final int childSizeHint) throws IOException {
+        enter(ReusableImmutableNormalizedNodeStreamWriter::startChoiceNode, name, childSizeHint);
+    }
+
+    @Override
+    public final  boolean startAnyxmlNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
+        if (enter(name)) {
+            verify(delegate.startAnyxmlNode(name, objectModel),
+                "Unexpected failure to stream DOMSource node %s model %s", name, objectModel);
+        }
+        return true;
+    }
+
+    @Override
+    public final boolean startAnydataNode(final NodeIdentifier name, final Class<?> objectModel) throws IOException {
+        // FIXME: we do not support anydata nodes yet
+        return false;
+    }
+
+    @Override
+    public final  void domSourceValue(final DOMSource value) throws IOException {
+        checkNotSealed();
+        if (unknown == 0) {
+            delegate.domSourceValue(value);
+        }
+    }
+
+    @Override
+    public final void scalarValue(final Object value) throws IOException {
+        checkNotSealed();
+        if (unknown == 0) {
+            delegate.scalarValue(translateScalar(currentSchema(), value));
+        }
+    }
+
+    Object translateScalar(final DataSchemaContext context, final Object value) {
+        // Default is pass-through
+        return value;
+    }
+
+    @Override
+    public final void endNode() throws IOException {
+        checkNotSealed();
+
+        if (unknown == 0) {
+            try {
+                stack.pop();
+            } catch (NoSuchElementException e) {
+                throw new IllegalStateException("endNode called on an empty stack", e);
+            }
+            delegate.endNode();
+        } else {
+            unknown--;
+            if (unknown != 0) {
+                // Still at unknown, do not attempt to create result
+                return;
+            }
+        }
+
+        if (stack.isEmpty()) {
+            final var result = delegate.result();
+            normalizedNode = result != null ? result.data() : null;
+            state = State.CLOSED;
+        }
+    }
+
+    @Override
+    public final void close() throws IOException {
+        state = State.CLOSED;
+        stack.clear();
+        delegate.close();
+    }
+
+    @Override
+    public final void flush() throws IOException {
+        delegate.flush();
+    }
+
+    /**
+     * Return the resulting normalized node.
+     *
+     * @return Resulting node for the path, if it was not pruned
+     * @throws IllegalStateException if this pruner has not been closed
+     */
+    public final Optional<NormalizedNode> getResult() {
+        checkState(state == State.CLOSED, "Cannot get result in state %s", state);
+        return Optional.ofNullable(normalizedNode);
+    }
+
+    private void checkNotSealed() {
+        checkState(state == State.OPEN, "Illegal operation in state %s", state);
+    }
+
+    private boolean enter(final PathArgument name) {
+        checkNotSealed();
+
+        if (unknown != 0) {
+            LOG.debug("Skipping child {} in unknown subtree", name);
+            unknown++;
+            return false;
+        }
+
+        final DataSchemaContext schema;
+        final DataSchemaContext parent = currentSchema();
+        if (parent != null) {
+            schema = parent instanceof DataSchemaContext.Composite compositeParent ? compositeParent.childByArg(name)
+                : null;
+        } else {
+            schema = nodePathSchemaNode;
+        }
+
+        if (schema == null) {
+            LOG.debug("Schema not found for {}", name);
+            unknown = 1;
+            return false;
+        }
+
+        stack.push(schema);
+        final DataSchemaNode dataSchema = schema.dataSchemaNode();
+        if (dataSchema != null) {
+            delegate.nextDataSchemaNode(dataSchema);
+        }
+        return true;
+    }
+
+    final <A extends PathArgument> void enter(final WriterMethod<A> method, final A name) throws IOException {
+        if (enter(name)) {
+            method.apply(delegate, name);
+        }
+    }
+
+    final <A extends PathArgument> void enter(final SizedWriterMethod<A> method, final A name, final int size)
+            throws IOException {
+        if (enter(name)) {
+            method.apply(delegate, name, size);
+        }
+    }
+
+    final DataSchemaContext currentSchema() {
+        return stack.peek();
+    }
+}
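
For context, a pruner derived from the class above is driven as a NormalizedNodeStreamWriter: initialize it for a path, replay an existing node into it, then collect whatever survived from getResult(). The sketch below is a hedged illustration rather than the project's actual wiring: the anonymous no-op subclass, the same-package placement and the use of yangtools' NormalizedNodeWriter are assumptions, since the concrete pruner subclasses are not part of this hunk:

    package org.opendaylight.controller.cluster.datastore.node.utils.transformer;

    import java.io.IOException;
    import java.util.Optional;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
    import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
    import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;

    final class PrunerUsageSketch {
        private PrunerUsageSketch() {
            // utility class
        }

        // Replay 'node' (rooted at 'path') through a pruner; children without a schema are dropped.
        static Optional<NormalizedNode> prune(final EffectiveModelContext context,
                final YangInstanceIdentifier path, final NormalizedNode node) throws IOException {
            final AbstractNormalizedNodePruner pruner = new AbstractNormalizedNodePruner(context) {
                // no abstract members to implement; translateScalar() keeps its pass-through default
            };

            pruner.initialize(path);
            try (NormalizedNodeWriter writer = NormalizedNodeWriter.forStreamWriter(pruner)) {
                writer.write(node);
            }
            return pruner.getResult();
        }
    }
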
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodeBuilderWrapper.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodeBuilderWrapper.java
deleted file mode 100644 (file)
index 4654ec9..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
-
-import com.google.common.base.Optional;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeContainerBuilder;
-import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
-
-public class NormalizedNodeBuilderWrapper {
-    private final NormalizedNodeContainerBuilder<?,?,?,?> builder;
-    private final YangInstanceIdentifier.PathArgument identifier;
-    private final Optional<DataSchemaContextNode<?>> schemaNode;
-
-    NormalizedNodeBuilderWrapper(NormalizedNodeContainerBuilder<?,?,?,?> builder,
-            YangInstanceIdentifier.PathArgument identifier, Optional<DataSchemaContextNode<?>> schemaNode) {
-        this.builder = builder;
-        this.identifier = identifier;
-        this.schemaNode = schemaNode;
-    }
-
-    @SuppressWarnings("rawtypes")
-    public NormalizedNodeContainerBuilder builder() {
-        return builder;
-    }
-
-    public QName nodeType() {
-        return identifier.getNodeType();
-    }
-
-    public YangInstanceIdentifier.PathArgument identifier() {
-        return identifier;
-    }
-
-    public Optional<DataSchemaContextNode<?>> getSchema() {
-        return schemaNode;
-    }
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePruner.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePruner.java
deleted file mode 100644 (file)
index be31081..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import java.net.URI;
-import java.util.LinkedList;
-import java.util.List;
-import javax.xml.transform.dom.DOMSource;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeContainerBuilder;
-import org.opendaylight.yangtools.yang.data.util.DataSchemaContextNode;
-import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The NormalizedNodePruner removes all nodes from the input NormalizedNode that do not have a corresponding
- * schema element in the passed in SchemaContext.
- */
-public class NormalizedNodePruner implements NormalizedNodeStreamWriter {
-    private static final Logger LOG = LoggerFactory.getLogger(NormalizedNodePruner.class);
-
-    public static final URI BASE_NAMESPACE = URI.create("urn:ietf:params:xml:ns:netconf:base:1.0");
-    private final SimpleStack<NormalizedNodeBuilderWrapper> stack = new SimpleStack<>();
-    private NormalizedNode<?,?> normalizedNode;
-    private final DataSchemaContextNode<?> nodePathSchemaNode;
-    private boolean sealed = false;
-
-    public NormalizedNodePruner(YangInstanceIdentifier nodePath, SchemaContext schemaContext) {
-        nodePathSchemaNode = findSchemaNodeForNodePath(nodePath, schemaContext);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void leafNode(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, Object value)
-            throws IllegalArgumentException {
-
-        checkNotSealed();
-
-        NormalizedNodeBuilderWrapper parent = stack.peek();
-        LeafNode<Object> leafNode = Builders.leafBuilder().withNodeIdentifier(nodeIdentifier).withValue(value).build();
-        if (parent != null) {
-            if (hasValidSchema(nodeIdentifier.getNodeType(), parent)) {
-                parent.builder().addChild(leafNode);
-            }
-        } else {
-            // If there's no parent node then this is a stand alone LeafNode.
-            if (nodePathSchemaNode != null) {
-                this.normalizedNode = leafNode;
-            }
-
-            sealed = true;
-        }
-    }
-
-    @Override
-    public void startLeafSet(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.leafSetBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @Override
-    public void startOrderedLeafSet(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int str)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.orderedLeafSetBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @SuppressWarnings({ "unchecked" })
-    @Override
-    public void leafSetEntryNode(QName name, Object value) throws IllegalArgumentException {
-        checkNotSealed();
-
-        NormalizedNodeBuilderWrapper parent = stack.peek();
-        if (parent != null) {
-            if (hasValidSchema(name, parent)) {
-                parent.builder().addChild(Builders.leafSetEntryBuilder().withValue(value)
-                        .withNodeIdentifier(new YangInstanceIdentifier.NodeWithValue<>(parent.nodeType(), value))
-                        .build());
-            }
-        } else {
-            // If there's no parent LeafSetNode then this is a stand alone
-            // LeafSetEntryNode.
-            if (nodePathSchemaNode != null) {
-                this.normalizedNode = Builders.leafSetEntryBuilder().withValue(value).withNodeIdentifier(
-                        new YangInstanceIdentifier.NodeWithValue<>(name, value)).build();
-            }
-
-            sealed = true;
-        }
-    }
-
-    @Override
-    public void startContainerNode(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.containerBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @Override
-    public void startYangModeledAnyXmlNode(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalArgumentException {
-        throw new UnsupportedOperationException("Not implemented yet");
-    }
-
-    @Override
-    public void startUnkeyedList(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.unkeyedListBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @Override
-    public void startUnkeyedListItem(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalStateException {
-        checkNotSealed();
-
-        addBuilder(Builders.unkeyedListEntryBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @Override
-    public void startMapNode(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.mapBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @Override
-    public void startMapEntryNode(YangInstanceIdentifier.NodeIdentifierWithPredicates nodeIdentifierWithPredicates,
-            int count)  throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.mapEntryBuilder().withNodeIdentifier(nodeIdentifierWithPredicates),
-                nodeIdentifierWithPredicates);
-    }
-
-    @Override
-    public void startOrderedMapNode(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.orderedMapBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @Override
-    public void startChoiceNode(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, int count)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        addBuilder(Builders.choiceBuilder().withNodeIdentifier(nodeIdentifier), nodeIdentifier);
-    }
-
-    @Override
-    public void startAugmentationNode(YangInstanceIdentifier.AugmentationIdentifier augmentationIdentifier)
-            throws IllegalArgumentException {
-
-        checkNotSealed();
-
-        addBuilder(Builders.augmentationBuilder().withNodeIdentifier(augmentationIdentifier), augmentationIdentifier);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void anyxmlNode(YangInstanceIdentifier.NodeIdentifier nodeIdentifier, Object value)
-            throws IllegalArgumentException {
-        checkNotSealed();
-
-        NormalizedNodeBuilderWrapper parent = stack.peek();
-        AnyXmlNode anyXmlNode = Builders.anyXmlBuilder().withNodeIdentifier(nodeIdentifier).withValue((DOMSource) value)
-                .build();
-        if (parent != null) {
-            if (hasValidSchema(nodeIdentifier.getNodeType(), parent)) {
-                parent.builder().addChild(anyXmlNode);
-            }
-        } else {
-            // If there's no parent node then this is a stand alone AnyXmlNode.
-            if (nodePathSchemaNode != null) {
-                this.normalizedNode = anyXmlNode;
-            }
-
-            sealed = true;
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public void endNode() throws IllegalStateException {
-        checkNotSealed();
-
-        NormalizedNodeBuilderWrapper child = stack.pop();
-
-        Preconditions.checkState(child != null, "endNode called on an empty stack");
-
-        if (!child.getSchema().isPresent()) {
-            LOG.debug("Schema not found for {}", child.identifier());
-            return;
-        }
-
-        NormalizedNode<?,?> newNode = child.builder().build();
-
-        if (stack.size() > 0) {
-            NormalizedNodeBuilderWrapper parent = stack.peek();
-            parent.builder().addChild(newNode);
-        } else {
-            this.normalizedNode = newNode;
-            sealed = true;
-        }
-    }
-
-    @Override
-    public void close() {
-        sealed = true;
-    }
-
-    @Override
-    public void flush() {
-
-    }
-
-    public NormalizedNode<?,?> normalizedNode() {
-        return normalizedNode;
-    }
-
-    private void checkNotSealed() {
-        Preconditions.checkState(!sealed, "Pruner can be used only once");
-    }
-
-    private static boolean hasValidSchema(QName name, NormalizedNodeBuilderWrapper parent) {
-        boolean valid = parent.getSchema().isPresent() && parent.getSchema().get().getChild(name) != null;
-        if (!valid) {
-            LOG.debug("Schema not found for {}", name);
-        }
-
-        return valid;
-    }
-
-    private NormalizedNodeBuilderWrapper addBuilder(NormalizedNodeContainerBuilder<?,?,?,?> builder,
-            PathArgument identifier) {
-        final Optional<DataSchemaContextNode<?>> schemaNode;
-        NormalizedNodeBuilderWrapper parent = stack.peek();
-        if (parent == null) {
-            schemaNode = Optional.fromNullable(nodePathSchemaNode);
-        } else if (parent.getSchema().isPresent()) {
-            schemaNode = Optional.fromNullable(parent.getSchema().get().getChild(identifier));
-        } else {
-            schemaNode = Optional.absent();
-        }
-
-        NormalizedNodeBuilderWrapper wrapper = new NormalizedNodeBuilderWrapper(builder, identifier, schemaNode);
-        stack.push(wrapper);
-        return wrapper;
-    }
-
-    private static DataSchemaContextNode<?> findSchemaNodeForNodePath(YangInstanceIdentifier nodePath,
-            SchemaContext schemaContext) {
-        DataSchemaContextNode<?> schemaNode = DataSchemaContextTree.from(schemaContext).getRoot();
-        for (PathArgument arg : nodePath.getPathArguments()) {
-            schemaNode = schemaNode.getChild(arg);
-            if (schemaNode == null) {
-                break;
-            }
-        }
-
-        return schemaNode;
-    }
-
-    @VisibleForTesting
-    static class SimpleStack<E> {
-        List<E> stack = new LinkedList<>();
-
-        void push(E element) {
-            stack.add(element);
-        }
-
-        E pop() {
-            if (size() == 0) {
-                return null;
-            }
-            return stack.remove(stack.size() - 1);
-        }
-
-        E peek() {
-            if (size() == 0) {
-                return null;
-            }
-
-            return stack.get(stack.size() - 1);
-        }
-
-        int size() {
-            return stack.size();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/ReusableNormalizedNodePruner.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/ReusableNormalizedNodePruner.java
new file mode 100644 (file)
index 0000000..4b75428
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
+
+import com.google.common.annotations.Beta;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+
+/**
+ * The NormalizedNodePruner removes all nodes from the input NormalizedNode that do not have a corresponding
+ * schema element in the passed in SchemaContext. Instances of this class can be reused multiple times and must be
+ * initialized before each use through {@link #initializeForPath(YangInstanceIdentifier)}.
+ */
+@Beta
+public abstract class ReusableNormalizedNodePruner extends AbstractNormalizedNodePruner {
+    private static final class SimplePruner extends ReusableNormalizedNodePruner {
+        SimplePruner(final EffectiveModelContext schemaContext) {
+            super(schemaContext);
+        }
+
+        SimplePruner(final DataSchemaContextTree tree) {
+            super(tree);
+        }
+
+        @Override
+        public ReusableNormalizedNodePruner duplicate() {
+            return new SimplePruner(getTree());
+        }
+    }
+
+    ReusableNormalizedNodePruner(final EffectiveModelContext schemaContext) {
+        super(schemaContext);
+    }
+
+    ReusableNormalizedNodePruner(final DataSchemaContextTree tree) {
+        super(tree);
+    }
+
+    /**
+     * Create a new pruner bound to a SchemaContext.
+     *
+     * @param schemaContext SchemaContext to use
+     * @return A new uninitialized pruner
+     * @throws NullPointerException if {@code schemaContext} is null
+     */
+    public static @NonNull ReusableNormalizedNodePruner forSchemaContext(final EffectiveModelContext schemaContext) {
+        return new SimplePruner(schemaContext);
+    }
+
+    /**
+     * Create a new pruner bound to a DataSchemaContextTree. This is a more efficient alternative to
+     * {@link #forSchemaContext(EffectiveModelContext)}.
+     *
+     * @param tree DataSchemaContextTree to use
+     * @return A new uninitialized pruner
+     * @throws NullPointerException if {@code tree} is null
+     */
+    public static @NonNull ReusableNormalizedNodePruner forDataSchemaContext(final DataSchemaContextTree tree) {
+        return new SimplePruner(tree);
+    }
+
+    /**
+     * Return a new instance, which is backed by the same DataSchemaContextTree, but does not share any state and is
+     * uninitialized. This is equivalent to {@link #forDataSchemaContext(DataSchemaContextTree)} and is provided for
+     * convenience.
+     *
+     * @return A new uninitialized pruner bound to the same DataSchemaContextTree as this one.
+     */
+    public abstract @NonNull ReusableNormalizedNodePruner duplicate();
+
+    /**
+     * Initialize this pruner for processing a node at specified path.
+     *
+     * @param path Path that will be processed next
+     * @throws NullPointerException if {@code path} is null
+     */
+    public final void initializeForPath(final YangInstanceIdentifier path) {
+        initialize(path);
+    }
+
+    public final @NonNull ReusableNormalizedNodePruner withUintAdaption() {
+        return new UintAdaptingPruner(getTree());
+    }
+}
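
For orientation, a minimal sketch of how the reusable pruner above might be driven; the NormalizedNodeWriter wiring and the way the pruned result is retrieved (inherited from AbstractNormalizedNodePruner, which is not part of this diff) are assumptions:

import java.io.IOException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;

final class PrunerUsageSketch {
    // Prunes 'input' against the schema rooted at 'path'; nodes without a schema match are dropped.
    static void prune(final EffectiveModelContext modelContext, final YangInstanceIdentifier path,
            final NormalizedNode input) throws IOException {
        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(modelContext);
        pruner.initializeForPath(path);
        // The pruner acts as a NormalizedNodeStreamWriter, so the input can be streamed through it.
        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
        // The pruned result is then exposed by AbstractNormalizedNodePruner (not shown in this diff).
    }
}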
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPruner.java
new file mode 100644 (file)
index 0000000..4c6c223
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
+
+import static com.google.common.base.Verify.verify;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.collect.ImmutableMap;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.function.Function;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.opendaylight.yangtools.yang.common.Uint64;
+import org.opendaylight.yangtools.yang.common.Uint8;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContext;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.TypedDataSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.type.Uint16TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.Uint32TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.Uint64TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.Uint8TypeDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class UintAdaptingPruner extends ReusableNormalizedNodePruner {
+    @FunctionalInterface
+    private interface NipAdapter extends Function<NodeIdentifierWithPredicates, NodeIdentifierWithPredicates> {
+
+    }
+
+    private enum ValueAdapter implements Function<Object, Object> {
+        UINT8 {
+            @Override
+            public Object apply(final Object obj) {
+                if (obj instanceof Short shortObj) {
+                    LOG.trace("Translating legacy uint8 {}", obj);
+                    return Uint8.valueOf(shortObj);
+                }
+                return obj;
+            }
+        },
+        UINT16 {
+            @Override
+            public Object apply(final Object obj) {
+                if (obj instanceof Integer intObj) {
+                    LOG.trace("Translating legacy uint16 {}", obj);
+                    return Uint16.valueOf(intObj);
+                }
+                return obj;
+            }
+        },
+        UINT32 {
+            @Override
+            public Object apply(final Object obj) {
+                if (obj instanceof Long longObj) {
+                    LOG.trace("Translating legacy uint32 {}", obj);
+                    return Uint32.valueOf(longObj);
+                }
+                return obj;
+            }
+        },
+        UINT64 {
+            @Override
+            public Object apply(final Object obj) {
+                if (obj instanceof BigInteger bigInt) {
+                    LOG.trace("Translating legacy uint64 {}", obj);
+                    return Uint64.valueOf(bigInt);
+                }
+                return obj;
+            }
+        };
+
+        private static final Logger LOG = LoggerFactory.getLogger(ValueAdapter.class);
+
+        static @Nullable ValueAdapter forType(final TypeDefinition<?> type) {
+            if (type instanceof Uint8TypeDefinition) {
+                return UINT8;
+            } else if (type instanceof Uint16TypeDefinition) {
+                return UINT16;
+            } else if (type instanceof Uint32TypeDefinition) {
+                return UINT32;
+            } else if (type instanceof Uint64TypeDefinition) {
+                return UINT64;
+            } else {
+                return null;
+            }
+        }
+    }
+
+    private static final LoadingCache<ListSchemaNode, NipAdapter> NIP_ADAPTERS = CacheBuilder.newBuilder()
+            .weakKeys().build(new AdapterCacheLoader());
+
+    UintAdaptingPruner(final DataSchemaContextTree tree) {
+        super(tree);
+    }
+
+    @Override
+    public ReusableNormalizedNodePruner duplicate() {
+        return new UintAdaptingPruner(getTree());
+    }
+
+    @Override
+    public void startMapEntryNode(final NodeIdentifierWithPredicates identifier, final int childSizeHint)
+            throws IOException {
+        enter(this::adaptEntry, identifier, childSizeHint);
+    }
+
+    @Override
+    public void startLeafSetEntryNode(final NodeWithValue<?> name) throws IOException {
+        enter(this::adaptEntry, name);
+    }
+
+    @Override
+    Object translateScalar(final DataSchemaContext context, final Object value) {
+        final DataSchemaNode schema = context.dataSchemaNode();
+        return schema instanceof TypedDataSchemaNode typed ? adaptValue(typed.getType(), value) : value;
+    }
+
+    private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer, final NodeWithValue<?> name) {
+        final NodeWithValue<?> adapted;
+        final DataSchemaNode schema = currentSchema().dataSchemaNode();
+        if (schema instanceof TypedDataSchemaNode typed) {
+            final Object oldValue = name.getValue();
+            final Object newValue = adaptValue(typed.getType(), oldValue);
+            adapted = newValue == oldValue ? name : new NodeWithValue<>(name.getNodeType(), newValue);
+        } else {
+            adapted = name;
+        }
+
+        writer.startLeafSetEntryNode(adapted);
+    }
+
+    private void adaptEntry(final ReusableImmutableNormalizedNodeStreamWriter writer,
+            final NodeIdentifierWithPredicates name, final int size) {
+        final NodeIdentifierWithPredicates adapted;
+        final DataSchemaNode schema = currentSchema().dataSchemaNode();
+        if (schema instanceof ListSchemaNode list) {
+            adapted = NIP_ADAPTERS.getUnchecked(list).apply(name);
+        } else {
+            adapted = name;
+        }
+
+        writer.startMapEntryNode(adapted, size);
+    }
+
+    private static Object adaptValue(final TypeDefinition<?> type, final Object value) {
+        final ValueAdapter adapter = ValueAdapter.forType(type);
+        return adapter != null ? adapter.apply(value) : value;
+    }
+
+    private static final class AdapterCacheLoader extends CacheLoader<ListSchemaNode, NipAdapter> {
+        @Override
+        public NipAdapter load(final ListSchemaNode key) {
+            final Map<QName, ValueAdapter> adapters = new HashMap<>();
+
+            for (QName qname : key.getKeyDefinition()) {
+                final DataSchemaNode child;
+                try {
+                    child = key.findDataTreeChild(qname).orElseThrow();
+                } catch (NoSuchElementException e) {
+                    throw new IllegalStateException("Failed to find child " + qname, e);
+                }
+
+                verify(child instanceof LeafSchemaNode, "Key references non-leaf child %s", child);
+                final ValueAdapter adapter = ValueAdapter.forType(((LeafSchemaNode) child).getType());
+                if (adapter != null) {
+                    adapters.put(qname, adapter);
+                }
+            }
+
+            return adapters.isEmpty() ? name -> name : new TransformingNipAdapter(adapters);
+        }
+    }
+
+    private static final class TransformingNipAdapter implements NipAdapter {
+        private final ImmutableMap<QName, ValueAdapter> adapters;
+
+        TransformingNipAdapter(final Map<QName, ValueAdapter> toTransform) {
+            adapters = ImmutableMap.copyOf(toTransform);
+        }
+
+        @Override
+        public NodeIdentifierWithPredicates apply(final NodeIdentifierWithPredicates name) {
+            final Set<Entry<QName, Object>> entries = name.entrySet();
+            final ImmutableMap.Builder<QName, Object> newEntries = ImmutableMap.builderWithExpectedSize(entries.size());
+            for (Entry<QName, Object> e : entries) {
+                final QName qname = e.getKey();
+                final ValueAdapter adapter = adapters.get(qname);
+                newEntries.put(qname, adapter != null ? adapter.apply(e.getValue()) : e.getValue());
+            }
+
+            return NodeIdentifierWithPredicates.of(name.getNodeType(), newEntries.build());
+        }
+    }
+}
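
To make the effect of the NIP adaptation above concrete, a small illustration of what TransformingNipAdapter produces for a list keyed by a single uint32 leaf; the module, list and key names are hypothetical:

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.Uint32;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;

final class UintKeyAdaptionSketch {
    // Hypothetical names, used purely for illustration.
    private static final QName DEVICE = QName.create("urn:example:demo", "2019-01-01", "device");
    private static final QName ID = QName.create(DEVICE, "id");

    static void demo() {
        // A legacy key predicate carries a boxed Long for a uint32 key leaf ...
        final NodeIdentifierWithPredicates legacy = NodeIdentifierWithPredicates.of(DEVICE, ID, 42L);
        // ... and the adapter rewrites it to carry the Uint32 equivalent instead.
        final NodeIdentifierWithPredicates adapted = NodeIdentifierWithPredicates.of(DEVICE, ID, Uint32.valueOf(42L));
    }
}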
index ff20c0f631706adf444e68757d63e1b4d5cfb8e8..3e299e3d9b625825b02ba8fc09106384b2f17ad0 100644 (file)
@@ -13,22 +13,20 @@ import static com.google.common.base.Verify.verifyNotNull;
 
 import com.google.common.annotations.Beta;
 import java.util.Optional;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
 
 /**
  * Abstract {@link DataTreeModificationCursor} which tracks the current path. Subclasses can get the current path
- * via {@link #current()}.
+ * via {@link #current()}. This class is NOT thread-safe.
  *
  * @author Thomas Pantelis
  */
 @Beta
-@NotThreadSafe
 public abstract class AbstractDataTreeModificationCursor implements DataTreeModificationCursor {
-    private YangInstanceIdentifier current = YangInstanceIdentifier.EMPTY;
+    private YangInstanceIdentifier current = YangInstanceIdentifier.of();
 
     protected final YangInstanceIdentifier current() {
         return current;
@@ -73,7 +71,7 @@ public abstract class AbstractDataTreeModificationCursor implements DataTreeModi
     }
 
     @Override
-    public final Optional<NormalizedNode<?, ?>> readNode(final PathArgument child) {
+    public final Optional<NormalizedNode> readNode(final PathArgument child) {
         throw new UnsupportedOperationException("Not implemented");
     }
 
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedByteArray.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedByteArray.java
new file mode 100644 (file)
index 0000000..87f8a18
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.ByteSink;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInput;
+import java.util.ArrayList;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.yangtools.concepts.Immutable;
+
+@Beta
+@NonNullByDefault
+public final class ChunkedByteArray implements Immutable {
+    private final ImmutableList<byte[]> chunks;
+    private final int size;
+
+    ChunkedByteArray(final int size, final ImmutableList<byte[]> chunks) {
+        this.size = size;
+        this.chunks = requireNonNull(chunks);
+    }
+
+    public static ChunkedByteArray readFrom(final ObjectInput in, final int size, final int chunkSize)
+            throws IOException {
+        final List<byte[]> chunks = new ArrayList<>(requiredChunks(size, chunkSize));
+        int remaining = size;
+        do {
+            final byte[] buffer = new byte[Math.min(remaining, chunkSize)];
+            in.readFully(buffer);
+            chunks.add(buffer);
+            remaining -= buffer.length;
+        } while (remaining != 0);
+
+        return new ChunkedByteArray(size, ImmutableList.copyOf(chunks));
+    }
+
+    public int size() {
+        return size;
+    }
+
+    public InputStream openStream() {
+        return new ChunkedInputStream(size, chunks.iterator());
+    }
+
+    public void copyTo(final DataOutput output) throws IOException {
+        for (byte[] chunk : chunks) {
+            output.write(chunk, 0, chunk.length);
+        }
+    }
+
+    public void copyTo(final ByteSink output) throws IOException {
+        for (byte[] chunk : chunks) {
+            output.write(chunk);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).add("size", size).add("chunkCount", chunks.size()).toString();
+    }
+
+    @VisibleForTesting
+    ImmutableList<byte[]> getChunks() {
+        return chunks;
+    }
+
+    private static int requiredChunks(final int size, final int chunkSize) {
+        final int div = size / chunkSize;
+        return size % chunkSize == 0 ? div : div + 1;
+    }
+}
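
A short usage sketch of the new ChunkedByteArray; the 256 KiB chunk size and the transfer helper are arbitrary choices for illustration:

import java.io.DataOutput;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInput;

final class ChunkedByteArraySketch {
    // Reads 'size' bytes from 'in' into 256 KiB chunks and copies them to 'out' without ever
    // materializing a single contiguous byte[].
    static void transfer(final ObjectInput in, final int size, final DataOutput out) throws IOException {
        final ChunkedByteArray array = ChunkedByteArray.readFrom(in, size, 256 * 1024);
        array.copyTo(out);

        // The same data can also be consumed incrementally as a stream:
        try (InputStream is = array.openStream()) {
            // e.g. hand 'is' to a deserializer
        }
    }
}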
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedInputStream.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedInputStream.java
new file mode 100644 (file)
index 0000000..b26126b
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.InputStream;
+import java.util.Iterator;
+
+final class ChunkedInputStream extends InputStream {
+    private final Iterator<byte[]> remainingChunks;
+
+    private byte[] currentChunk;
+    private int currentLimit;
+    private int currentOffset;
+    private int available;
+
+    ChunkedInputStream(final int size, final Iterator<byte[]> iterator) {
+        remainingChunks = requireNonNull(iterator);
+        currentChunk = remainingChunks.next();
+        currentLimit = currentChunk.length;
+        available = size;
+    }
+
+    @Override
+    public int available() {
+        return available;
+    }
+
+    @Override
+    public int read() {
+        if (currentChunk == null) {
+            return -1;
+        }
+
+        int ret = currentChunk[currentOffset] & 0xff;
+        consumeBytes(1);
+        return ret;
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:ParameterName")
+    public int read(final byte[] b) {
+        return read(b, 0, b.length);
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:ParameterName")
+    public int read(final byte[] b, final int off, final int len) {
+        if (len < 0) {
+            throw new IndexOutOfBoundsException();
+        }
+        if (currentChunk == null) {
+            return -1;
+        }
+
+        final int result = Math.min(available, len);
+        int toOffset = off;
+        int toCopy = result;
+
+        while (toCopy != 0) {
+            final int count = currentBytes(toCopy);
+            System.arraycopy(currentChunk, currentOffset, b, toOffset, count);
+            consumeBytes(count);
+            toOffset += count;
+            toCopy -= count;
+        }
+
+        return result;
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:ParameterName")
+    public long skip(final long n) {
+        final int result = (int) Math.min(available, n);
+
+        int toSkip = result;
+        while (toSkip != 0) {
+            final int count = currentBytes(toSkip);
+            consumeBytes(count);
+            toSkip -= count;
+        }
+
+        return result;
+    }
+
+    private int currentBytes(final int desired) {
+        return Math.min(desired, currentLimit - currentOffset);
+    }
+
+    private void consumeBytes(final int count) {
+        currentOffset += count;
+        available -= count;
+
+        if (currentOffset == currentLimit) {
+            if (remainingChunks.hasNext()) {
+                currentChunk = remainingChunks.next();
+                currentLimit = currentChunk.length;
+            } else {
+                currentChunk = null;
+                currentLimit = 0;
+            }
+            currentOffset = 0;
+        }
+    }
+}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/ChunkedOutputStream.java
new file mode 100644 (file)
index 0000000..1376c67
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.math.IntMath.ceilingPowerOfTwo;
+import static com.google.common.math.IntMath.isPowerOfTwo;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Deque;
+import java.util.Iterator;
+import org.opendaylight.yangtools.concepts.Either;
+
+/**
+ * An {@link OutputStream} implementation which collects data in a series of {@code byte[]} chunks, each of which has
+ * a fixed maximum size. This is generally preferable to {@link ByteArrayOutputStream}, as that can result in huge
+ * byte arrays -- which can create unnecessary pressure on the GC (as well as a lot of copying).
+ *
+ * <p>
+ * This class takes a different approach: it recognizes that result of buffering will be collected at some point, when
+ * the stream is already closed (and thus unmodifiable). Thus it splits the process into two steps:
+ * <ul>
+ *   <li>Data acquisition, during which we start with an initial (power-of-two) size and proceed to fill it up. Once the
+ *       buffer is full, we stash it, allocate a new buffer twice its size and repeat the process. Once we hit
+ *       {@code maxChunkSize}, we do not grow subsequent buffers. We can also skip some intermediate sizes if data
+ *       is introduced in large chunks via {@link #write(byte[], int, int)}.</li>
+ *   <li>Buffer consolidation, which occurs when the stream is {@link #close() closed}. At this point we construct the
+ *       final collection of buffers.</li>
+ * </ul>
+ *
+ * <p>
+ * The data acquisition strategy results in predictably-sized buffers, which grow exponentially in size until
+ * they hit the maximum size. An intrinsic property here is that the total capacity of chunks created during the
+ * ramp-up is guaranteed to fit into {@code maxChunkSize}, hence they can readily be compacted into a single buffer,
+ * which replaces them. Combined with the requirement to trim the last buffer to an accurate length, this algorithm
+ * guarantees that the total number of internal copy operations is capped at {@code 2 * maxChunkSize}. The number of produced chunks is also
+ * well-controlled:
+ * <ul>
+ *   <li>for slowly-built data, we will maintain perfect packing</li>
+ *   <li>for fast-startup data, we will be at most one chunk away from packing perfectly</li>
+ * </ul>
+ *
+ * @author Robert Varga
+ * @author Tomas Olvecky
+ */
+@Beta
+public final class ChunkedOutputStream extends OutputStream {
+    private static final int MIN_ARRAY_SIZE = 32;
+
+    private final int maxChunkSize;
+
+    // byte[] or a List
+    private Object result;
+    // Lazily-allocated to reduce pressure for single-chunk streams
+    private Deque<byte[]> prevChunks;
+
+    private byte[] currentChunk;
+    private int currentOffset;
+    private int size;
+
+    public ChunkedOutputStream(final int requestedInitialCapacity, final int maxChunkSize) {
+        checkArgument(isPowerOfTwo(maxChunkSize), "Maximum chunk size %s is not a power of two", maxChunkSize);
+        checkArgument(maxChunkSize > 0, "Maximum chunk size %s is not positive", maxChunkSize);
+        this.maxChunkSize = maxChunkSize;
+        currentChunk = new byte[initialCapacity(requestedInitialCapacity, maxChunkSize)];
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:ParameterName")
+    public void write(final int b) throws IOException {
+        checkNotClosed();
+        ensureOneByte();
+        currentChunk[currentOffset] = (byte) b;
+        currentOffset++;
+        size++;
+    }
+
+    @Override
+    @SuppressWarnings("checkstyle:ParameterName")
+    public void write(final byte[] b, final int off, final int len) throws IOException {
+        if (len < 0) {
+            throw new IndexOutOfBoundsException();
+        }
+        checkNotClosed();
+
+        int fromOffset = off;
+        int toCopy = len;
+
+        while (toCopy != 0) {
+            final int count = ensureMoreBytes(toCopy);
+            System.arraycopy(b, fromOffset, currentChunk, currentOffset, count);
+            currentOffset += count;
+            size += count;
+            fromOffset += count;
+            toCopy -= count;
+        }
+    }
+
+    @Override
+    public void close() {
+        if (result == null) {
+            result = computeResult();
+            prevChunks = null;
+            currentChunk = null;
+        }
+    }
+
+    public int size() {
+        return size;
+    }
+
+    public Either<byte[], ChunkedByteArray> toVariant() {
+        checkClosed();
+        return result instanceof byte[] bytes ? Either.ofFirst(bytes)
+                : Either.ofSecond(new ChunkedByteArray(size, (ImmutableList<byte[]>) result));
+    }
+
+    @VisibleForTesting
+    ChunkedByteArray toChunkedByteArray() {
+        checkClosed();
+        return new ChunkedByteArray(size, result instanceof byte[] bytes ? ImmutableList.of(bytes)
+            : (ImmutableList<byte[]>) result);
+    }
+
+    private Object computeResult() {
+        if (prevChunks == null) {
+            // Simple case: it's only the current buffer, return that
+            return trimChunk(currentChunk, currentOffset);
+        }
+        if (size <= maxChunkSize) {
+            // We have collected less than full chunk of data, let's have just one chunk ...
+            final byte[] singleChunk;
+            if (currentOffset == 0 && prevChunks.size() == 1) {
+                // ... which we have readily available
+                return prevChunks.getFirst();
+            }
+
+            // ... which we need to collect
+            singleChunk = new byte[size];
+            int offset = 0;
+            for (byte[] chunk : prevChunks) {
+                System.arraycopy(chunk, 0, singleChunk, offset, chunk.length);
+                offset += chunk.length;
+            }
+            System.arraycopy(currentChunk, 0, singleChunk, offset, currentOffset);
+            return singleChunk;
+        }
+
+        // Determine number of chunks to aggregate and their required storage. Normally storage would be maxChunkSize,
+        // but we can have faster-than-exponential startup, which ends up needing less storage -- and we do not want to
+        // end up trimming this array.
+        int headSize = 0;
+        int headCount = 0;
+        final Iterator<byte[]> it = prevChunks.iterator();
+        do {
+            final byte[] chunk = it.next();
+            if (chunk.length == maxChunkSize) {
+                break;
+            }
+
+            headSize += chunk.length;
+            headCount++;
+        } while (it.hasNext());
+
+        // Compact initial chunks into a single one
+        final byte[] head = new byte[headSize];
+        int offset = 0;
+        for (int i = 0; i < headCount; ++i) {
+            final byte[] chunk = prevChunks.removeFirst();
+            System.arraycopy(chunk, 0, head, offset, chunk.length);
+            offset += chunk.length;
+        }
+        verify(offset == head.length);
+        prevChunks.addFirst(head);
+
+        // Now append the current chunk if need be, potentially trimming it
+        if (currentOffset == 0) {
+            return ImmutableList.copyOf(prevChunks);
+        }
+
+        final Builder<byte[]> builder = ImmutableList.builderWithExpectedSize(prevChunks.size() + 1);
+        builder.addAll(prevChunks);
+        builder.add(trimChunk(currentChunk, currentOffset));
+        return builder.build();
+    }
+
+    // Ensure a single byte
+    private void ensureOneByte() {
+        if (currentChunk.length == currentOffset) {
+            nextChunk(nextChunkSize(currentChunk.length));
+        }
+    }
+
+    // Ensure more than one byte, returns the number of bytes available
+    private int ensureMoreBytes(final int requested) {
+        int available = currentChunk.length - currentOffset;
+        if (available == 0) {
+            nextChunk(nextChunkSize(currentChunk.length, requested));
+            available = currentChunk.length;
+        }
+        final int count = Math.min(requested, available);
+        verify(count > 0);
+        return count;
+    }
+
+    private void nextChunk(final int chunkSize) {
+        if (prevChunks == null) {
+            prevChunks = new ArrayDeque<>();
+        }
+
+        prevChunks.addLast(currentChunk);
+        currentChunk = new byte[chunkSize];
+        currentOffset = 0;
+    }
+
+    private void checkClosed() {
+        checkState(result != null, "Stream has not been closed yet");
+    }
+
+    private void checkNotClosed() throws IOException {
+        if (result != null) {
+            throw new IOException("Stream is already closed");
+        }
+    }
+
+    private int nextChunkSize(final int currentSize, final int requested) {
+        return currentSize == maxChunkSize || requested >= maxChunkSize
+                ? maxChunkSize : Math.max(currentSize * 2, ceilingPowerOfTwo(requested));
+    }
+
+    private int nextChunkSize(final int currentSize) {
+        return currentSize < maxChunkSize ? currentSize * 2 : maxChunkSize;
+    }
+
+    private static int initialCapacity(final int requestedSize, final int maxChunkSize) {
+        if (requestedSize < MIN_ARRAY_SIZE) {
+            return MIN_ARRAY_SIZE;
+        }
+        if (requestedSize > maxChunkSize) {
+            return maxChunkSize;
+        }
+        return ceilingPowerOfTwo(requestedSize);
+    }
+
+    private static byte[] trimChunk(final byte[] chunk, final int length) {
+        return chunk.length == length ? chunk : Arrays.copyOf(chunk, length);
+    }
+}
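
A minimal sketch of the write-then-consolidate life cycle described in the class Javadoc above; the initial capacity and the maximum chunk size are arbitrary choices (the maximum must be a power of two):

import java.io.IOException;
import org.opendaylight.yangtools.concepts.Either;

final class ChunkedOutputStreamSketch {
    static Either<byte[], ChunkedByteArray> buffer(final byte[] payload) throws IOException {
        final ChunkedOutputStream cos = new ChunkedOutputStream(256, 64 * 1024);
        cos.write(payload);   // internal chunks double in size until they hit the 64 KiB cap
        cos.close();          // consolidation happens here and the stream becomes immutable
        // Small payloads come back as a single byte[], larger ones as a ChunkedByteArray.
        return cos.toVariant();
    }
}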
index 353a25156cc4940613a8a2a1a55b7ef8fb6c41fc..b00e4bee4e31a9853d574cf6d85fc260bf606922 100644 (file)
@@ -7,10 +7,6 @@
  */
 package org.opendaylight.controller.cluster.io;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
-import com.google.common.collect.Sets;
 import com.google.common.io.ByteSource;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.ByteArrayInputStream;
@@ -19,11 +15,11 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.lang.ref.Cleaner;
+import java.lang.ref.Cleaner.Cleanable;
 import java.nio.file.Files;
-import java.util.Iterator;
-import java.util.Set;
-import javax.annotation.concurrent.GuardedBy;
-import javax.annotation.concurrent.ThreadSafe;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
 import org.slf4j.Logger;
@@ -35,22 +31,14 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-@ThreadSafe
 public class FileBackedOutputStream extends OutputStream {
     private static final Logger LOG = LoggerFactory.getLogger(FileBackedOutputStream.class);
 
     /**
-     * This stores the Cleanup PhantomReference instances statically. This is necessary because PhantomReferences
-     * need a hard reference so they're not garbage collected. Once finalized, the Cleanup PhantomReference removes
-     * itself from this map and thus becomes eligible for garbage collection.
+     * A Cleaner instance responsible for deleting any files which may be lost due to us not cleaning up
+     * temporary files.
      */
-    @VisibleForTesting
-    static final Set<Cleanup> REFERENCE_CACHE = Sets.newConcurrentHashSet();
-
-    /**
-     * Used as the ReferenceQueue for the Cleanup PhantomReferences.
-     */
-    private static final FinalizableReferenceQueue REFERENCE_QUEUE = new FinalizableReferenceQueue();
+    private static final Cleaner FILE_CLEANER = Cleaner.create();
 
     private final int fileThreshold;
     private final String fileDirectory;
@@ -64,6 +52,9 @@ public class FileBackedOutputStream extends OutputStream {
     @GuardedBy("this")
     private File file;
 
+    @GuardedBy("this")
+    private Cleanable fileCleanup;
+
     @GuardedBy("this")
     private ByteSource source;
 
@@ -77,7 +68,7 @@ public class FileBackedOutputStream extends OutputStream {
      * @param fileDirectory the directory in which to create the file if needed. If null, the default temp file
      *                      location is used.
      */
-    public FileBackedOutputStream(int fileThreshold, @Nullable String fileDirectory) {
+    public FileBackedOutputStream(final int fileThreshold, @Nullable final String fileDirectory) {
         this.fileThreshold = fileThreshold;
         this.fileDirectory = fileDirectory;
     }
@@ -100,7 +91,7 @@ public class FileBackedOutputStream extends OutputStream {
                         if (file != null) {
                             return Files.newInputStream(file.toPath());
                         } else {
-                            return new ByteArrayInputStream(memory.getBuffer(), 0, memory.getCount());
+                            return new ByteArrayInputStream(memory.buf(), 0, memory.count());
                         }
                     }
                 }
@@ -118,19 +109,19 @@ public class FileBackedOutputStream extends OutputStream {
     @Override
     @SuppressFBWarnings(value = "VO_VOLATILE_INCREMENT", justification = "Findbugs erroneously complains that the "
         + "increment of count needs to be atomic even though it is inside a synchronized block.")
-    public synchronized void write(int value) throws IOException {
+    public synchronized void write(final int value) throws IOException {
         possiblySwitchToFile(1);
         out.write(value);
         count++;
     }
 
     @Override
-    public synchronized void write(byte[] bytes) throws IOException {
+    public synchronized void write(final byte[] bytes) throws IOException {
         write(bytes, 0, bytes.length);
     }
 
     @Override
-    public synchronized void write(byte[] bytes, int off, int len) throws IOException {
+    public synchronized void write(final byte[] bytes, final int off, final int len) throws IOException {
         possiblySwitchToFile(len);
         out.write(bytes, off, len);
         count += len;
@@ -161,26 +152,15 @@ public class FileBackedOutputStream extends OutputStream {
      */
     public synchronized void cleanup() {
         LOG.debug("In cleanup");
-
         closeQuietly();
-
-        if (file != null) {
-            Iterator<Cleanup> iter = REFERENCE_CACHE.iterator();
-            while (iter.hasNext()) {
-                if (file.equals(iter.next().file)) {
-                    iter.remove();
-                    break;
-                }
-            }
-
-            LOG.debug("cleanup - deleting temp file {}", file);
-
-            deleteFile(file);
-            file = null;
+        if (fileCleanup != null) {
+            fileCleanup.clean();
         }
+        // Already deleted above
+        file = null;
     }
 
-    @GuardedBy("this")
+    @Holding("this")
     private void closeQuietly() {
         try {
             close();
@@ -192,48 +172,50 @@ public class FileBackedOutputStream extends OutputStream {
     /**
      * Checks if writing {@code len} bytes would go over threshold, and switches to file buffering if so.
      */
-    @GuardedBy("this")
-    private void possiblySwitchToFile(int len) throws IOException {
+    @Holding("this")
+    private void possiblySwitchToFile(final int len) throws IOException {
         if (out == null) {
             throw new IOException("Stream already closed");
         }
 
-        if (file == null && memory.getCount() + len > fileThreshold) {
-            File temp = File.createTempFile("FileBackedOutputStream", null,
+        if (file == null && memory.count() + len > fileThreshold) {
+            final File temp = File.createTempFile("FileBackedOutputStream", null,
                     fileDirectory == null ? null : new File(fileDirectory));
             temp.deleteOnExit();
+            final Cleaner.Cleanable cleanup = FILE_CLEANER.register(this, () -> deleteFile(temp));
 
-            LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.getCount() + len,
+            LOG.debug("Byte count {} has exceeded threshold {} - switching to file: {}", memory.count() + len,
                     fileThreshold, temp);
 
-            OutputStream transfer = null;
+            final OutputStream transfer;
             try {
                 transfer = Files.newOutputStream(temp.toPath());
-                transfer.write(memory.getBuffer(), 0, memory.getCount());
-                transfer.flush();
-
-                // We've successfully transferred the data; switch to writing to file
-                out = transfer;
-                file = temp;
-                memory = null;
-
-                new Cleanup(this, file);
-            } catch (IOException e) {
-                if (transfer != null) {
+                try {
+                    transfer.write(memory.buf(), 0, memory.count());
+                    transfer.flush();
+                } catch (IOException e) {
                     try {
                         transfer.close();
                     } catch (IOException ex) {
                         LOG.debug("Error closing temp file {}", temp, ex);
                     }
+                    throw e;
                 }
-
-                deleteFile(temp);
+            } catch (IOException e) {
+                cleanup.clean();
                 throw e;
             }
+
+            // We've successfully transferred the data; switch to writing to file
+            out = transfer;
+            file = temp;
+            fileCleanup = cleanup;
+            memory = null;
         }
     }
 
-    private static void deleteFile(File file) {
+    private static void deleteFile(final File file) {
+        LOG.debug("Deleting temp file {}", file);
         if (!file.delete()) {
             LOG.warn("Could not delete temp file {}", file);
         }
@@ -242,39 +224,13 @@ public class FileBackedOutputStream extends OutputStream {
     /**
      * ByteArrayOutputStream that exposes its internals for efficiency.
      */
-    private static class MemoryOutputStream extends ByteArrayOutputStream {
-        byte[] getBuffer() {
+    private static final class MemoryOutputStream extends ByteArrayOutputStream {
+        byte[] buf() {
             return buf;
         }
 
-        int getCount() {
+        int count() {
             return count;
         }
     }
-
-    /**
-     * PhantomReference that deletes the temp file when the FileBackedOutputStream is garbage collected.
-     */
-    private static class Cleanup extends FinalizablePhantomReference<FileBackedOutputStream> {
-        private final File file;
-
-        Cleanup(FileBackedOutputStream referent, File file) {
-            super(referent, REFERENCE_QUEUE);
-            this.file = file;
-
-            REFERENCE_CACHE.add(this);
-
-            LOG.debug("Added Cleanup for temp file {}", file);
-        }
-
-        @Override
-        public void finalizeReferent() {
-            LOG.debug("In finalizeReferent");
-
-            if (REFERENCE_CACHE.remove(this)) {
-                LOG.debug("finalizeReferent - deleting temp file {}", file);
-                deleteFile(file);
-            }
-        }
-    }
 }
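
The FileBackedOutputStream change above swaps Guava's FinalizablePhantomReference machinery for java.lang.ref.Cleaner. A generic sketch of that pattern, independent of this class:

import java.io.File;
import java.lang.ref.Cleaner;
import java.lang.ref.Cleaner.Cleanable;

final class TempFileHolder {
    private static final Cleaner CLEANER = Cleaner.create();

    private final Cleanable cleanable;

    TempFileHolder(final File file) {
        // The cleanup action must not capture 'this', otherwise the holder could never become
        // phantom-reachable; capturing only 'file' keeps the pattern safe.
        cleanable = CLEANER.register(this, () -> file.delete());
    }

    void cleanup() {
        // Explicit cleanup runs the action at most once; the Cleaner will not run it again later.
        cleanable.clean();
    }
}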
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/InputOutputStreamFactory.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/InputOutputStreamFactory.java
new file mode 100644 (file)
index 0000000..01e1098
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.io.ByteSource;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import net.jpountz.lz4.LZ4FrameOutputStream;
+import org.eclipse.jdt.annotation.NonNull;
+
+@Beta
+public abstract class InputOutputStreamFactory {
+    InputOutputStreamFactory() {
+        // Hidden on purpose
+    }
+
+    public static @NonNull InputOutputStreamFactory simple() {
+        return PlainInputOutputStreamSupport.INSTANCE;
+    }
+
+    public static @NonNull InputOutputStreamFactory lz4(final String blockSize) {
+        return lz4(LZ4FrameOutputStream.BLOCKSIZE.valueOf("SIZE_" + blockSize));
+    }
+
+    public static @NonNull InputOutputStreamFactory lz4(final LZ4FrameOutputStream.BLOCKSIZE blockSize) {
+        return new LZ4InputOutputStreamSupport(requireNonNull(blockSize));
+    }
+
+    public abstract @NonNull InputStream createInputStream(ByteSource input) throws IOException;
+
+    public abstract @NonNull InputStream createInputStream(File file) throws IOException;
+
+    public abstract @NonNull OutputStream createOutputStream(File file) throws IOException;
+
+    public abstract @NonNull OutputStream wrapOutputStream(OutputStream output) throws IOException;
+
+    static @NonNull BufferedInputStream defaultCreateInputStream(final File file) throws FileNotFoundException {
+        return new BufferedInputStream(new FileInputStream(file));
+    }
+
+    static @NonNull BufferedOutputStream defaultCreateOutputStream(final File file) throws FileNotFoundException {
+        return new BufferedOutputStream(new FileOutputStream(file));
+    }
+}
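
A brief usage sketch of the factory API defined above; the "256KB" block size (which maps onto LZ4FrameOutputStream.BLOCKSIZE.SIZE_256KB) and the round-trip helper are illustrative choices:

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

final class StreamFactorySketch {
    static byte[] roundTrip(final File file, final byte[] data, final boolean compress) throws IOException {
        final InputOutputStreamFactory factory = compress
                ? InputOutputStreamFactory.lz4("256KB") : InputOutputStreamFactory.simple();

        try (OutputStream out = factory.createOutputStream(file)) {
            out.write(data);
        }
        try (InputStream in = factory.createInputStream(file)) {
            return in.readAllBytes();
        }
    }
}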
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/LZ4InputOutputStreamSupport.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/LZ4InputOutputStreamSupport.java
new file mode 100644 (file)
index 0000000..689b585
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import com.google.common.io.ByteSource;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import net.jpountz.lz4.LZ4Factory;
+import net.jpountz.lz4.LZ4FrameInputStream;
+import net.jpountz.lz4.LZ4FrameOutputStream;
+import net.jpountz.lz4.LZ4FrameOutputStream.FLG.Bits;
+import net.jpountz.xxhash.XXHashFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class LZ4InputOutputStreamSupport extends InputOutputStreamFactory {
+    private static final Logger LOG = LoggerFactory.getLogger(LZ4InputOutputStreamSupport.class);
+    private static final LZ4Factory LZ4_FACTORY = LZ4Factory.fastestInstance();
+    private static final XXHashFactory HASH_FACTORY = XXHashFactory.fastestInstance();
+
+    private final LZ4FrameOutputStream.BLOCKSIZE blocksize;
+
+    LZ4InputOutputStreamSupport(final LZ4FrameOutputStream.BLOCKSIZE blocksize) {
+        this.blocksize = blocksize;
+    }
+
+    @Override
+    public InputStream createInputStream(final ByteSource input) throws IOException {
+        final InputStream stream = input.openStream();
+        try {
+            return new LZ4FrameInputStream(stream, LZ4_FACTORY.safeDecompressor(), HASH_FACTORY.hash32());
+        } catch (IOException e) {
+            stream.close();
+            LOG.warn("Error loading with lz4 decompression, using default one", e);
+            return input.openBufferedStream();
+        }
+    }
+
+    @Override
+    public InputStream createInputStream(final File file) throws IOException {
+        final FileInputStream fileInput = new FileInputStream(file);
+        try {
+            return new LZ4FrameInputStream(fileInput, LZ4_FACTORY.safeDecompressor(), HASH_FACTORY.hash32());
+        } catch (IOException e) {
+            fileInput.close();
+            LOG.warn("Error loading file with lz4 decompression, using default one", e);
+            return defaultCreateInputStream(file);
+        }
+    }
+
+    @Override
+    public OutputStream createOutputStream(final File file) throws IOException {
+        return new LZ4FrameOutputStream(new FileOutputStream(file), blocksize, -1, LZ4_FACTORY.fastCompressor(),
+            HASH_FACTORY.hash32(), Bits.BLOCK_INDEPENDENCE);
+    }
+
+    @Override
+    public OutputStream wrapOutputStream(final OutputStream output) throws IOException {
+        return new LZ4FrameOutputStream(output, blocksize, -1, LZ4_FACTORY.fastCompressor(), HASH_FACTORY.hash32(),
+            Bits.BLOCK_INDEPENDENCE);
+    }
+}
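
For orientation, a minimal usage sketch of the factory introduced above, not part of this change. It assumes the public InputOutputStreamFactory.lz4(String) and InputOutputStreamFactory.simple() factory methods that LocalSnapshotStore uses later in this diff; the file name and the "256KB" block-size string are illustrative values only. Note that createInputStream() falls back to a plain buffered stream when the file turns out not to be LZ4-framed.

    import java.io.File;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import org.opendaylight.controller.cluster.io.InputOutputStreamFactory;

    public final class Lz4FactoryExample {
        public static void main(final String[] args) throws Exception {
            // Block size mirrors the "lz4-blocksize" setting read by LocalSnapshotStore below
            final InputOutputStreamFactory factory = InputOutputStreamFactory.lz4("256KB");
            final File file = new File("example-snapshot.bin");

            try (OutputStream out = factory.createOutputStream(file)) {
                out.write("hello".getBytes(StandardCharsets.UTF_8));
            }
            try (InputStream in = factory.createInputStream(file)) {
                // Prints "hello"; a non-LZ4 file would be read through the plain fallback stream
                System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8));
            }
        }
    }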
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/PlainInputOutputStreamSupport.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/io/PlainInputOutputStreamSupport.java
new file mode 100644 (file)
index 0000000..7287def
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import com.google.common.io.ByteSource;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import org.eclipse.jdt.annotation.NonNull;
+
+final class PlainInputOutputStreamSupport extends InputOutputStreamFactory {
+    static final @NonNull PlainInputOutputStreamSupport INSTANCE = new PlainInputOutputStreamSupport();
+
+    private PlainInputOutputStreamSupport() {
+        // Hidden on purpose
+    }
+
+    @Override
+    public InputStream createInputStream(final ByteSource input) throws IOException {
+        return input.openBufferedStream();
+    }
+
+    @Override
+    public InputStream createInputStream(final File file) throws IOException {
+        return defaultCreateInputStream(file);
+    }
+
+    @Override
+    public OutputStream createOutputStream(final File file) throws IOException {
+        return defaultCreateOutputStream(file);
+    }
+
+    @Override
+    public OutputStream wrapOutputStream(final OutputStream output) throws IOException {
+        return output;
+    }
+}
index b136ed7086d399b028a2a77252b2dce5be4544c7..9df2fd4d3276765634ec859a9e3fbfcdf724b0b2 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.messaging;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
@@ -26,7 +27,7 @@ class AbortSlicing implements Serializable {
     private final Identifier identifier;
 
     AbortSlicing(final Identifier identifier) {
-        this.identifier = Preconditions.checkNotNull(identifier);
+        this.identifier = requireNonNull(identifier);
     }
 
     Identifier getIdentifier() {
@@ -53,17 +54,17 @@ class AbortSlicing implements Serializable {
         public Proxy() {
         }
 
-        Proxy(AbortSlicing abortSlicing) {
+        Proxy(final AbortSlicing abortSlicing) {
             this.abortSlicing = abortSlicing;
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
+        public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeObject(abortSlicing.identifier);
         }
 
         @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             abortSlicing = new AbortSlicing((Identifier) in.readObject());
         }
 
index 16c73c715563fc7b6a2eb7dcab39db916b0ba34e..842fdafed4ed56a805cd9e026856e2e74ea7e4c4 100644 (file)
@@ -12,7 +12,6 @@ import com.google.common.io.ByteSource;
 import java.io.BufferedOutputStream;
 import java.io.IOException;
 import java.util.Arrays;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStreamFactory;
 import org.opendaylight.yangtools.concepts.Identifier;
@@ -20,11 +19,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Maintains the state of an assembled message.
+ * Maintains the state of an assembled message. This class is NOT thread-safe.
  *
  * @author Thomas Pantelis
  */
-@NotThreadSafe
 public class AssembledMessageState implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(AssembledMessageState.class);
 
index 297186d9f7c6bd9f13f8c6bea68ae33274931836..2b41bc595fad01f98f0969425fa11830316714f6 100644 (file)
@@ -42,11 +42,11 @@ public final  class MessageAssembler implements AutoCloseable {
     private final String logContext;
 
     MessageAssembler(final Builder builder) {
-        this.fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory,
+        fileBackedStreamFactory = requireNonNull(builder.fileBackedStreamFactory,
                 "FiledBackedStreamFactory cannot be null");
-        this.assembledMessageCallback = requireNonNull(builder.assembledMessageCallback,
+        assembledMessageCallback = requireNonNull(builder.assembledMessageCallback,
                 "assembledMessageCallback cannot be null");
-        this.logContext = builder.logContext;
+        logContext = builder.logContext;
 
         stateCache = CacheBuilder.newBuilder()
                 .expireAfterAccess(builder.expireStateAfterInactivityDuration, builder.expireStateAfterInactivityUnit)
@@ -97,13 +97,13 @@ public final  class MessageAssembler implements AutoCloseable {
      * @return true if the message was handled, false otherwise
      */
     public boolean handleMessage(final Object message, final @NonNull ActorRef sendTo) {
-        if (message instanceof MessageSlice) {
-            LOG.debug("{}: handleMessage: {}", logContext, message);
-            onMessageSlice((MessageSlice) message, sendTo);
+        if (message instanceof MessageSlice messageSlice) {
+            LOG.debug("{}: handleMessage: {}", logContext, messageSlice);
+            onMessageSlice(messageSlice, sendTo);
             return true;
-        } else if (message instanceof AbortSlicing) {
-            LOG.debug("{}: handleMessage: {}", logContext, message);
-            onAbortSlicing((AbortSlicing) message);
+        } else if (message instanceof AbortSlicing abortSlicing) {
+            LOG.debug("{}: handleMessage: {}", logContext, abortSlicing);
+            onAbortSlicing(abortSlicing);
             return true;
         }
 
@@ -116,14 +116,9 @@ public final  class MessageAssembler implements AutoCloseable {
             final AssembledMessageState state = stateCache.get(identifier, () -> createState(messageSlice));
             processMessageSliceForState(messageSlice, state, sendTo);
         } catch (ExecutionException e) {
-            final MessageSliceException messageSliceEx;
             final Throwable cause = e.getCause();
-            if (cause instanceof MessageSliceException) {
-                messageSliceEx = (MessageSliceException) cause;
-            } else {
-                messageSliceEx = new MessageSliceException(String.format(
-                        "Error creating state for identifier %s", identifier), cause);
-            }
+            final MessageSliceException messageSliceEx = cause instanceof MessageSliceException sliceEx ? sliceEx
+                : new MessageSliceException(String.format("Error creating state for identifier %s", identifier), cause);
 
             messageSlice.getReplyTo().tell(MessageSliceReply.failed(identifier, messageSliceEx, sendTo),
                     ActorRef.noSender());
@@ -231,7 +226,7 @@ public final  class MessageAssembler implements AutoCloseable {
          * @return this Builder
          */
         public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
-            this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+            fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
             return this;
         }
 
@@ -243,7 +238,7 @@ public final  class MessageAssembler implements AutoCloseable {
          * @return this Builder
          */
         public Builder assembledMessageCallback(final BiConsumer<Object, ActorRef> newAssembledMessageCallback) {
-            this.assembledMessageCallback = newAssembledMessageCallback;
+            assembledMessageCallback = newAssembledMessageCallback;
             return this;
         }
 
@@ -258,8 +253,8 @@ public final  class MessageAssembler implements AutoCloseable {
          */
         public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
             checkArgument(duration > 0, "duration must be > 0");
-            this.expireStateAfterInactivityDuration = duration;
-            this.expireStateAfterInactivityUnit = unit;
+            expireStateAfterInactivityDuration = duration;
+            expireStateAfterInactivityUnit = unit;
             return this;
         }
 
@@ -270,7 +265,7 @@ public final  class MessageAssembler implements AutoCloseable {
          * @return this Builder
          */
         public Builder logContext(final String newLogContext) {
-            this.logContext = newLogContext;
+            logContext = newLogContext;
             return this;
         }
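
The handleMessage() and onMessageSlice() rewrites above use Java 16+ pattern matching for instanceof, which binds the narrowed value to a variable in the test itself. A small self-contained illustration of the idiom, using placeholder types rather than the controller's message classes:

    public final class InstanceofPatternExample {
        static String describe(final Object message) {
            if (message instanceof String str) {
                // 'str' is already typed as String; no separate cast statement is needed
                return "string of length " + str.length();
            } else if (message instanceof Integer num) {
                return "integer " + num;
            }
            return "unhandled: " + message;
        }

        public static void main(final String[] args) {
            System.out.println(describe("hello"));   // string of length 5
            System.out.println(describe(42));        // integer 42
        }
    }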
 
index ae711df74a88ceb5e9cfe20fdf4428c7d8efa70d..50e0460836c8a7b409fa2c21b265fb6944737a6e 100644 (file)
@@ -7,10 +7,11 @@
  */
 package org.opendaylight.controller.cluster.messaging;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.serialization.JavaSerializer;
 import akka.serialization.Serialization;
-import com.google.common.base.Preconditions;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
 import java.io.IOException;
@@ -34,14 +35,14 @@ public class MessageSlice implements Serializable {
     private final int lastSliceHashCode;
     private final ActorRef replyTo;
 
-    MessageSlice(Identifier identifier, byte[] data, int sliceIndex, int totalSlices, int lastSliceHashCode,
-            final ActorRef replyTo) {
-        this.identifier = Preconditions.checkNotNull(identifier);
-        this.data = Preconditions.checkNotNull(data);
+    MessageSlice(final Identifier identifier, final byte[] data, final int sliceIndex, final int totalSlices,
+            final int lastSliceHashCode, final ActorRef replyTo) {
+        this.identifier = requireNonNull(identifier);
+        this.data = requireNonNull(data);
         this.sliceIndex = sliceIndex;
         this.totalSlices = totalSlices;
         this.lastSliceHashCode = lastSliceHashCode;
-        this.replyTo = Preconditions.checkNotNull(replyTo);
+        this.replyTo = requireNonNull(replyTo);
     }
 
     public Identifier getIdentifier() {
@@ -93,12 +94,12 @@ public class MessageSlice implements Serializable {
         public Proxy() {
         }
 
-        Proxy(MessageSlice messageSlice) {
+        Proxy(final MessageSlice messageSlice) {
             this.messageSlice = messageSlice;
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
+        public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeObject(messageSlice.identifier);
             out.writeInt(messageSlice.sliceIndex);
             out.writeInt(messageSlice.totalSlices);
@@ -108,7 +109,7 @@ public class MessageSlice implements Serializable {
         }
 
         @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             Identifier identifier = (Identifier) in.readObject();
             int sliceIndex = in.readInt();
             int totalSlices = in.readInt();
index f81cbb00a11085b95fd8228d7b09e7c48825f92b..852e7f9b19212f8b6a5ac1da6e22db2ef6bb6607 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.messaging;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
@@ -34,7 +35,7 @@ final class MessageSliceIdentifier implements Identifier {
     }
 
     private MessageSliceIdentifier(final Identifier clientIdentifier, final long slicerId, final long messageId) {
-        this.clientIdentifier = Preconditions.checkNotNull(clientIdentifier);
+        this.clientIdentifier = requireNonNull(clientIdentifier);
         this.messageId = messageId;
         this.slicerId = slicerId;
     }
@@ -58,18 +59,10 @@ final class MessageSliceIdentifier implements Identifier {
     }
 
     @Override
-    public boolean equals(Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (!(obj instanceof MessageSliceIdentifier)) {
-            return false;
-        }
-
-        MessageSliceIdentifier other = (MessageSliceIdentifier) obj;
-        return other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId
-                && other.messageId == messageId;
+    public boolean equals(final Object obj) {
+        return this == obj || obj instanceof MessageSliceIdentifier other
+            && other.clientIdentifier.equals(clientIdentifier) && other.slicerId == slicerId
+            && other.messageId == messageId;
     }
 
     @Override
@@ -93,18 +86,18 @@ final class MessageSliceIdentifier implements Identifier {
         public Proxy() {
         }
 
-        Proxy(MessageSliceIdentifier messageSliceId) {
+        Proxy(final MessageSliceIdentifier messageSliceId) {
             this.messageSliceId = messageSliceId;
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
+        public void writeExternal(final ObjectOutput out) throws IOException {
             out.writeObject(messageSliceId.clientIdentifier);
             WritableObjects.writeLongs(out, messageSliceId.slicerId, messageSliceId.messageId);
         }
 
         @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
             final Identifier clientIdentifier = (Identifier) in.readObject();
             final byte header = WritableObjects.readLongHeader(in);
             final long slicerId =  WritableObjects.readFirstLong(in, header);
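
The condensed equals() above folds the early returns into a single expression; because && binds more tightly than ||, it reads as (this == obj) || (pattern match && field comparisons) without extra parentheses. A minimal self-contained class using the same shape, with a hypothetical single field:

    public final class EqualsPatternExample {
        private final String name;

        EqualsPatternExample(final String name) {
            this.name = name;
        }

        @Override
        public boolean equals(final Object obj) {
            // Reads as: (this == obj) || ((obj instanceof EqualsPatternExample other) && name.equals(other.name))
            return this == obj || obj instanceof EqualsPatternExample other && name.equals(other.name);
        }

        @Override
        public int hashCode() {
            return name.hashCode();
        }
    }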
index bfcfdede850bdd3a273e72d7c8865e164b3a5040..e820c4ba3f881c57a821cfd706ecd5fd12504b66 100644 (file)
@@ -7,10 +7,11 @@
  */
 package org.opendaylight.controller.cluster.messaging;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.serialization.JavaSerializer;
 import akka.serialization.Serialization;
-import com.google.common.base.Preconditions;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
@@ -34,9 +35,9 @@ public final class MessageSliceReply implements Serializable {
 
     private MessageSliceReply(final Identifier identifier, final int sliceIndex, final MessageSliceException failure,
             final ActorRef sendTo) {
-        this.identifier = Preconditions.checkNotNull(identifier);
+        this.identifier = requireNonNull(identifier);
         this.sliceIndex = sliceIndex;
-        this.sendTo = Preconditions.checkNotNull(sendTo);
+        this.sendTo = requireNonNull(sendTo);
         this.failure = failure;
     }
 
index 57a6f9ed4f44d48ffb2720126b32c21627222b3a..f30dbc66a71966f3691bf57a471a730aaa3b986b 100644 (file)
@@ -48,12 +48,12 @@ public class MessageSlicer implements AutoCloseable {
     private final long id;
 
     MessageSlicer(final Builder builder) {
-        this.fileBackedStreamFactory = builder.fileBackedStreamFactory;
-        this.messageSliceSize = builder.messageSliceSize;
-        this.maxSlicingTries = builder.maxSlicingTries;
+        fileBackedStreamFactory = builder.fileBackedStreamFactory;
+        messageSliceSize = builder.messageSliceSize;
+        maxSlicingTries = builder.maxSlicingTries;
 
         id = SLICER_ID_COUNTER.getAndIncrement();
-        this.logContext = builder.logContext + "_slicer-id-" + id;
+        logContext = builder.logContext + "_slicer-id-" + id;
 
         CacheBuilder<Identifier, SlicedMessageState<ActorRef>> cacheBuilder =
                 CacheBuilder.newBuilder().removalListener(this::stateRemoved);
@@ -174,9 +174,9 @@ public class MessageSlicer implements AutoCloseable {
      * @return true if the message was handled, false otherwise
      */
     public boolean handleMessage(final Object message) {
-        if (message instanceof MessageSliceReply) {
-            LOG.debug("{}: handleMessage: {}", logContext, message);
-            return onMessageSliceReply((MessageSliceReply) message);
+        if (message instanceof MessageSliceReply sliceReply) {
+            LOG.debug("{}: handleMessage: {}", logContext, sliceReply);
+            return onMessageSliceReply(sliceReply);
         }
 
         return false;
@@ -219,8 +219,7 @@ public class MessageSlicer implements AutoCloseable {
 
     private boolean onMessageSliceReply(final MessageSliceReply reply) {
         final Identifier identifier = reply.getIdentifier();
-        if (!(identifier instanceof MessageSliceIdentifier)
-                || ((MessageSliceIdentifier)identifier).getSlicerId() != id) {
+        if (!(identifier instanceof MessageSliceIdentifier sliceIdentifier) || sliceIdentifier.getSlicerId() != id) {
             return false;
         }
 
@@ -236,7 +235,7 @@ public class MessageSlicer implements AutoCloseable {
                 final Optional<MessageSliceException> failure = reply.getFailure();
                 if (failure.isPresent()) {
                     LOG.warn("{}: Received failed {}", logContext, reply);
-                    processMessageSliceException(failure.get(), state, reply.getSendTo());
+                    processMessageSliceException(failure.orElseThrow(), state, reply.getSendTo());
                     return true;
                 }
 
@@ -336,7 +335,7 @@ public class MessageSlicer implements AutoCloseable {
          * @return this Builder
          */
         public Builder fileBackedStreamFactory(final FileBackedOutputStreamFactory newFileBackedStreamFactory) {
-            this.fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
+            fileBackedStreamFactory = requireNonNull(newFileBackedStreamFactory);
             return this;
         }
 
@@ -348,7 +347,7 @@ public class MessageSlicer implements AutoCloseable {
          */
         public Builder messageSliceSize(final int newMessageSliceSize) {
             checkArgument(newMessageSliceSize > 0, "messageSliceSize must be > 0");
-            this.messageSliceSize = newMessageSliceSize;
+            messageSliceSize = newMessageSliceSize;
             return this;
         }
 
@@ -361,7 +360,7 @@ public class MessageSlicer implements AutoCloseable {
          */
         public Builder maxSlicingTries(final int newMaxSlicingTries) {
             checkArgument(newMaxSlicingTries > 0, "newMaxSlicingTries must be > 0");
-            this.maxSlicingTries = newMaxSlicingTries;
+            maxSlicingTries = newMaxSlicingTries;
             return this;
         }
 
@@ -376,8 +375,8 @@ public class MessageSlicer implements AutoCloseable {
          */
         public Builder expireStateAfterInactivity(final long duration, final TimeUnit unit) {
             checkArgument(duration > 0, "duration must be > 0");
-            this.expireStateAfterInactivityDuration = duration;
-            this.expireStateAfterInactivityUnit = unit;
+            expireStateAfterInactivityDuration = duration;
+            expireStateAfterInactivityUnit = unit;
             return this;
         }
 
@@ -388,7 +387,7 @@ public class MessageSlicer implements AutoCloseable {
          * @return this Builder
          */
         public Builder logContext(final String newLogContext) {
-            this.logContext = requireNonNull(newLogContext);
+            logContext = requireNonNull(newLogContext);
             return this;
         }
 
index 6bae647cb7a43a1ef000961b48333104f7f6857a..9ba69fb65580fdf4da1960960dbdb9031baa4669 100644 (file)
@@ -7,9 +7,11 @@
  */
 package org.opendaylight.controller.cluster.messaging;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import com.google.common.base.Preconditions;
 import java.io.Serializable;
 import java.util.function.Consumer;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
@@ -174,23 +176,23 @@ public final class SliceOptions {
         public SliceOptions build() {
             sealed = true;
 
-            Preconditions.checkNotNull(identifier, "identifier must be set");
-            Preconditions.checkNotNull(replyTo, "replyTo must be set");
-            Preconditions.checkNotNull(onFailureCallback, "onFailureCallback must be set");
-            Preconditions.checkState(fileBackedStream == null || message == null,
+            requireNonNull(identifier, "identifier must be set");
+            requireNonNull(replyTo, "replyTo must be set");
+            requireNonNull(onFailureCallback, "onFailureCallback must be set");
+            checkState(fileBackedStream == null || message == null,
                     "Only one of message and fileBackedStream can be set");
-            Preconditions.checkState(!(fileBackedStream == null && message == null),
+            checkState(!(fileBackedStream == null && message == null),
                     "One of message and fileBackedStream must be set");
-            Preconditions.checkState(sendToRef == null || sendToSelection == null,
+            checkState(sendToRef == null || sendToSelection == null,
                     "Only one of sendToRef and sendToSelection can be set");
-            Preconditions.checkState(!(sendToRef == null && sendToSelection == null),
+            checkState(!(sendToRef == null && sendToSelection == null),
                     "One of sendToRef and sendToSelection must be set");
 
             return new SliceOptions(this);
         }
 
         protected void checkSealed() {
-            Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
+            checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
         }
     }
 }
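
The build() method above now separates the two kinds of validation: requireNonNull for values that must be present (throws NullPointerException) and Guava's checkState for invariants between fields (throws IllegalStateException). A short sketch of the same split, with hypothetical fields rather than the actual SliceOptions contract:

    import static com.google.common.base.Preconditions.checkState;
    import static java.util.Objects.requireNonNull;

    public final class BuilderValidationExample {
        private String identifier;
        private Object message;
        private Object stream;

        void validate() {
            // Missing mandatory value: NullPointerException with the supplied message
            requireNonNull(identifier, "identifier must be set");
            // Cross-field invariants: IllegalStateException with the supplied message
            checkState(message == null || stream == null, "Only one of message and stream can be set");
            checkState(message != null || stream != null, "One of message and stream must be set");
        }
    }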
index 8c3cb51713d53b3be32a76702c3f85a748aa540b..5be3fa46d2bcbbb5e2d7c1544702d85e23cc71cf 100644 (file)
@@ -12,19 +12,17 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.Arrays;
 import java.util.function.Consumer;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.io.FileBackedOutputStream;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Maintains the state of a sliced message.
+ * Maintains the state of a sliced message. This class is NOT thread-safe.
  *
  * @author Thomas Pantelis
  * @see MessageSlicer
  */
-@NotThreadSafe
 public class SlicedMessageState<T> implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(SlicedMessageState.class);
 
@@ -153,7 +151,7 @@ public class SlicedMessageState<T> implements AutoCloseable {
      * @param index the slice index to test
      * @return true if the index is the last slice, false otherwise
      */
-    public boolean isLastSlice(int index) {
+    public boolean isLastSlice(final int index) {
         return totalSlices == index;
     }
 
index caa1a8debf53911bd2193c085c36a64a5aea2117..373823ef0ffeaf0ac37140aab1d437c7e4753979 100644 (file)
@@ -18,11 +18,12 @@ import org.eclipse.jdt.annotation.Nullable;
  * @author Thomas Pantelis
  */
 public class LeaderStateChanged {
-    private final String memberId;
-    private final String leaderId;
+    private final @NonNull String memberId;
+    private final @Nullable String leaderId;
     private final short leaderPayloadVersion;
 
-    public LeaderStateChanged(@NonNull String memberId, @Nullable String leaderId, short leaderPayloadVersion) {
+    public LeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+            final short leaderPayloadVersion) {
         this.memberId = requireNonNull(memberId);
         this.leaderId = leaderId;
         this.leaderPayloadVersion = leaderPayloadVersion;
index 89f9dd12e050aab2e99da50cf1e945a2fccf4ab5..ed0c10a7172b8edeb759819f8ddf1dc861b87ea1 100644 (file)
@@ -5,14 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.notifications;
 
 import akka.actor.ActorPath;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.serialization.Serialization;
-import com.google.common.collect.Maps;
+import java.util.HashMap;
 import java.util.Map;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 
@@ -26,11 +25,11 @@ import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 public class RoleChangeNotifier extends AbstractUntypedActor implements AutoCloseable {
 
     private final String memberId;
-    private final Map<ActorPath, ActorRef> registeredListeners = Maps.newHashMap();
+    private final Map<ActorPath, ActorRef> registeredListeners = new HashMap<>();
     private RoleChangeNotification latestRoleChangeNotification = null;
     private LeaderStateChanged latestLeaderStateChanged;
 
-    public RoleChangeNotifier(String memberId) {
+    public RoleChangeNotifier(final String memberId) {
         this.memberId = memberId;
     }
 
@@ -46,7 +45,7 @@ public class RoleChangeNotifier extends AbstractUntypedActor implements AutoClos
     }
 
     @Override
-    protected void handleReceive(Object message) {
+    protected void handleReceive(final Object message) {
         if (message instanceof RegisterRoleChangeListener) {
             // register listeners for this shard
 
@@ -73,9 +72,8 @@ public class RoleChangeNotifier extends AbstractUntypedActor implements AutoClos
             }
 
 
-        } else if (message instanceof RoleChanged) {
+        } else if (message instanceof RoleChanged roleChanged) {
             // this message is sent by RaftActor. Notify registered listeners when this message is received.
-            RoleChanged roleChanged = (RoleChanged) message;
 
             LOG.info("RoleChangeNotifier for {} , received role change from {} to {}", memberId,
                 roleChanged.getOldRole(), roleChanged.getNewRole());
@@ -84,13 +82,13 @@ public class RoleChangeNotifier extends AbstractUntypedActor implements AutoClos
                 new RoleChangeNotification(roleChanged.getMemberId(),
                     roleChanged.getOldRole(), roleChanged.getNewRole());
 
-            for (ActorRef listener: registeredListeners.values()) {
+            for (ActorRef listener : registeredListeners.values()) {
                 listener.tell(latestRoleChangeNotification, getSelf());
             }
-        } else if (message instanceof LeaderStateChanged) {
-            latestLeaderStateChanged = (LeaderStateChanged)message;
+        } else if (message instanceof LeaderStateChanged leaderStateChanged) {
+            latestLeaderStateChanged = leaderStateChanged;
 
-            for (ActorRef listener: registeredListeners.values()) {
+            for (ActorRef listener : registeredListeners.values()) {
                 listener.tell(latestLeaderStateChanged, getSelf());
             }
         } else {
index 76babc15db7d831403e985c658e3a3cef46f3e99..c7963056753c43a0062440e016dc80b465c79549 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.persistence;
 
+import static com.google.common.base.Preconditions.checkArgument;
+
 import akka.actor.ExtendedActorSystem;
 import akka.dispatch.Futures;
 import akka.persistence.SelectedSnapshot;
@@ -22,12 +24,10 @@ import com.typesafe.config.Config;
 import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
-import java.io.UnsupportedEncodingException;
 import java.net.URLDecoder;
 import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
@@ -45,6 +45,7 @@ import java.util.stream.Collector;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.cluster.io.InputOutputStreamFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.ExecutionContext;
@@ -58,22 +59,31 @@ import scala.concurrent.Future;
  *
  * @author Thomas Pantelis
  */
-public class LocalSnapshotStore extends SnapshotStore {
+public final class LocalSnapshotStore extends SnapshotStore {
     private static final Logger LOG = LoggerFactory.getLogger(LocalSnapshotStore.class);
-
     private static final int PERSISTENCE_ID_START_INDEX = "snapshot-".length();
 
+    private final InputOutputStreamFactory streamFactory;
     private final ExecutionContext executionContext;
     private final int maxLoadAttempts;
     private final File snapshotDir;
 
     public LocalSnapshotStore(final Config config) {
-        this.executionContext = context().system().dispatchers().lookup(config.getString("stream-dispatcher"));
+        executionContext = context().system().dispatchers().lookup(config.getString("stream-dispatcher"));
         snapshotDir = new File(config.getString("dir"));
 
-        int localMaxLoadAttempts = config.getInt("max-load-attempts");
+        final int localMaxLoadAttempts = config.getInt("max-load-attempts");
         maxLoadAttempts = localMaxLoadAttempts > 0 ? localMaxLoadAttempts : 1;
 
+        if (config.getBoolean("use-lz4-compression")) {
+            final String size = config.getString("lz4-blocksize");
+            streamFactory = InputOutputStreamFactory.lz4(size);
+            LOG.debug("Using LZ4 Input/Output Stream, blocksize: {}", size);
+        } else {
+            streamFactory = InputOutputStreamFactory.simple();
+            LOG.debug("Using plain Input/Output Stream");
+        }
+
         LOG.debug("LocalSnapshotStore ctor: snapshotDir: {}, maxLoadAttempts: {}", snapshotDir, maxLoadAttempts);
     }
 
@@ -137,7 +147,7 @@ public class LocalSnapshotStore extends SnapshotStore {
     private Object deserialize(final File file) throws IOException {
         return JavaSerializer.currentSystem().withValue((ExtendedActorSystem) context().system(),
             (Callable<Object>) () -> {
-                try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(file))) {
+                try (ObjectInputStream in = new ObjectInputStream(streamFactory.createInputStream(file))) {
                     return in.readObject();
                 } catch (ClassNotFoundException e) {
                     throw new IOException("Error loading snapshot file " + file, e);
@@ -175,7 +185,7 @@ public class LocalSnapshotStore extends SnapshotStore {
 
         LOG.debug("Saving to temp file: {}", temp);
 
-        try (ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(temp))) {
+        try (ObjectOutputStream out = new ObjectOutputStream(streamFactory.createOutputStream(temp))) {
             out.writeObject(snapshot);
         } catch (IOException e) {
             LOG.error("Error saving snapshot file {}. Deleting file..", temp, e);
@@ -223,7 +233,13 @@ public class LocalSnapshotStore extends SnapshotStore {
 
         LOG.debug("Deleting files: {}", files);
 
-        files.forEach(File::delete);
+        files.forEach(file -> {
+            try {
+                Files.delete(file.toPath());
+            } catch (IOException | SecurityException e) {
+                LOG.error("Unable to delete snapshot file: {}, persistenceId: {}", file, persistenceId, e);
+            }
+        });
         return null;
     }
 
@@ -232,7 +248,13 @@ public class LocalSnapshotStore extends SnapshotStore {
 
         LOG.debug("Deleting files: {}", files);
 
-        files.forEach(File::delete);
+        files.forEach(file -> {
+            try {
+                Files.delete(file.toPath());
+            } catch (IOException | SecurityException e) {
+                LOG.error("Unable to delete snapshot file: {}", file, e);
+            }
+        });
         return null;
     }
 
@@ -308,30 +330,18 @@ public class LocalSnapshotStore extends SnapshotStore {
     }
 
     private static String encode(final String str) {
-        try {
-            return URLEncoder.encode(str, StandardCharsets.UTF_8.name());
-        } catch (UnsupportedEncodingException e) {
-            // Shouldn't happen
-            LOG.warn("Error encoding {}", str, e);
-            return str;
-        }
+        return URLEncoder.encode(str, StandardCharsets.UTF_8);
     }
 
     private static String decode(final String str) {
-        try {
-            return URLDecoder.decode(str, StandardCharsets.UTF_8.name());
-        } catch (final UnsupportedEncodingException e) {
-            // Shouldn't happen
-            LOG.warn("Error decoding {}", str, e);
-            return str;
-        }
+        return URLDecoder.decode(str, StandardCharsets.UTF_8);
     }
 
     @VisibleForTesting
     static int compare(final SnapshotMetadata m1, final SnapshotMetadata m2) {
-        return (int) (!m1.persistenceId().equals(m2.persistenceId())
-                ? m1.persistenceId().compareTo(m2.persistenceId()) :
-            m1.sequenceNr() != m2.sequenceNr() ? m1.sequenceNr() - m2.sequenceNr() :
-                m1.timestamp() != m2.timestamp() ? m1.timestamp() - m2.timestamp() : 0);
+        checkArgument(m1.persistenceId().equals(m2.persistenceId()),
+                "Persistence id does not match. id1: %s, id2: %s", m1.persistenceId(), m2.persistenceId());
+        final int cmp = Long.compare(m1.timestamp(), m2.timestamp());
+        return cmp != 0 ? cmp : Long.compare(m1.sequenceNr(), m2.sequenceNr());
     }
 }
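
The constructor above reads its settings from the snapshot-store section of the actor system configuration. A hedged sketch of exercising those reads with Typesafe Config: the key names (dir, max-load-attempts, stream-dispatcher, use-lz4-compression, lz4-blocksize) come from the constructor, while the values shown are illustrative and not the project's shipped defaults.

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    public final class SnapshotStoreConfigExample {
        public static void main(final String[] args) {
            final Config config = ConfigFactory.parseString("""
                dir = "target/snapshots"
                max-load-attempts = 3
                stream-dispatcher = "akka.persistence.dispatchers.default-stream-dispatcher"
                use-lz4-compression = true
                lz4-blocksize = "256KB"
                """);

            // The same lookups LocalSnapshotStore performs in its constructor
            System.out.println(config.getString("dir"));
            System.out.println(config.getInt("max-load-attempts"));
            System.out.println(config.getBoolean("use-lz4-compression"));
            System.out.println(config.getString("lz4-blocksize"));
        }
    }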
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java
deleted file mode 100644 (file)
index b970ba4..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
-
-/**
- * An instance of a Payload class is meant to be used as the Payload for
- * AppendEntries.
- *
- * <p>
- * When an actor which is derived from RaftActor attempts to persistData it
- * must pass an instance of the Payload class. Similarly when state needs to
- * be applied to the derived RaftActor it will be passed an instance of the
- * Payload class.
- */
-public abstract class Payload {
-    public abstract int size();
-}
index 03b44b585129e97f199310b955272c57e8a055e8..c8ceb13ed97d429ef8dab7b1be26fe0a7b7ba2e7 100644 (file)
@@ -11,7 +11,7 @@ import com.google.common.annotations.Beta;
 import java.util.Set;
 import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.schema.provider.impl.YangTextSchemaSourceSerializationProxy;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
 import scala.concurrent.Future;
 
 /**
index 3208e390a44104e2c245ca8c47b448bb175611d4..7a53188718e904115c99e2d4474543b90f802b94 100644 (file)
@@ -5,70 +5,53 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
 import akka.dispatch.OnComplete;
 import com.google.common.annotations.Beta;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
 import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.ExecutionContext;
-import scala.concurrent.Future;
 
 /**
  * Provides schema sources from {@link RemoteYangTextSourceProvider}.
  */
 @Beta
-public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSchemaSource> {
-
+public class RemoteSchemaProvider implements SchemaSourceProvider<YangTextSource> {
     private static final Logger LOG = LoggerFactory.getLogger(RemoteSchemaProvider.class);
 
     private final RemoteYangTextSourceProvider remoteRepo;
     private final ExecutionContext executionContext;
 
-    private static final ExceptionMapper<SchemaSourceException> MAPPER = new ExceptionMapper<SchemaSourceException>(
-            "schemaDownload", SchemaSourceException.class) {
-        @Override
-        protected SchemaSourceException newWithCause(final String message, final Throwable throwable) {
-            return new SchemaSourceException(message, throwable);
-        }
-    };
-
-    public RemoteSchemaProvider(RemoteYangTextSourceProvider remoteRepo, ExecutionContext executionContext) {
+    public RemoteSchemaProvider(final RemoteYangTextSourceProvider remoteRepo,
+            final ExecutionContext executionContext) {
         this.remoteRepo = remoteRepo;
         this.executionContext = executionContext;
     }
 
     @Override
-    public CheckedFuture<YangTextSchemaSource, SchemaSourceException> getSource(SourceIdentifier sourceIdentifier) {
-        LOG.trace("Getting yang schema source for {}", sourceIdentifier.getName());
+    public ListenableFuture<YangTextSource> getSource(final SourceIdentifier sourceIdentifier) {
+        LOG.trace("Getting yang schema source for {}", sourceIdentifier.name().getLocalName());
 
-        Future<YangTextSchemaSourceSerializationProxy> result = remoteRepo.getYangTextSchemaSource(sourceIdentifier);
-
-        final SettableFuture<YangTextSchemaSource> res = SettableFuture.create();
-        result.onComplete(new OnComplete<YangTextSchemaSourceSerializationProxy>() {
+        final var res = SettableFuture.<YangTextSource>create();
+        remoteRepo.getYangTextSchemaSource(sourceIdentifier).onComplete(new OnComplete<>() {
             @Override
-            public void onComplete(Throwable throwable,
-                    YangTextSchemaSourceSerializationProxy yangTextSchemaSourceSerializationProxy) {
-                if (yangTextSchemaSourceSerializationProxy != null) {
-                    res.set(yangTextSchemaSourceSerializationProxy.getRepresentation());
+            public void onComplete(final Throwable failure, final YangTextSchemaSourceSerializationProxy success) {
+                if (success != null) {
+                    res.set(success.getRepresentation());
                 }
-                if (throwable != null) {
-                    res.setException(throwable);
+                if (failure != null) {
+                    res.setException(failure);
                 }
             }
-
         }, executionContext);
 
-        return Futures.makeChecked(res, MAPPER);
+        return res;
     }
 }
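
The getSource() rewrite above bridges an Akka (Scala) Future into a Guava ListenableFuture by completing a SettableFuture from an OnComplete callback. A generic self-contained sketch of that bridge, using only the Akka, Guava and Scala types already imported by the class; the helper name is hypothetical:

    import akka.dispatch.OnComplete;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.SettableFuture;
    import scala.concurrent.ExecutionContext;
    import scala.concurrent.Future;

    public final class ScalaFutureBridge {
        // Complete a Guava future from the Scala future's outcome, on the given execution context
        static <T> ListenableFuture<T> toListenable(final Future<T> scalaFuture, final ExecutionContext ec) {
            final SettableFuture<T> result = SettableFuture.create();
            scalaFuture.onComplete(new OnComplete<T>() {
                @Override
                public void onComplete(final Throwable failure, final T success) {
                    if (failure != null) {
                        result.setException(failure);
                    } else {
                        result.set(success);
                    }
                }
            }, ec);
            return result;
        }
    }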
index 5e88952dda26f2e2ad670c202f6f67b0e795e965..eea0aa86071d115e04f4bfbd23b83f8fa29a066d 100644 (file)
@@ -17,9 +17,9 @@ import com.google.common.util.concurrent.MoreExecutors;
 import java.io.IOException;
 import java.util.Set;
 import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -51,16 +51,16 @@ public class RemoteYangTextSourceProviderImpl implements RemoteYangTextSourcePro
         LOG.trace("Sending yang schema source for {}", identifier);
 
         final Promise<YangTextSchemaSourceSerializationProxy> promise = akka.dispatch.Futures.promise();
-        ListenableFuture<YangTextSchemaSource> future =
-                repository.getSchemaSource(identifier, YangTextSchemaSource.class);
+        ListenableFuture<YangTextSource> future =
+                repository.getSchemaSource(identifier, YangTextSource.class);
 
-        Futures.addCallback(future, new FutureCallback<YangTextSchemaSource>() {
+        Futures.addCallback(future, new FutureCallback<YangTextSource>() {
             @Override
-            public void onSuccess(final YangTextSchemaSource result) {
+            public void onSuccess(final YangTextSource result) {
                 try {
                     promise.success(new YangTextSchemaSourceSerializationProxy(result));
                 } catch (IOException e) {
-                    LOG.warn("Unable to read schema source for {}", result.getIdentifier(), e);
+                    LOG.warn("Unable to read schema source for {}", result.sourceId(), e);
                     promise.failure(e);
                 }
             }
index 202de58a2780fd52efc401b0779a54e287f731b8..9ad9948e6c86c4ac0272cc04be5c8860dae11348 100644 (file)
@@ -5,36 +5,38 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
 import com.google.common.annotations.Beta;
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
 import java.io.IOException;
 import java.io.Serializable;
 import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.common.UnresolvedQName.Unqualified;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 
 /**
- * {@link org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource} serialization proxy.
+ * {@link YangTextSource} serialization proxy.
  */
 @Beta
 public class YangTextSchemaSourceSerializationProxy implements Serializable {
     private static final long serialVersionUID = -6361268518176019477L;
 
-    private final byte[] schemaSource;
+    private final String schemaSource;
     private final Revision revision;
     private final String name;
 
-    public YangTextSchemaSourceSerializationProxy(final YangTextSchemaSource source) throws IOException {
-        this.revision = source.getIdentifier().getRevision().orElse(null);
-        this.name = source.getIdentifier().getName();
-        this.schemaSource = source.read();
+    public YangTextSchemaSourceSerializationProxy(final YangTextSource source) throws IOException {
+        final var sourceId = source.sourceId();
+        revision = sourceId.revision();
+        name = sourceId.name().getLocalName();
+        schemaSource = source.read();
     }
 
-    public YangTextSchemaSource getRepresentation() {
-        return YangTextSchemaSource.delegateForByteSource(
-                RevisionSourceIdentifier.create(name, revision), ByteSource.wrap(schemaSource));
+    public YangTextSource getRepresentation() {
+        return new DelegatedYangTextSource(new SourceIdentifier(Unqualified.of(name), revision),
+            CharSource.wrap(schemaSource));
     }
 }
index 3bead64be8ec62dac7c4e763aad690468d8a998a..f1c3eae731da0543bd030f91856a3e64e2e6630a 100644 (file)
@@ -10,11 +10,11 @@ package org.opendaylight.controller.cluster.common.actor;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 
 import com.google.common.testing.FakeTicker;
 import java.util.List;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -56,9 +56,8 @@ public class MessageTrackerTest {
         ticker.advance(20, MILLISECONDS);
 
         MessageTracker.Context context2 = messageTracker.received(new Foo());
-        Assert.assertEquals(true, context2.error().isPresent());
-        Assert.assertEquals(0, context2.error().get().getMessageProcessingTimesSinceLastExpectedMessage().size());
-
+        assertEquals(true, context2.error().isPresent());
+        assertEquals(0, context2.error().orElseThrow().getMessageProcessingTimesSinceLastExpectedMessage().size());
     }
 
     @Test
@@ -78,21 +77,21 @@ public class MessageTrackerTest {
 
         MessageTracker.Context context2 = messageTracker.received(new Foo());
 
-        Assert.assertEquals(true, context2.error().isPresent());
+        assertEquals(true, context2.error().isPresent());
 
-        MessageTracker.Error error = context2.error().get();
+        MessageTracker.Error error = context2.error().orElseThrow();
 
         List<MessageTracker.MessageProcessingTime> messageProcessingTimes =
                 error.getMessageProcessingTimesSinceLastExpectedMessage();
 
-        Assert.assertEquals(3, messageProcessingTimes.size());
+        assertEquals(3, messageProcessingTimes.size());
 
-        Assert.assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
-        Assert.assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
-        Assert.assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
-        Assert.assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10));
-        Assert.assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
-        Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+        assertEquals(String.class, messageProcessingTimes.get(0).getMessageClass());
+        assertEquals(Long.class, messageProcessingTimes.get(1).getMessageClass());
+        assertEquals(Integer.class, messageProcessingTimes.get(2).getMessageClass());
+        assertTrue(messageProcessingTimes.get(2).getElapsedTimeInNanos() > MILLISECONDS.toNanos(10));
+        assertEquals(Foo.class, error.getLastExpectedMessage().getClass());
+        assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
 
         LOG.error("An error occurred : {}" , error);
     }
@@ -107,8 +106,7 @@ public class MessageTrackerTest {
         ticker.advance(1, MILLISECONDS);
 
         MessageTracker.Context context2 = messageTracker.received(new Foo());
-        Assert.assertEquals(false, context2.error().isPresent());
-
+        assertEquals(false, context2.error().isPresent());
     }
 
     @Test
@@ -117,12 +115,7 @@ public class MessageTrackerTest {
 
         messageTracker.received(new Foo());
 
-        try {
-            messageTracker.received(new Foo());
-            fail("Expected an IllegalStateException");
-        } catch (IllegalStateException e) {
-            // expected
-        }
+        assertThrows(IllegalStateException.class, () -> messageTracker.received(new Foo()));
     }
 
     @Test
@@ -139,15 +132,15 @@ public class MessageTrackerTest {
 
         MessageTracker.Context context = messageTracker.received(new Foo());
 
-        Assert.assertEquals(true, context.error().isPresent());
+        assertEquals(true, context.error().isPresent());
 
-        MessageTracker.Error error = context.error().get();
+        MessageTracker.Error error = context.error().orElseThrow();
 
-        Assert.assertEquals(null, error.getLastExpectedMessage());
-        Assert.assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
+        assertEquals(null, error.getLastExpectedMessage());
+        assertEquals(Foo.class, error.getCurrentExpectedMessage().getClass());
 
         String errorString = error.toString();
-        Assert.assertTrue(errorString.contains("Last Expected Message = null"));
+        assertTrue(errorString.contains("Last Expected Message = null"));
 
         LOG.error("An error occurred : {}", error);
     }
@@ -162,8 +155,7 @@ public class MessageTrackerTest {
 
         MessageTracker.Context context = messageTracker.received(new Foo());
 
-        Assert.assertEquals(true, context.error().isPresent());
-
+        assertEquals(true, context.error().isPresent());
     }
 
     @Test
@@ -172,20 +164,18 @@ public class MessageTrackerTest {
         messageTracker.begin();
 
         try (MessageTracker.Context ctx = messageTracker.received(45)) {
-            Assert.assertEquals(false, ctx.error().isPresent());
+            assertEquals(false, ctx.error().isPresent());
         }
         try (MessageTracker.Context ctx = messageTracker.received(45L)) {
-            Assert.assertEquals(false, ctx.error().isPresent());
+            assertEquals(false, ctx.error().isPresent());
         }
 
         List<MessageTracker.MessageProcessingTime> processingTimeList =
                 messageTracker.getMessagesSinceLastExpectedMessage();
 
-        Assert.assertEquals(2, processingTimeList.size());
+        assertEquals(2, processingTimeList.size());
 
         assertEquals(Integer.class, processingTimeList.get(0).getMessageClass());
         assertEquals(Long.class, processingTimeList.get(1).getMessageClass());
-
     }
-
 }
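
The try/fail block above is replaced with assertThrows from JUnit 4.13, which also returns the thrown exception so further assertions can be made on it. A minimal self-contained example of the idiom; the method under test here is hypothetical:

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertThrows;

    import org.junit.Test;

    public class AssertThrowsExample {
        @Test
        public void rejectsNonPositiveInput() {
            final IllegalArgumentException ex = assertThrows(IllegalArgumentException.class,
                () -> checkPositive(-1));
            assertEquals("value must be > 0", ex.getMessage());
        }

        private static int checkPositive(final int value) {
            if (value <= 0) {
                throw new IllegalArgumentException("value must be > 0");
            }
            return value;
        }
    }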
index ceb21e13418c916236d3b3df91bb98f3bc5baac7..897cb6193ad3678ba8a1e308a74506029d8a9081 100644 (file)
@@ -18,15 +18,18 @@ import akka.event.Logging;
 import akka.japi.Effect;
 import akka.remote.AssociationErrorEvent;
 import akka.remote.InvalidAssociation;
-import akka.remote.ThisActorSystemQuarantinedEvent;
+import akka.remote.UniqueAddress;
+import akka.remote.artery.ThisActorSystemQuarantinedEvent;
 import akka.testkit.javadsl.TestKit;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import scala.Option;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class QuarantinedMonitorActorTest {
 
     private static final Address LOCAL = Address.apply("http", "local");
@@ -39,7 +42,6 @@ public class QuarantinedMonitorActorTest {
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         actor = system.actorOf(QuarantinedMonitorActor.props(callback));
     }
@@ -53,11 +55,35 @@ public class QuarantinedMonitorActorTest {
     public void testOnReceiveQuarantined() throws Exception {
         final Throwable t = new RuntimeException("Remote has quarantined this system");
         final InvalidAssociation cause = InvalidAssociation.apply(LOCAL, REMOTE, t, Option.apply(null));
-        final ThisActorSystemQuarantinedEvent event = new ThisActorSystemQuarantinedEvent(LOCAL, REMOTE);
+        final UniqueAddress localAddress = new UniqueAddress(LOCAL, 1);
+        final UniqueAddress remoteAddress = new UniqueAddress(REMOTE, 2);
+        final ThisActorSystemQuarantinedEvent event = new ThisActorSystemQuarantinedEvent(localAddress, remoteAddress);
         actor.tell(event, ActorRef.noSender());
         verify(callback, timeout(1000)).apply();
     }
 
+    @Test
+    public void testOnReceiveQuarantinedAsAssociation() throws Exception {
+        for (int i = 0; i < 9; i++) {
+            final Throwable t =
+                    new RuntimeException("The remote system has a UID that has been quarantined. Association aborted.");
+            final InvalidAssociation cause = InvalidAssociation.apply(LOCAL, REMOTE, t, Option.apply(null));
+            final AssociationErrorEvent event =
+                    new AssociationErrorEvent(cause, LOCAL, REMOTE, true, Logging.ErrorLevel());
+            actor.tell(event, ActorRef.noSender());
+        }
+
+        final Address local1 = Address.apply("http", "local1");
+        final Address remote1 = Address.apply("http", "remote1");
+        final Throwable t1 =
+                new RuntimeException("The remote system has a UID that has been quarantined. Association aborted.");
+        final InvalidAssociation cause1 = InvalidAssociation.apply(local1, remote1, t1, Option.apply(null));
+        final AssociationErrorEvent event1 =
+                new AssociationErrorEvent(cause1, local1, remote1, true, Logging.ErrorLevel());
+        actor.tell(event1, ActorRef.noSender());
+        verify(callback, timeout(1000)).apply();
+    }
+
     @Test
     public void testOnReceiveAnother() throws Exception {
         final Address local = Address.apply("http", "local");
@@ -68,5 +94,4 @@ public class QuarantinedMonitorActorTest {
         actor.tell(event, ActorRef.noSender());
         verify(callback, never()).apply();
     }
-
 }
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactoryTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/QNameFactoryTest.java
deleted file mode 100644 (file)
index 9515240..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.util.TestModel;
-import org.opendaylight.yangtools.yang.common.QName;
-
-public class QNameFactoryTest {
-
-    @Test
-    public void testBasic() {
-        QName expected = TestModel.AUG_NAME_QNAME;
-        QName created = QNameFactory.create(expected.toString());
-
-        assertFalse(expected == created);
-
-        assertEquals(expected, created);
-
-        QName cached = QNameFactory.create(expected.toString());
-
-        assertEquals(expected, cached);
-
-        assertTrue(cached == created);
-    }
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeStreamReaderWriterTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeStreamReaderWriterTest.java
deleted file mode 100644 (file)
index 147cc66..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import static org.junit.Assert.assertEquals;
-
-import com.google.common.io.ByteStreams;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.Optional;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.util.TestModel;
-import org.opendaylight.yangtools.yang.common.Empty;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.w3c.dom.Node;
-import org.xml.sax.InputSource;
-
-public class NormalizedNodeStreamReaderWriterTest {
-
-    @Test
-    public void testNormalizedNodeStreaming() throws IOException {
-
-        ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(ByteStreams.newDataOutput(bos));
-
-        NormalizedNode<?, ?> testContainer = createTestContainer();
-        nnout.writeNormalizedNode(testContainer);
-
-        QName toaster = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","toaster");
-        QName darknessFactor = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","darknessFactor");
-        QName description = QName.create("http://netconfcentral.org/ns/toaster","2009-11-20","description");
-        ContainerNode toasterNode = Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(toaster))
-                .withChild(ImmutableNodes.leafNode(darknessFactor, "1000"))
-                .withChild(ImmutableNodes.leafNode(description, largeString(20))).build();
-
-        ContainerNode toasterContainer = Builders.containerBuilder()
-                .withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME)).withChild(toasterNode).build();
-        nnout.writeNormalizedNode(toasterContainer);
-
-        NormalizedNodeDataInput nnin = NormalizedNodeInputOutput.newDataInput(ByteStreams.newDataInput(
-            bos.toByteArray()));
-
-        NormalizedNode<?,?> node = nnin.readNormalizedNode();
-        Assert.assertEquals(testContainer, node);
-
-        node = nnin.readNormalizedNode();
-        Assert.assertEquals(toasterContainer, node);
-    }
-
-    private static NormalizedNode<?, ?> createTestContainer() {
-        byte[] bytes1 = {1, 2, 3};
-        LeafSetEntryNode<Object> entry1 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes1)).withValue(bytes1).build();
-
-        byte[] bytes2 = {};
-        LeafSetEntryNode<Object> entry2 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes2)).withValue(bytes2).build();
-
-        return TestModel.createBaseTestContainerBuilder()
-                .withChild(ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
-                        new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME))
-                        .withChild(entry1).withChild(entry2).build())
-                .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[]{1, 2, 3, 4}))
-                .withChild(ImmutableNodes.leafNode(TestModel.EMPTY_QNAME, Empty.getInstance()))
-                .withChild(Builders.orderedMapBuilder()
-                      .withNodeIdentifier(new NodeIdentifier(TestModel.ORDERED_LIST_QNAME))
-                      .withChild(ImmutableNodes.mapEntry(TestModel.ORDERED_LIST_ENTRY_QNAME,
-                              TestModel.ID_QNAME, 11)).build()).build();
-    }
-
-    @Test
-    public void testYangInstanceIdentifierStreaming() throws IOException  {
-        YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH)
-                .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
-                        TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
-
-        ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(ByteStreams.newDataOutput(bos));
-
-        nnout.writeYangInstanceIdentifier(path);
-
-        NormalizedNodeDataInput nnin = NormalizedNodeInputOutput.newDataInput(ByteStreams.newDataInput(
-            bos.toByteArray()));
-
-        YangInstanceIdentifier newPath = nnin.readYangInstanceIdentifier();
-        Assert.assertEquals(path, newPath);
-    }
-
-    @Test
-    public void testNormalizedNodeAndYangInstanceIdentifierStreaming() throws IOException {
-
-        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
-        NormalizedNodeOutputStreamWriter writer = new NormalizedNodeOutputStreamWriter(
-            ByteStreams.newDataOutput(byteArrayOutputStream));
-
-        NormalizedNode<?, ?> testContainer = TestModel.createBaseTestContainerBuilder().build();
-        writer.writeNormalizedNode(testContainer);
-
-        YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.TEST_PATH)
-                .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(
-                        TestModel.INNER_LIST_QNAME, TestModel.ID_QNAME, 10).build();
-
-        writer.writeYangInstanceIdentifier(path);
-
-        NormalizedNodeDataInput reader = NormalizedNodeInputOutput.newDataInput(
-            ByteStreams.newDataInput(byteArrayOutputStream.toByteArray()));
-
-        NormalizedNode<?,?> node = reader.readNormalizedNode();
-        Assert.assertEquals(testContainer, node);
-
-        YangInstanceIdentifier newPath = reader.readYangInstanceIdentifier();
-        Assert.assertEquals(path, newPath);
-
-        writer.close();
-    }
-
-    @Test(expected = InvalidNormalizedNodeStreamException.class, timeout = 10000)
-    public void testInvalidNormalizedNodeStream() throws IOException {
-        byte[] invalidBytes = {1,2,3};
-        NormalizedNodeDataInput reader = NormalizedNodeInputOutput.newDataInput(
-                ByteStreams.newDataInput(invalidBytes));
-
-        reader.readNormalizedNode();
-    }
-
-    @Test(expected = InvalidNormalizedNodeStreamException.class, timeout = 10000)
-    public void testInvalidYangInstanceIdentifierStream() throws IOException {
-        byte[] invalidBytes = {1,2,3};
-        NormalizedNodeDataInput reader = NormalizedNodeInputOutput.newDataInput(
-            ByteStreams.newDataInput(invalidBytes));
-
-        reader.readYangInstanceIdentifier();
-    }
-
-    @Test
-    public void testWithSerializable() {
-        NormalizedNode<?, ?> input = TestModel.createTestContainer();
-        SampleNormalizedNodeSerializable serializable = new SampleNormalizedNodeSerializable(input);
-        SampleNormalizedNodeSerializable clone =
-                (SampleNormalizedNodeSerializable)SerializationUtils.clone(serializable);
-
-        Assert.assertEquals(input, clone.getInput());
-    }
-
-    @Test
-    public void testAnyXmlStreaming() throws Exception {
-        String xml = "<foo xmlns=\"http://www.w3.org/TR/html4/\" x=\"123\"><bar>one</bar><bar>two</bar></foo>";
-        final DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
-        factory.setNamespaceAware(true);
-
-        Node xmlNode = factory.newDocumentBuilder().parse(
-                new InputSource(new StringReader(xml))).getDocumentElement();
-
-        assertEquals("http://www.w3.org/TR/html4/", xmlNode.getNamespaceURI());
-
-        NormalizedNode<?, ?> anyXmlContainer = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).withChild(
-                        Builders.anyXmlBuilder().withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
-                            .withValue(new DOMSource(xmlNode)).build()).build();
-
-        ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(ByteStreams.newDataOutput(bos));
-
-        nnout.writeNormalizedNode(anyXmlContainer);
-
-        NormalizedNodeDataInput nnin = NormalizedNodeInputOutput.newDataInput(ByteStreams.newDataInput(
-            bos.toByteArray()));
-
-        ContainerNode deserialized = (ContainerNode)nnin.readNormalizedNode();
-
-        Optional<DataContainerChild<? extends PathArgument, ?>> child =
-                deserialized.getChild(new NodeIdentifier(TestModel.ANY_XML_QNAME));
-        assertEquals("AnyXml child present", true, child.isPresent());
-
-        StreamResult xmlOutput = new StreamResult(new StringWriter());
-        Transformer transformer = TransformerFactory.newInstance().newTransformer();
-        transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
-        transformer.transform(((AnyXmlNode)child.get()).getValue(), xmlOutput);
-
-        assertEquals("XML", xml, xmlOutput.getWriter().toString());
-        assertEquals("http://www.w3.org/TR/html4/", ((AnyXmlNode)child.get()).getValue().getNode().getNamespaceURI());
-    }
-
-    @Test
-    public void testSchemaPathSerialization() throws Exception {
-        final SchemaPath expected = SchemaPath.create(true, TestModel.ANY_XML_QNAME);
-
-        ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(ByteStreams.newDataOutput(bos));
-        nnout.writeSchemaPath(expected);
-
-        NormalizedNodeDataInput nnin = NormalizedNodeInputOutput.newDataInput(ByteStreams.newDataInput(
-            bos.toByteArray()));
-        SchemaPath actual = nnin.readSchemaPath();
-        assertEquals(expected, actual);
-    }
-
-    private static String largeString(final int pow) {
-        StringBuilder sb = new StringBuilder("X");
-        for (int i = 0; i < pow; i++) {
-            sb.append(sb);
-        }
-        return sb.toString();
-    }
-}
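
For reference, the largeString helper removed above doubles its buffer on every iteration, so its length grows as 2^pow: the largeString(20) call used for the description leaf therefore produced a 1,048,576-character value. A standalone restatement of the removed helper, with the growth spelled out in comments:

    // Length after the loop is 2^pow characters: the builder starts at "X" (1 char)
    // and appending it to itself doubles it on each pass.
    private static String largeString(final int pow) {
        final StringBuilder sb = new StringBuilder("X");
        for (int i = 0; i < pow; i++) {
            sb.append(sb); // self-append: 1 -> 2 -> 4 -> ... -> 2^pow
        }
        return sb.toString(); // pow == 20 yields 1_048_576 characters
    }
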
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SampleNormalizedNodeSerializable.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SampleNormalizedNodeSerializable.java
deleted file mode 100644 (file)
index 6b24b05..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-
-public class SampleNormalizedNodeSerializable implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    private NormalizedNode<?, ?> input;
-
-    public SampleNormalizedNodeSerializable(NormalizedNode<?, ?> input) {
-        this.input = input;
-    }
-
-    public NormalizedNode<?, ?> getInput() {
-        return input;
-    }
-
-    private void readObject(final ObjectInputStream stream)
-            throws IOException {
-        NormalizedNodeDataInput reader = NormalizedNodeInputOutput.newDataInput(stream);
-        this.input = reader.readNormalizedNode();
-    }
-
-    private void writeObject(final ObjectOutputStream stream) throws IOException {
-        NormalizedNodeStreamWriter writer = new NormalizedNodeOutputStreamWriter(stream);
-        NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(writer);
-
-        normalizedNodeWriter.write(this.input);
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/SerializationUtilsTest.java
index 50d06ada231c23ca611d1ad0234288ed9bedfce3..630ebecb4eabc596067e45580db4dfbaf57966e3 100644 (file)
  */
 package org.opendaylight.controller.cluster.datastore.node.utils.stream;
 
-import com.google.common.collect.ImmutableSet;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
-import java.io.DataOutput;
 import java.io.DataOutputStream;
-import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.Set;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
 import javax.xml.transform.dom.DOMSource;
-import org.custommonkey.xmlunit.Diff;
-import org.custommonkey.xmlunit.XMLUnit;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.yangtools.util.xml.UntrustedXML;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
-import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.w3c.dom.Document;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.xmlunit.builder.DiffBuilder;
 
 public class SerializationUtilsTest {
-
-    private static final QName CONTAINER_Q_NAME = QName.create("ns-1", "2017-03-17", "container1");
+    private static final QName CONTAINER1 = QName.create("ns-1", "2017-03-17", "container1");
 
     @Test
-    public void testSerializeDeserializeNodes() {
-        final NormalizedNode<?, ?> normalizedNode = createNormalizedNode();
-        final byte[] bytes = SerializationUtils.serializeNormalizedNode(normalizedNode);
-        Assert.assertEquals(normalizedNode, SerializationUtils.deserializeNormalizedNode(bytes));
-
+    public void testSerializeDeserializeNodes() throws Exception {
+        final var normalizedNode = createNormalizedNode();
+        final var bytes = serialize(normalizedNode);
+        assertEquals(10567, bytes.length);
+        assertEquals(normalizedNode, deserialize(bytes));
     }
 
     @Test
     public void testSerializeDeserializeAnyXmlNode() throws Exception {
-        final ByteArrayInputStream is =
-                new ByteArrayInputStream("<xml><data/></xml>".getBytes(Charset.defaultCharset()));
-        final Document parse = UntrustedXML.newDocumentBuilder().parse(is);
-        final AnyXmlNode anyXmlNode = Builders.anyXmlBuilder()
-                .withNodeIdentifier(id("anyXmlNode"))
-                .withValue(new DOMSource(parse))
-                .build();
-        final byte[] bytes = SerializationUtils.serializeNormalizedNode(anyXmlNode);
-        final NormalizedNode<?, ?> deserialized = SerializationUtils.deserializeNormalizedNode(bytes);
-        final DOMSource value = (DOMSource) deserialized.getValue();
-        final Diff diff = XMLUnit.compareXML((Document) anyXmlNode.getValue().getNode(),
-                value.getNode().getOwnerDocument());
-        Assert.assertTrue(diff.toString(), diff.similar());
+        final var parse = UntrustedXML.newDocumentBuilder().parse(
+            new ByteArrayInputStream("<xml><data/></xml>".getBytes(StandardCharsets.UTF_8)));
+        final var anyXmlNode = ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+            .withNodeIdentifier(id("anyXmlNode"))
+            .withValue(new DOMSource(parse))
+            .build();
+        final byte[] bytes = serialize(anyXmlNode);
+        assertEquals(113, bytes.length);
+
+        final var diff = DiffBuilder.compare(anyXmlNode.body().getNode())
+            // FIXME: why all this magic?
+            .withTest(((DOMSource) deserialize(bytes).body()).getNode().getOwnerDocument())
+            .checkForSimilar()
+            .build();
+        assertFalse(diff.toString(), diff.hasDifferences());
     }
 
     @Test
-    public void testSerializeDeserializePath() {
-        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        final DataOutput out = new DataOutputStream(bos);
-        final YangInstanceIdentifier path = YangInstanceIdentifier.builder()
-                .node(id("container1"))
-                .node(autmentationId("list1", "list2"))
-                .node(listId("list1", "keyName1", "keyValue1"))
-                .node(leafSetId("leafSer1", "leafSetValue1"))
-                .build();
-        SerializationUtils.serializePath(path, out);
-        final YangInstanceIdentifier deserialized =
-                SerializationUtils.deserializePath(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
-        Assert.assertEquals(path, deserialized);
+    public void testSerializeDeserializePath() throws IOException {
+        final var path = YangInstanceIdentifier.builder()
+            .node(id("container1"))
+            .node(listId("list1", "keyName1", "keyValue1"))
+            .node(leafSetId("leafSer1", "leafSetValue1"))
+            .build();
+
+        final var bos = new ByteArrayOutputStream();
+        try (var out = new DataOutputStream(bos)) {
+            SerializationUtils.writePath(out, path);
+        }
+
+        final var bytes = bos.toByteArray();
+        assertEquals(105, bytes.length);
+
+        assertEquals(path, SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bytes))));
     }
 
     @Test
-    public void testSerializeDeserializePathAndNode() {
-        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
-        final DataOutput out = new DataOutputStream(bos);
-        final NormalizedNode<?, ?> node = createNormalizedNode();
-        final YangInstanceIdentifier path = YangInstanceIdentifier.create(id("container1"));
-        SerializationUtils.serializePathAndNode(path, node, out);
-        final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
-        final AtomicBoolean applierCalled = new AtomicBoolean(false);
-        SerializationUtils.deserializePathAndNode(in, applierCalled, (instance, deserializedPath, deserializedNode) -> {
-            Assert.assertEquals(path, deserializedPath);
-            Assert.assertEquals(node, deserializedNode);
-            applierCalled.set(true);
-        });
-        Assert.assertTrue(applierCalled.get());
+    public void testSerializeDeserializePathAndNode() throws IOException {
+        final var path = YangInstanceIdentifier.of(id("container1"));
+        final var node = createNormalizedNode();
+
+        final var bos = new ByteArrayOutputStream();
+        try (var out = new DataOutputStream(bos)) {
+            SerializationUtils.writeNodeAndPath(out, path, node);
+        }
+
+        final byte[] bytes = bos.toByteArray();
+        assertEquals(10569, bytes.length);
+
+        final var applierCalled = new AtomicBoolean(false);
+        try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
+            SerializationUtils.readNodeAndPath(in, applierCalled, (instance, deserializedPath, deserializedNode) -> {
+                assertEquals(path, deserializedPath);
+                assertEquals(node, deserializedNode);
+                applierCalled.set(true);
+            });
+        }
+        assertTrue(applierCalled.get());
     }
 
-    private static NormalizedNode<?, ?> createNormalizedNode() {
-        final LeafSetNode<Object> leafSetNode = Builders.leafSetBuilder()
-                .withNodeIdentifier(id("leafSetNode"))
-                .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1"))
-                .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2"))
-                .build();
-        final LeafSetNode<Object> orderedLeafSetNode = Builders.orderedLeafSetBuilder()
-                .withNodeIdentifier(id("orderedLeafSetNode"))
-                .withChild(createLeafSetEntry("orderedLeafSetNode", "value1"))
-                .withChild(createLeafSetEntry("orderedLeafSetNode", "value2"))
-                .build();
-        final LeafNode<Boolean> booleanLeaf = createLeaf("booleanLeaf", true);
-        final LeafNode<Byte> byteLeaf = createLeaf("byteLeaf", (byte) 0);
-        final LeafNode<Short> shortLeaf = createLeaf("shortLeaf", (short) 55);
-        final LeafNode<Integer> intLeaf = createLeaf("intLeaf", 11);
-        final LeafNode<Long> longLeaf = createLeaf("longLeaf", 151515L);
-        final LeafNode<String> stringLeaf = createLeaf("stringLeaf", "stringValue");
-        final LeafNode<String> longStringLeaf = createLeaf("longStringLeaf", getLongString());
-        final LeafNode<QName> qNameLeaf = createLeaf("stringLeaf", QName.create("base", "qName"));
-        final LeafNode<YangInstanceIdentifier> idLeaf = createLeaf("stringLeaf", YangInstanceIdentifier.EMPTY);
-        final MapEntryNode entry1 = Builders.mapEntryBuilder()
-                .withNodeIdentifier(listId("mapNode", "key", "key1"))
-                .withChild(stringLeaf)
-                .build();
-        final MapEntryNode entry2 = Builders.mapEntryBuilder()
-                .withNodeIdentifier(listId("mapNode", "key", "key2"))
-                .withChild(stringLeaf)
-                .build();
-        final MapNode mapNode = Builders.mapBuilder()
-                .withNodeIdentifier(id("mapNode"))
-                .withChild(entry1)
-                .withChild(entry2)
-                .build();
-        final OrderedMapNode orderedMapNode = Builders.orderedMapBuilder()
-                .withNodeIdentifier(id("orderedMapNode"))
-                .withChild(entry2)
-                .withChild(entry1)
-                .build();
-        final UnkeyedListEntryNode unkeyedListEntry1 = Builders.unkeyedListEntryBuilder()
-                .withNodeIdentifier(id("unkeyedList"))
-                .withChild(stringLeaf)
-                .build();
-        final UnkeyedListEntryNode unkeyedListEntry2 = Builders.unkeyedListEntryBuilder()
-                .withNodeIdentifier(id("unkeyedList"))
+    private static NormalizedNode deserialize(final byte[] bytes) throws Exception {
+        return SerializationUtils.readNormalizedNode(new DataInputStream(new ByteArrayInputStream(bytes)))
+            .orElseThrow();
+    }
+
+    private static byte[] serialize(final NormalizedNode node) throws Exception {
+        final var bos = new ByteArrayOutputStream();
+        SerializationUtils.writeNormalizedNode(new DataOutputStream(bos), node);
+        return bos.toByteArray();
+    }
+
+    private static ContainerNode createNormalizedNode() {
+        final var stringLeaf = createLeaf("stringLeaf", "stringValue");
+        final var entry1 = ImmutableNodes.newMapEntryBuilder()
+            .withNodeIdentifier(listId("mapNode", "key", "key1"))
+            .withChild(stringLeaf)
+            .build();
+        final var entry2 = ImmutableNodes.newMapEntryBuilder()
+            .withNodeIdentifier(listId("mapNode", "key", "key2"))
+            .withChild(stringLeaf)
+            .build();
+
+        return ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CONTAINER1))
+                .withChild(createLeaf("booleanLeaf", true))
+                .withChild(createLeaf("byteLeaf", (byte) 0))
+                .withChild(createLeaf("shortLeaf", (short) 55))
+                .withChild(createLeaf("intLeaf", 11))
+                .withChild(createLeaf("longLeaf", 151515L))
                 .withChild(stringLeaf)
-                .build();
-        final UnkeyedListNode unkeyedListNode = Builders.unkeyedListBuilder()
-                .withNodeIdentifier(id("unkeyedList"))
-                .withChild(unkeyedListEntry1)
-                .withChild(unkeyedListEntry2)
-                .build();
-        final ImmutableSet<QName> childNames =
-                ImmutableSet.of(QName.create(CONTAINER_Q_NAME, "aug1"), QName.create(CONTAINER_Q_NAME, "aug1"));
-        final AugmentationNode augmentationNode = Builders.augmentationBuilder()
-                .withNodeIdentifier(new YangInstanceIdentifier.AugmentationIdentifier(childNames))
+                .withChild(createLeaf("longStringLeaf", "0123456789".repeat(1000)))
+                .withChild(createLeaf("stringLeaf", QName.create("base", "qName")))
+                .withChild(createLeaf("stringLeaf", YangInstanceIdentifier.of(QName.create("test", "test"))))
+                .withChild(ImmutableNodes.newSystemMapBuilder()
+                    .withNodeIdentifier(id("mapNode"))
+                    .withChild(entry1)
+                    .withChild(entry2)
+                    .build())
+                .withChild(ImmutableNodes.newUserMapBuilder()
+                    .withNodeIdentifier(id("orderedMapNode"))
+                    .withChild(entry2)
+                    .withChild(entry1)
+                    .build())
+                .withChild(ImmutableNodes.newUnkeyedListBuilder()
+                    .withNodeIdentifier(id("unkeyedList"))
+                    .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+                        .withNodeIdentifier(id("unkeyedList"))
+                        .withChild(stringLeaf)
+                        .build())
+                    .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+                        .withNodeIdentifier(id("unkeyedList"))
+                        .withChild(stringLeaf)
+                        .build())
+                    .build())
+                .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                    .withNodeIdentifier(id("leafSetNode"))
+                    .withChild(createLeafSetEntry("leafSetNode", "leafSetValue1"))
+                    .withChild(createLeafSetEntry("leafSetNode", "leafSetValue2"))
+                    .build())
+                .withChild(ImmutableNodes.newUserLeafSetBuilder()
+                    .withNodeIdentifier(id("orderedLeafSetNode"))
+                    .withChild(createLeafSetEntry("orderedLeafSetNode", "value1"))
+                    .withChild(createLeafSetEntry("orderedLeafSetNode", "value2"))
+                    .build())
                 .withChild(createLeaf("aug1", "aug1Value"))
                 .withChild(createLeaf("aug2", "aug2Value"))
-                .build();
-        final ChoiceNode choiceNode = Builders.choiceBuilder()
-                .withNodeIdentifier(id("choiceNode"))
-                .withChild(createLeaf("choiceLeaf", 12))
-                .build();
-        return Builders.containerBuilder()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CONTAINER_Q_NAME))
-                .withChild(booleanLeaf)
-                .withChild(byteLeaf)
-                .withChild(shortLeaf)
-                .withChild(intLeaf)
-                .withChild(longLeaf)
-                .withChild(stringLeaf)
-                .withChild(longStringLeaf)
-                .withChild(qNameLeaf)
-                .withChild(idLeaf)
-                .withChild(mapNode)
-                .withChild(orderedMapNode)
-                .withChild(unkeyedListNode)
-                .withChild(leafSetNode)
-                .withChild(orderedLeafSetNode)
-                .withChild(augmentationNode)
-                .withChild(choiceNode)
+                .withChild(ImmutableNodes.newChoiceBuilder()
+                    .withNodeIdentifier(id("choiceNode"))
+                    .withChild(createLeaf("choiceLeaf", 12))
+                    .build())
                 .build();
     }
 
@@ -192,39 +184,20 @@ public class SerializationUtilsTest {
     }
 
     private static LeafSetEntryNode<Object> createLeafSetEntry(final String leafSet, final String value) {
-        return Builders.leafSetEntryBuilder()
-                .withNodeIdentifier(leafSetId(leafSet, value))
-                .withValue(value)
-                .build();
+        return ImmutableNodes.leafSetEntry(leafSetId(leafSet, value));
     }
 
-    private static YangInstanceIdentifier.NodeIdentifier id(final String name) {
-        return new YangInstanceIdentifier.NodeIdentifier(QName.create(CONTAINER_Q_NAME, name));
+    private static NodeIdentifier id(final String name) {
+        return new NodeIdentifier(QName.create(CONTAINER1, name));
     }
 
-    private static YangInstanceIdentifier.NodeIdentifierWithPredicates listId(final String listName,
-                                                                              final String keyName,
-                                                                              final Object keyValue) {
-        return new YangInstanceIdentifier.NodeIdentifierWithPredicates(QName.create(CONTAINER_Q_NAME, listName),
-                QName.create(CONTAINER_Q_NAME, keyName), keyValue);
+    private static NodeIdentifierWithPredicates listId(final String listName, final String keyName,
+            final Object keyValue) {
+        return NodeIdentifierWithPredicates.of(QName.create(CONTAINER1, listName), QName.create(CONTAINER1, keyName),
+            keyValue);
     }
 
-    private static <T> YangInstanceIdentifier.NodeWithValue<T> leafSetId(final String node, final T value) {
-        return new YangInstanceIdentifier.NodeWithValue<>(QName.create(CONTAINER_Q_NAME, node), value);
-    }
-
-    private static YangInstanceIdentifier.AugmentationIdentifier autmentationId(final String... nodes) {
-        final Set<QName> qNames = Arrays.stream(nodes)
-                .map(node -> QName.create(CONTAINER_Q_NAME, node))
-                .collect(Collectors.toSet());
-        return new YangInstanceIdentifier.AugmentationIdentifier(qNames);
-    }
-
-    private static String getLongString() {
-        final StringBuilder builder = new StringBuilder(10000);
-        for (int i = 0; i < 1000; i++) {
-            builder.append("0123456789");
-        }
-        return builder.toString();
+    private static <T> NodeWithValue<T> leafSetId(final String node, final T value) {
+        return new NodeWithValue<>(QName.create(CONTAINER1, node), value);
     }
-}
\ No newline at end of file
+}
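
The updated test above exercises SerializationUtils purely as write/read round-trips. A minimal sketch of that pattern, assuming the same package-local signatures the test uses (writeNormalizedNode/readNormalizedNode, writePath/readPath); the RoundTrip class name is made up for illustration, and the code would need to sit in the same package as SerializationUtils:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class RoundTrip {
        private RoundTrip() {
            // static helpers only
        }

        // Mirrors testSerializeDeserializeNodes: node -> bytes -> node.
        static NormalizedNode roundTripNode(final NormalizedNode node) throws Exception {
            final var bos = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bos)) {
                SerializationUtils.writeNormalizedNode(out, node);
            }
            return SerializationUtils.readNormalizedNode(
                new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))).orElseThrow();
        }

        // Mirrors testSerializeDeserializePath: path -> bytes -> path.
        static YangInstanceIdentifier roundTripPath(final YangInstanceIdentifier path) throws IOException {
            final var bos = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bos)) {
                SerializationUtils.writePath(out, path);
            }
            return SerializationUtils.readPath(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        }
    }
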
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ValueTypesTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/ValueTypesTest.java
deleted file mode 100644 (file)
index 0395e94..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.node.utils.stream;
-
-import static org.junit.Assert.assertEquals;
-
-import org.junit.Test;
-
-public class ValueTypesTest {
-    @Test
-    public void testStringType() {
-        assertEquals(ValueTypes.STRING_TYPE, ValueTypes.getSerializableType("foobar"));
-        final String largeString = largeString(ValueTypes.STRING_BYTES_LENGTH_THRESHOLD);
-        assertEquals(ValueTypes.STRING_BYTES_TYPE, ValueTypes.getSerializableType(largeString));
-    }
-
-    private static String largeString(int minSize) {
-        final int pow = (int) (Math.log(minSize * 2) / Math.log(2));
-        StringBuilder sb = new StringBuilder("X");
-        for (int i = 0; i < pow; i++) {
-            sb.append(sb);
-        }
-        return sb.toString();
-    }
-}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/NormalizedNodePrunerTest.java
index 47150420b4e96aa82c3c4b361a21485942386ff7..6b150131b3e5b058b8fe65e1f281c523b29549b3 100644 (file)
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.containerNode;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
 
-import com.google.common.collect.Sets;
 import java.io.IOException;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicInteger;
 import javax.xml.transform.dom.DOMSource;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
-import org.mockito.MockitoAnnotations;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.datastore.node.utils.NormalizedNodeNavigator;
 import org.opendaylight.controller.cluster.datastore.util.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyxmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
 import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemLeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class NormalizedNodePrunerTest {
-    private static final SchemaContext NO_TEST_SCHEMA = TestModel.createTestContextWithoutTestSchema();
-    private static final SchemaContext NO_AUG_SCHEMA = TestModel.createTestContextWithoutAugmentationSchema();
-    private static final SchemaContext FULL_SCHEMA = TestModel.createTestContext();
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
+    private static final EffectiveModelContext NO_TEST_SCHEMA = TestModel.createTestContextWithoutTestSchema();
+    private static final EffectiveModelContext NO_AUG_SCHEMA = TestModel.createTestContextWithoutAugmentationSchema();
+    private static final EffectiveModelContext FULL_SCHEMA = TestModel.createTestContext();
+
+    private static AbstractNormalizedNodePruner prunerFullSchema(final YangInstanceIdentifier path) {
+        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(FULL_SCHEMA);
+        pruner.initializeForPath(path);
+        return pruner;
     }
 
-    private static NormalizedNodePruner prunerFullSchema(final YangInstanceIdentifier path) {
-        return new NormalizedNodePruner(path, FULL_SCHEMA);
+    private static AbstractNormalizedNodePruner prunerNoAugSchema(final YangInstanceIdentifier path) {
+        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(NO_AUG_SCHEMA);
+        pruner.initializeForPath(path);
+        return pruner;
     }
 
-    private static NormalizedNodePruner prunerNoAugSchema(final YangInstanceIdentifier path) {
-        return new NormalizedNodePruner(path, NO_AUG_SCHEMA);
-    }
-
-    private static NormalizedNodePruner prunerNoTestSchema(final YangInstanceIdentifier path) {
-        return new NormalizedNodePruner(path, NO_TEST_SCHEMA);
+    private static AbstractNormalizedNodePruner prunerNoTestSchema(final YangInstanceIdentifier path) {
+        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forSchemaContext(NO_TEST_SCHEMA);
+        pruner.initializeForPath(path);
+        return pruner;
     }
 
     @Test
     public void testNodesNotPrunedWhenSchemaPresent() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH);
 
         NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
 
-        NormalizedNode<?, ?> expected = createTestContainer();
+        NormalizedNode expected = createTestContainer();
 
         normalizedNodeWriter.write(expected);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
+        NormalizedNode actual = pruner.getResult().orElseThrow();
 
         assertEquals(expected, actual);
-
     }
 
     @Test(expected = IllegalStateException.class)
     public void testReusePruner() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH);
 
         NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
 
-        NormalizedNode<?, ?> expected = createTestContainer();
+        NormalizedNode expected = createTestContainer();
 
         normalizedNodeWriter.write(expected);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
+        NormalizedNode actual = pruner.getResult().orElseThrow();
 
         assertEquals(expected, actual);
 
         NormalizedNodeWriter.forStreamWriter(pruner).write(expected);
-
     }
 
-
     @Test
     public void testNodesPrunedWhenAugmentationSchemaMissing() throws IOException {
-        NormalizedNodePruner pruner = prunerNoAugSchema(TestModel.TEST_PATH);
+        AbstractNormalizedNodePruner pruner = prunerNoAugSchema(TestModel.TEST_PATH);
 
         NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
 
-        NormalizedNode<?, ?> expected = createTestContainer();
+        NormalizedNode expected = createTestContainer();
 
         normalizedNodeWriter.write(expected);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
+        NormalizedNode actual = pruner.getResult().orElseThrow();
 
-        Assert.assertNotEquals(expected, actual);
+        assertNotEquals(expected, actual);
 
         // Asserting true here instead of checking actual value because I don't want this assertion to be fragile
         assertTrue(countNodes(expected, "store:aug") > 0);
@@ -122,160 +117,132 @@ public class NormalizedNodePrunerTest {
 
     @Test
     public void testNodesPrunedWhenTestSchemaMissing() throws IOException {
-        NormalizedNodePruner pruner = prunerNoTestSchema(TestModel.TEST_PATH);
+        AbstractNormalizedNodePruner pruner = prunerNoTestSchema(TestModel.TEST_PATH);
 
         NormalizedNodeWriter normalizedNodeWriter = NormalizedNodeWriter.forStreamWriter(pruner);
 
-        NormalizedNode<?, ?> expected = createTestContainer();
+        NormalizedNode expected = createTestContainer();
 
         normalizedNodeWriter.write(expected);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-
-        // Since top level schema is missing null is returned
-        assertNull(actual);
+        // Since the top-level schema is missing, an empty result is returned
+        assertEquals(Optional.empty(), pruner.getResult());
 
         // Asserting true here instead of checking actual value because I don't want this assertion to be fragile
         assertTrue(countNodes(expected, "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test") > 0);
-
     }
 
-    private static int countNodes(final NormalizedNode<?,?> normalizedNode, final String namespaceFilter) {
+    private static int countNodes(final NormalizedNode normalizedNode, final String namespaceFilter) {
         if (normalizedNode == null) {
             return 0;
         }
         final AtomicInteger count = new AtomicInteger();
         new NormalizedNodeNavigator((level, parentPath, normalizedNode1) -> {
-            if (!(normalizedNode1.getIdentifier() instanceof AugmentationIdentifier)) {
-                if (normalizedNode1.getIdentifier().getNodeType().getNamespace().toString().contains(namespaceFilter)) {
-                    count.incrementAndGet();
-                }
+            if (normalizedNode1.name().getNodeType().getNamespace().toString().contains(namespaceFilter)) {
+                count.incrementAndGet();
             }
-        }).navigate(YangInstanceIdentifier.EMPTY.toString(), normalizedNode);
+        }).navigate(YangInstanceIdentifier.of().toString(), normalizedNode);
 
         return count.get();
     }
 
     @Test
     public void testLeafNodeNotPrunedWhenHasNoParent() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.DESC_QNAME));
-        NormalizedNode<?, ?> input = Builders.leafBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.DESC_QNAME)).withValue("test").build();
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.DESC_QNAME));
+        NormalizedNode input = ImmutableNodes.leafNode(TestModel.DESC_QNAME, "test");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", input, actual);
-    }
-
-    @Test
-    public void testLeafNodePrunedWhenHasAugmentationParentAndSchemaMissing() throws IOException {
-        AugmentationIdentifier augId = new AugmentationIdentifier(Sets.newHashSet(TestModel.AUG_CONT_QNAME));
-        NormalizedNodePruner pruner = prunerFullSchema(YangInstanceIdentifier.builder()
-                .node(TestModel.TEST_QNAME).node(TestModel.AUGMENTED_LIST_QNAME)
-                        .node(TestModel.AUGMENTED_LIST_QNAME).node(augId).build());
-        LeafNode<Object> child = Builders.leafBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build();
-        NormalizedNode<?, ?> input = Builders.augmentationBuilder().withNodeIdentifier(augId).withChild(child).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", Builders.augmentationBuilder().withNodeIdentifier(augId).build(), actual);
+        assertEquals("normalizedNode", input, pruner.getResult().orElseThrow());
     }
 
     @Test
     public void testLeafNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
-        NormalizedNode<?, ?> input = Builders.leafBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.INVALID_QNAME)).withValue("test").build();
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
+        LeafNode<String> input = ImmutableNodes.leafNode(TestModel.INVALID_QNAME, "test");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertNull(actual);
+        assertEquals(Optional.empty(), pruner.getResult());
     }
 
-
     @Test
     public void testLeafSetEntryNodeNotPrunedWhenHasNoParent() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
-        NormalizedNode<?, ?> input = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build();
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
+        LeafSetEntryNode<?> input = ImmutableNodes.leafSetEntry(TestModel.SHOE_QNAME, "puma");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
+        NormalizedNode actual = pruner.getResult().orElseThrow();
         assertEquals("normalizedNode", input, actual);
     }
 
     @Test
     public void testLeafSetEntryNodeNotPrunedWhenHasParent() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
-        LeafSetEntryNode<Object> child = Builders.leafSetEntryBuilder().withValue("puma").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.SHOE_QNAME, "puma")).build();
-        NormalizedNode<?, ?> input = Builders.leafSetBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.SHOE_QNAME)).withChild(child).build();
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.SHOE_QNAME));
+        SystemLeafSetNode<?> input = ImmutableNodes.<String>newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.SHOE_QNAME))
+            .withChildValue("puma")
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
+        NormalizedNode actual = pruner.getResult().orElseThrow();
         assertEquals("normalizedNode", input, actual);
     }
 
     @Test
     public void testLeafSetEntryNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
-        NormalizedNode<?, ?> input = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build();
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
+        LeafSetEntryNode<?> input = ImmutableNodes.leafSetEntry(TestModel.INVALID_QNAME, "test");
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertNull(actual);
+        assertEquals(Optional.empty(), pruner.getResult());
     }
 
     @Test
     public void testLeafSetEntryNodePrunedWhenHasParentAndSchemaMissing() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
-        LeafSetEntryNode<Object> child = Builders.leafSetEntryBuilder().withValue("test").withNodeIdentifier(
-                new NodeWithValue<>(TestModel.INVALID_QNAME, "test")).build();
-        NormalizedNode<?, ?> input = Builders.leafSetBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.INVALID_QNAME)).withChild(child).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.INVALID_QNAME));
+        NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.<String>newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.INVALID_QNAME))
+            .withChildValue("test")
+            .build());
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertNull(actual);
+        assertEquals(Optional.empty(), pruner.getResult());
     }
 
     @Test
     public void testAnyXMLNodeNotPrunedWhenHasNoParent() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
-        NormalizedNode<?, ?> input = Builders.anyXmlBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
+        AnyxmlNode<DOMSource> input = ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+            .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+            .withValue(mock(DOMSource.class))
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
-
     @Test
     public void testAnyXMLNodeNotPrunedWhenHasParent() throws IOException {
-        NormalizedNodePruner pruner = prunerFullSchema(TestModel.TEST_PATH);
-        AnyXmlNode child = Builders.anyXmlBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
-        NormalizedNode<?, ?> input = Builders.containerBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.TEST_QNAME)).withChild(child).build();
+        final var pruner = prunerFullSchema(TestModel.TEST_PATH);
+        final var input = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+                .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+                .withValue(mock(DOMSource.class))
+                .build())
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
     @Test
     public void testAnyXmlNodePrunedWhenHasNoParentAndSchemaMissing() throws IOException {
-        NormalizedNodePruner pruner = prunerNoTestSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
-        NormalizedNode<?, ?> input = Builders.anyXmlBuilder().withNodeIdentifier(
-                new NodeIdentifier(TestModel.ANY_XML_QNAME)).withValue(mock(DOMSource.class)).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        AbstractNormalizedNodePruner pruner = prunerNoTestSchema(TestModel.TEST_PATH.node(TestModel.ANY_XML_QNAME));
+        NormalizedNodeWriter.forStreamWriter(pruner).write(ImmutableNodes.newAnyxmlBuilder(DOMSource.class)
+            .withNodeIdentifier(new NodeIdentifier(TestModel.ANY_XML_QNAME))
+            .withValue(mock(DOMSource.class))
+            .build());
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertNull(actual);
+        assertEquals(Optional.empty(), pruner.getResult());
     }
 
     @Test
@@ -284,13 +251,12 @@ public class NormalizedNodePrunerTest {
                 .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
                 .node(TestModel.INNER_LIST_QNAME).nodeWithKey(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
                 .node(TestModel.INNER_CONTAINER_QNAME).build();
-        NormalizedNodePruner pruner = prunerFullSchema(path);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        NormalizedNode<?, ?> input = ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME);
+        ContainerNode input = containerNode(TestModel.INNER_CONTAINER_QNAME);
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
     @Test
@@ -299,13 +265,11 @@ public class NormalizedNodePrunerTest {
                 .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
                 .node(TestModel.INNER_LIST_QNAME).nodeWithKey(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
                 .node(TestModel.INVALID_QNAME).build();
-        NormalizedNodePruner pruner = prunerFullSchema(path);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        NormalizedNode<?, ?> input = ImmutableNodes.containerNode(TestModel.INVALID_QNAME);
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        NormalizedNodeWriter.forStreamWriter(pruner).write(containerNode(TestModel.INVALID_QNAME));
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertNull(actual);
+        assertEquals(Optional.empty(), pruner.getResult());
     }
 
     @Test
@@ -313,20 +277,22 @@ public class NormalizedNodePrunerTest {
         YangInstanceIdentifier path = YangInstanceIdentifier.builder().node(TestModel.TEST_QNAME)
                 .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
                 .build();
-        NormalizedNodePruner pruner = prunerFullSchema(path);
-
-        MapNode innerList = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
-                TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INVALID_QNAME)).build()).build();
-        NormalizedNode<?, ?> input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
-                .withChild(innerList).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
-        NormalizedNode<?, ?> expected = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
-                .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
-                    TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build()).build()).build();
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", expected, actual);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
+
+        NormalizedNodeWriter.forStreamWriter(pruner)
+            .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+                .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+                    .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
+                        .withChild(containerNode(TestModel.INVALID_QNAME))
+                        .build())
+                    .build())
+                .build());
+
+        assertEquals(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+            .withChild(mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+                .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").build())
+                .build())
+            .build(), pruner.getResult().orElseThrow());
     }
 
     @Test
@@ -334,15 +300,16 @@ public class NormalizedNodePrunerTest {
         YangInstanceIdentifier path = YangInstanceIdentifier.builder().node(TestModel.TEST_QNAME)
                 .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
                 .node(TestModel.INNER_LIST_QNAME).build();
-        NormalizedNodePruner pruner = prunerFullSchema(path);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        MapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(mapEntryBuilder(
-                TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
+        SystemMapNode input = mapNodeBuilder(TestModel.INNER_LIST_QNAME)
+            .withChild(mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "one")
+                .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+                .build())
+            .build();
         NormalizedNodeWriter.forStreamWriter(pruner).write(input);
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", input, actual);
+        assertEquals(input, pruner.getResult().orElseThrow());
     }
 
     @Test
@@ -350,15 +317,15 @@ public class NormalizedNodePrunerTest {
         YangInstanceIdentifier path = YangInstanceIdentifier.builder().node(TestModel.TEST_QNAME)
                 .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
                 .node(TestModel.INVALID_QNAME).build();
-        NormalizedNodePruner pruner = prunerFullSchema(path);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
 
-        MapNode input = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder(
-                TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
+        NormalizedNodeWriter.forStreamWriter(pruner).write(mapNodeBuilder(TestModel.INVALID_QNAME)
+            .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one")
+                .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+                .build())
+            .build());
 
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertNull(actual);
+        assertEquals(Optional.empty(), pruner.getResult());
     }
 
     @Test
@@ -366,34 +333,29 @@ public class NormalizedNodePrunerTest {
         YangInstanceIdentifier path = YangInstanceIdentifier.builder().node(TestModel.TEST_QNAME)
                 .node(TestModel.OUTER_LIST_QNAME).nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
                 .build();
-        NormalizedNodePruner pruner = prunerFullSchema(path);
-
-        MapNode innerList = mapNodeBuilder(TestModel.INVALID_QNAME).withChild(mapEntryBuilder(
-                TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one").withChild(
-                        ImmutableNodes.containerNode(TestModel.INNER_CONTAINER_QNAME)).build()).build();
-        NormalizedNode<?, ?> input = mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
-                .withChild(innerList).build();
-        NormalizedNodeWriter.forStreamWriter(pruner).write(input);
-
-        NormalizedNode<?, ?> expected = mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
-        NormalizedNode<?, ?> actual = pruner.normalizedNode();
-        assertEquals("normalizedNode", expected, actual);
+        AbstractNormalizedNodePruner pruner = prunerFullSchema(path);
+
+        NormalizedNodeWriter.forStreamWriter(pruner)
+            .write(mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
+                .withChild(mapNodeBuilder(TestModel.INVALID_QNAME)
+                    .withChild(mapEntryBuilder(TestModel.INVALID_QNAME, TestModel.NAME_QNAME, "one")
+                        .withChild(containerNode(TestModel.INNER_CONTAINER_QNAME))
+                        .build())
+                    .build())
+                .build());
+
+        assertEquals(mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1),
+            pruner.getResult().orElseThrow());
     }
 
-    private static NormalizedNode<?, ?> createTestContainer() {
-        byte[] bytes1 = {1, 2, 3};
-        LeafSetEntryNode<Object> entry1 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes1)).withValue(bytes1).build();
-
-        byte[] bytes2 = {};
-        LeafSetEntryNode<Object> entry2 = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(TestModel.BINARY_LEAF_LIST_QNAME, bytes2)).withValue(bytes2).build();
-
+    private static ContainerNode createTestContainer() {
         return TestModel.createBaseTestContainerBuilder()
-                .withChild(ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
-                        new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME))
-                        .withChild(entry1).withChild(entry2).build())
-                .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[]{1, 2, 3, 4}))
-                .build();
+            .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TestModel.BINARY_LEAF_LIST_QNAME))
+                .withChildValue(new byte[] {1, 2, 3})
+                .withChildValue(new byte[0])
+                .build())
+            .withChild(ImmutableNodes.leafNode(TestModel.SOME_BINARY_DATA_QNAME, new byte[] {1, 2, 3, 4}))
+            .build();
     }
 }
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/utils/transformer/UintAdaptingPrunerTest.java
new file mode 100644 (file)
index 0000000..b29113c
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.node.utils.transformer;
+
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.collect.ImmutableMap;
+import java.io.IOException;
+import java.math.BigInteger;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.opendaylight.yangtools.yang.common.Uint64;
+import org.opendaylight.yangtools.yang.common.Uint8;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
+
+public class UintAdaptingPrunerTest {
+    private static final QName CONT = QName.create("urn:odl-ctlr1923", "cont");
+    private static final QName LST = QName.create(CONT, "lst");
+    private static final QName LFLST8 = QName.create(CONT, "lf-lst8");
+    private static final QName LFLST16 = QName.create(CONT, "lf-lst16");
+    private static final QName LFLST32 = QName.create(CONT, "lf-lst32");
+    private static final QName LFLST64 = QName.create(CONT, "lf-lst64");
+    private static final QName A = QName.create(LST, "a");
+    private static final QName B = QName.create(LST, "b");
+    private static final QName C = QName.create(LST, "c");
+    private static final QName D = QName.create(LST, "d");
+    private static final QName E = QName.create(LST, "e");
+    private static final QName F = QName.create(LST, "f");
+    private static final QName G = QName.create(LST, "g");
+    private static final QName H = QName.create(LST, "h");
+
+    private static EffectiveModelContext CONTEXT;
+
+    @BeforeClass
+    public static void beforeClass() {
+        CONTEXT = YangParserTestUtils.parseYangResource("/odl-ctlr1923.yang");
+    }
+
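+    // Plain short/int/long/BigInteger values for the uint8..uint64 leaves (and the matching key predicates)
+    // are expected to be adapted to Uint8..Uint64, while the signed leaves stay untouched.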
+    @Test
+    public void testListTranslation() throws IOException {
+        assertEquals(ImmutableNodes.newSystemMapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LST))
+            .withChild(ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(NodeIdentifierWithPredicates.of(LST, ImmutableMap.<QName, Object>builder()
+                    .put(A, (byte) 1)
+                    .put(B, (short) 1)
+                    .put(C, 1)
+                    .put(D, 1L)
+                    .put(E, Uint8.ONE)
+                    .put(F, Uint16.ONE)
+                    .put(G, Uint32.ONE)
+                    .put(H, Uint64.ONE)
+                    .build()))
+                .withChild(ImmutableNodes.leafNode(A, (byte) 1))
+                .withChild(ImmutableNodes.leafNode(B, (short) 1))
+                .withChild(ImmutableNodes.leafNode(C, 1))
+                .withChild(ImmutableNodes.leafNode(D, 1L))
+                .withChild(ImmutableNodes.leafNode(E, Uint8.ONE))
+                .withChild(ImmutableNodes.leafNode(F, Uint16.ONE))
+                .withChild(ImmutableNodes.leafNode(G, Uint32.ONE))
+                .withChild(ImmutableNodes.leafNode(H, Uint64.ONE))
+                .build())
+            .build(),
+            prune(ImmutableNodes.newSystemMapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(LST))
+                .withChild(ImmutableNodes.newMapEntryBuilder()
+                    .withNodeIdentifier(NodeIdentifierWithPredicates.of(LST, ImmutableMap.<QName, Object>builder()
+                        .put(A, (byte) 1)
+                        .put(B, (short) 1)
+                        .put(C, 1)
+                        .put(D, 1L)
+                        .put(E, (short) 1)
+                        .put(F, 1)
+                        .put(G, 1L)
+                        .put(H, BigInteger.ONE)
+                        .build()))
+                    .withChild(ImmutableNodes.leafNode(A, (byte) 1))
+                    .withChild(ImmutableNodes.leafNode(B, (short) 1))
+                    .withChild(ImmutableNodes.leafNode(C, 1))
+                    .withChild(ImmutableNodes.leafNode(D, 1L))
+                    .withChild(ImmutableNodes.leafNode(E, (short) 1))
+                    .withChild(ImmutableNodes.leafNode(F, 1))
+                    .withChild(ImmutableNodes.leafNode(G, 1L))
+                    .withChild(ImmutableNodes.leafNode(H, BigInteger.ONE))
+                    .build())
+                .build()));
+    }
+
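+    // Same adaptation as above, but for uint leaves placed directly inside a container.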
+    @Test
+    public void testContainerTranslation() throws IOException {
+        assertEquals(ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CONT))
+            .withChild(ImmutableNodes.leafNode(A, (byte) 1))
+            .withChild(ImmutableNodes.leafNode(B, (short) 1))
+            .withChild(ImmutableNodes.leafNode(C, 1))
+            .withChild(ImmutableNodes.leafNode(D, 1L))
+            .withChild(ImmutableNodes.leafNode(E, Uint8.ONE))
+            .withChild(ImmutableNodes.leafNode(F, Uint16.ONE))
+            .withChild(ImmutableNodes.leafNode(G, Uint32.ONE))
+            .withChild(ImmutableNodes.leafNode(H, Uint64.ONE))
+            .build(),
+            prune(ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CONT))
+                .withChild(ImmutableNodes.leafNode(A, (byte) 1))
+                .withChild(ImmutableNodes.leafNode(B, (short) 1))
+                .withChild(ImmutableNodes.leafNode(C, 1))
+                .withChild(ImmutableNodes.leafNode(D, 1L))
+                .withChild(ImmutableNodes.leafNode(E, (short) 1))
+                .withChild(ImmutableNodes.leafNode(F, 1))
+                .withChild(ImmutableNodes.leafNode(G, 1L))
+                .withChild(ImmutableNodes.leafNode(H, BigInteger.ONE))
+                .build()));
+    }
+
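+    // The leaf-list tests verify that a legacy-typed entry value is adapted to the matching Uint type.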
+    @Test
+    public void testLeafList8() throws IOException {
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LFLST8))
+            .withChildValue(Uint8.ONE)
+            .build(),
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(LFLST8))
+                .withChildValue((short) 1)
+                .build()));
+    }
+
+    @Test
+    public void testLeafList16() throws IOException {
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LFLST16))
+            .withChildValue(Uint16.ONE)
+            .build(),
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(LFLST16))
+                .withChildValue(1)
+                .build()));
+    }
+
+    @Test
+    public void testLeafList32() throws IOException {
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LFLST32))
+            .withChildValue(Uint32.ONE)
+            .build(),
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(LFLST32))
+                .withChildValue(1L)
+                .build()));
+    }
+
+    @Test
+    public void testLeafList64() throws IOException {
+        assertEquals(ImmutableNodes.newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LFLST64))
+            .withChildValue(Uint64.ONE)
+            .build(),
+            prune(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(LFLST64))
+                .withChildValue(BigInteger.ONE)
+                .build()));
+    }
+
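+    // Runs the node through a fresh uint-adapting pruner rooted at the node's own path and returns the result.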
+    private static NormalizedNode prune(final NormalizedNode node) throws IOException {
+        final var pruner = ReusableNormalizedNodePruner.forSchemaContext(CONTEXT).withUintAdaption();
+        pruner.initializeForPath(YangInstanceIdentifier.of(node.name()));
+
+        try (NormalizedNodeWriter writer = NormalizedNodeWriter.forStreamWriter(pruner)) {
+            writer.write(node);
+        }
+        pruner.close();
+        return pruner.getResult().orElseThrow();
+    }
+}
index 7cdd7839954397c440858070adb6268013124b4e..37d102d0f352e98aaf5382290a2c751091d57528 100644 (file)
@@ -5,48 +5,28 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.util;
 
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
+import static org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes.leafNode;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import java.io.InputStream;
-import java.math.BigDecimal;
-import java.math.BigInteger;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
+import java.util.List;
+import org.opendaylight.yangtools.yang.common.Decimal64;
 import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.UnkeyedListEntryNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
 public final class TestModel {
@@ -173,163 +153,84 @@ public final class TestModel {
         return TestModel.class.getResourceAsStream(resourceName);
     }
 
-    public static SchemaContext createTestContext() {
+    public static EffectiveModelContext createTestContext() {
         return YangParserTestUtils.parseYangResources(TestModel.class, DATASTORE_TEST_YANG, DATASTORE_AUG_YANG,
             DATASTORE_TEST_NOTIFICATION_YANG);
     }
 
-    public static SchemaContext createTestContextWithoutTestSchema() {
+    public static EffectiveModelContext createTestContextWithoutTestSchema() {
         return YangParserTestUtils.parseYangResource(DATASTORE_TEST_NOTIFICATION_YANG);
     }
 
-    public static SchemaContext createTestContextWithoutAugmentationSchema() {
+    public static EffectiveModelContext createTestContextWithoutAugmentationSchema() {
         return YangParserTestUtils.parseYangResources(TestModel.class, DATASTORE_TEST_YANG,
             DATASTORE_TEST_NOTIFICATION_YANG);
     }
 
-    /**
-     * Returns a test document.
-     * <p/>
-     * <p/>
-     * <pre>
-     * test
-     *     outer-list
-     *          id 1
-     *     outer-list
-     *          id 2
-     *          inner-list
-     *                  name "one"
-     *          inner-list
-     *                  name "two"
-     *
-     * </pre>
-     */
-    public static NormalizedNode<?, ?> createDocumentOne(final SchemaContext schemaContext) {
-        return ImmutableContainerNodeBuilder
-                .create()
-                .withNodeIdentifier(
-                        new NodeIdentifier(schemaContext.getQName()))
-                .withChild(createTestContainer()).build();
-
-    }
-
-    public static DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> createBaseTestContainerBuilder() {
-        // Create a list of shoes
-        // This is to test leaf list entry
-        final LeafSetEntryNode<Object> nike = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(SHOE_QNAME, "nike")).withValue("nike").build();
-
-        final LeafSetEntryNode<Object> puma = ImmutableLeafSetEntryNodeBuilder.create().withNodeIdentifier(
-                new NodeWithValue<>(SHOE_QNAME, "puma")).withValue("puma").build();
-
-        final LeafSetNode<Object> shoes = ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
-                new NodeIdentifier(SHOE_QNAME)).withChild(nike).withChild(puma).build();
-
-        // Test a leaf-list where each entry contains an identity
-        final LeafSetEntryNode<Object> cap1 =
-                ImmutableLeafSetEntryNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeWithValue<>(QName.create(
-                                        TEST_QNAME, "capability"), DESC_QNAME))
-                        .withValue(DESC_QNAME).build();
-
-        final LeafSetNode<Object> capabilities =
-                ImmutableLeafSetNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeIdentifier(QName.create(
-                                        TEST_QNAME, "capability"))).withChild(cap1).build();
-
-        ContainerNode switchFeatures =
-                ImmutableContainerNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeIdentifier(SWITCH_FEATURES_QNAME))
-                        .withChild(capabilities).build();
-
-        // Create a leaf list with numbers
-        final LeafSetEntryNode<Object> five =
-                ImmutableLeafSetEntryNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeWithValue<>(QName.create(
-                                        TEST_QNAME, "number"), 5)).withValue(5).build();
-        final LeafSetEntryNode<Object> fifteen =
-                ImmutableLeafSetEntryNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeWithValue<>(QName.create(
-                                        TEST_QNAME, "number"), 15)).withValue(15).build();
-        final LeafSetNode<Object> numbers =
-                ImmutableLeafSetNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new NodeIdentifier(QName.create(
-                                        TEST_QNAME, "number"))).withChild(five).withChild(fifteen)
-                        .build();
-
-
-        // Create augmentations
-        MapEntryNode augMapEntry = createAugmentedListEntry(1, "First Test");
-
-        // Create a bits leaf
-        NormalizedNodeAttrBuilder<NodeIdentifier, Object, LeafNode<Object>>
-                myBits = Builders.leafBuilder()
-                .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "my-bits")))
-                .withValue(ImmutableSet.of("foo", "bar"));
-
-        // Create unkeyed list entry
-        UnkeyedListEntryNode unkeyedListEntry =
-                Builders.unkeyedListEntryBuilder().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(
-                    UNKEYED_LIST_QNAME)).withChild(ImmutableNodes.leafNode(NAME_QNAME, "unkeyed-entry-name")).build();
-
+    public static DataContainerNodeBuilder<NodeIdentifier, ContainerNode> createBaseTestContainerBuilder() {
         // Create YangInstanceIdentifier with all path arg types.
-        YangInstanceIdentifier instanceID = YangInstanceIdentifier.create(
-                new NodeIdentifier(QName.create(TEST_QNAME, "qname")),
-                new NodeIdentifierWithPredicates(QName.create(TEST_QNAME, "list-entry"),
-                        QName.create(TEST_QNAME, "key"), 10),
-                new AugmentationIdentifier(ImmutableSet.of(
-                        QName.create(TEST_QNAME, "aug1"), QName.create(TEST_QNAME, "aug2"))),
-                new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo"));
-
-        Map<QName, Object> keyValues = new HashMap<>();
-        keyValues.put(CHILDREN_QNAME, FIRST_CHILD_NAME);
-
+        YangInstanceIdentifier instanceID = YangInstanceIdentifier.of(
+            new NodeIdentifier(QName.create(TEST_QNAME, "qname")),
+            NodeIdentifierWithPredicates.of(QName.create(TEST_QNAME, "list-entry"),
+                QName.create(TEST_QNAME, "key"), 10),
+            new NodeWithValue<>(QName.create(TEST_QNAME, "leaf-list-entry"), "foo"));
 
         // Create the document
-        return ImmutableContainerNodeBuilder
-                .create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME))
-                .withChild(myBits.build())
-                .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC))
-                .withChild(ImmutableNodes.leafNode(BOOLEAN_LEAF_QNAME, ENABLED))
-                .withChild(ImmutableNodes.leafNode(SHORT_LEAF_QNAME, SHORT_ID))
-                .withChild(ImmutableNodes.leafNode(BYTE_LEAF_QNAME, BYTE_ID))
-                .withChild(ImmutableNodes.leafNode(TestModel.BIGINTEGER_LEAF_QNAME, BigInteger.valueOf(100)))
-                .withChild(ImmutableNodes.leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, BigDecimal.valueOf(1.2)))
-                .withChild(ImmutableNodes.leafNode(SOME_REF_QNAME, instanceID))
-                .withChild(ImmutableNodes.leafNode(MYIDENTITY_QNAME, DESC_QNAME))
-                .withChild(Builders.unkeyedListBuilder()
-                        .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(UNKEYED_LIST_QNAME))
-                        .withChild(unkeyedListEntry).build())
-                .withChild(Builders.choiceBuilder()
-                        .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TWO_THREE_QNAME))
-                        .withChild(ImmutableNodes.leafNode(TWO_QNAME, "two")).build())
-                .withChild(Builders.orderedMapBuilder()
-                        .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(ORDERED_LIST_QNAME))
-                        .withValue(ImmutableList.<MapEntryNode>builder().add(
-                                mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(),
-                                mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build()).build())
-                        .build())
-                .withChild(shoes)
-                .withChild(numbers)
-                .withChild(switchFeatures)
-                .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME).withChild(augMapEntry).build())
-                .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
-                                .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
-                                .withChild(BAR_NODE).build()
-                );
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            // Create a bits leaf
+            .withChild(leafNode(QName.create(TEST_QNAME, "my-bits"), ImmutableSet.of("foo", "bar")))
+            .withChild(leafNode(DESC_QNAME, DESC))
+            .withChild(leafNode(BOOLEAN_LEAF_QNAME, ENABLED))
+            .withChild(leafNode(SHORT_LEAF_QNAME, SHORT_ID))
+            .withChild(leafNode(BYTE_LEAF_QNAME, BYTE_ID))
+            .withChild(leafNode(TestModel.BIGINTEGER_LEAF_QNAME, Uint64.valueOf(100)))
+            .withChild(leafNode(TestModel.BIGDECIMAL_LEAF_QNAME, Decimal64.valueOf("1.2").scaleTo(2)))
+            .withChild(leafNode(SOME_REF_QNAME, instanceID))
+            .withChild(leafNode(MYIDENTITY_QNAME, DESC_QNAME))
+            .withChild(ImmutableNodes.newUnkeyedListBuilder()
+                .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
+                // Create unkeyed list entry
+                .withChild(ImmutableNodes.newUnkeyedListEntryBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(UNKEYED_LIST_QNAME))
+                    .withChild(leafNode(NAME_QNAME, "unkeyed-entry-name"))
+                    .build())
+                .build())
+            .withChild(ImmutableNodes.newChoiceBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TWO_THREE_QNAME))
+                .withChild(leafNode(TWO_QNAME, "two")).build())
+            .withChild(ImmutableNodes.newUserMapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(ORDERED_LIST_QNAME))
+                .withValue(List.of(
+                    mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "1").build(),
+                    mapEntryBuilder(ORDERED_LIST_QNAME, ORDERED_LIST_ENTRY_QNAME, "2").build()))
+                .build())
+            .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(SHOE_QNAME))
+                .withChildValue("nike")
+                .withChildValue("puma")
+                .build())
+            .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "number")))
+                .withChildValue(5)
+                .withChildValue(15)
+                .build())
+            .withChild(ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(SWITCH_FEATURES_QNAME))
+                // Test a leaf-list where each entry contains an identity
+                .withChild(ImmutableNodes.newSystemLeafSetBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "capability")))
+                    .withChildValue(DESC_QNAME)
+                    .build())
+                .build())
+            .withChild(mapNodeBuilder(AUGMENTED_LIST_QNAME)
+                // Create augmentations
+                .withChild(createAugmentedListEntry(1, "First Test"))
+                .build())
+            .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
+                .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
+                .withChild(BAR_NODE)
+                .build());
     }
 
     public static ContainerNode createTestContainer() {
@@ -337,89 +238,41 @@ public final class TestModel {
     }
 
     public static MapEntryNode createAugmentedListEntry(final int id, final String name) {
-
-        Set<QName> childAugmentations = new HashSet<>();
-        childAugmentations.add(AUG_CONT_QNAME);
-
-        ContainerNode augCont =
-                ImmutableContainerNodeBuilder
-                        .create()
-                        .withNodeIdentifier(
-                                new YangInstanceIdentifier.NodeIdentifier(AUG_CONT_QNAME))
-                        .withChild(ImmutableNodes.leafNode(AUG_NAME_QNAME, name)).build();
-
-
-        final YangInstanceIdentifier.AugmentationIdentifier augmentationIdentifier =
-                new YangInstanceIdentifier.AugmentationIdentifier(childAugmentations);
-
-        final AugmentationNode augmentationNode =
-                Builders.augmentationBuilder()
-                        .withNodeIdentifier(augmentationIdentifier).withChild(augCont)
-                        .build();
-
-        return ImmutableMapEntryNodeBuilder
-                .create()
-                .withNodeIdentifier(
-                        new YangInstanceIdentifier.NodeIdentifierWithPredicates(
-                                AUGMENTED_LIST_QNAME, ID_QNAME, id))
-                .withChild(ImmutableNodes.leafNode(ID_QNAME, id))
-                .withChild(augmentationNode).build();
+        return ImmutableNodes.newMapEntryBuilder()
+            .withNodeIdentifier(NodeIdentifierWithPredicates.of(AUGMENTED_LIST_QNAME, ID_QNAME, id))
+            .withChild(leafNode(ID_QNAME, id))
+            .withChild(ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(AUG_CONT_QNAME))
+                .withChild(leafNode(AUG_NAME_QNAME, name))
+                .build())
+            .build();
     }
 
-
     public static ContainerNode createFamily() {
-        final DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode>
-            familyContainerBuilder = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                        new YangInstanceIdentifier.NodeIdentifier(FAMILY_QNAME));
-
-        final CollectionNodeBuilder<MapEntryNode, MapNode> childrenBuilder =
-                mapNodeBuilder(CHILDREN_QNAME);
-
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            firstChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID);
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            secondChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID);
-
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            firstGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    FIRST_GRAND_CHILD_ID);
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            secondGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    SECOND_GRAND_CHILD_ID);
-
-        firstGrandChildBuilder
-                .withChild(
-                        ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME,
-                                FIRST_GRAND_CHILD_ID)).withChild(
-                ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME,
-                        FIRST_GRAND_CHILD_NAME));
-
-        secondGrandChildBuilder.withChild(
-                ImmutableNodes
-                        .leafNode(GRAND_CHILD_NUMBER_QNAME, SECOND_GRAND_CHILD_ID))
-                .withChild(
-                        ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME,
-                                SECOND_GRAND_CHILD_NAME));
-
-        firstChildBuilder
-                .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
-                .withChild(
-                        mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(
-                                firstGrandChildBuilder.build()).build());
-
-
-        secondChildBuilder
-                .withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
-                .withChild(
-                        mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(
-                                firstGrandChildBuilder.build()).build());
-
-        childrenBuilder.withChild(firstChildBuilder.build());
-        childrenBuilder.withChild(secondChildBuilder.build());
-
-        return familyContainerBuilder.withChild(childrenBuilder.build()).build();
+        final var firstGrandChildBuilder = mapEntryBuilder(
+                GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID)
+            .withChild(leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
+            .withChild(leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME));
+
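+        // Note: both children reuse the same (first) grandchild entry, as the replaced code did.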
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME))
+            .withChild(ImmutableNodes.newSystemMapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CHILDREN_QNAME))
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
+                        .withChild(firstGrandChildBuilder.build())
+                        .build())
+                    .build())
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME)
+                        .withChild(firstGrandChildBuilder.build())
+                        .build())
+                    .build())
+                .build())
+            .build();
     }
-
 }
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/io/ChunkedOutputStreamTest.java
new file mode 100644 (file)
index 0000000..a93b6a8
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.io;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.List;
+import org.junit.Test;
+
+public class ChunkedOutputStreamTest {
+    private static final int INITIAL_SIZE = 256;
+    private static final int MAX_ARRAY_SIZE = 256 * 1024;
+
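+    // Stream under test: grows from INITIAL_SIZE and chunks its contents at MAX_ARRAY_SIZE bytes.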
+    private final ChunkedOutputStream stream = new ChunkedOutputStream(INITIAL_SIZE, MAX_ARRAY_SIZE);
+
+    @Test
+    public void testBasicWrite() throws IOException {
+        for (int i = 0; i < INITIAL_SIZE; ++i) {
+            stream.write(i);
+        }
+
+        final byte[] chunk = assertFinishedStream(INITIAL_SIZE, 1).get(0);
+        assertEquals(INITIAL_SIZE, chunk.length);
+        for (int i = 0; i < INITIAL_SIZE; ++i) {
+            assertEquals((byte) i, chunk[i]);
+        }
+    }
+
+    @Test
+    public void testBasicLargeWrite() throws IOException {
+        final byte[] array = createArray(INITIAL_SIZE);
+        stream.write(array);
+        final byte[] chunk = assertFinishedStream(INITIAL_SIZE, 1).get(0);
+        assertArrayEquals(array, chunk);
+    }
+
+    @Test
+    public void testGrowWrite() throws IOException {
+        for (int i = 0; i < INITIAL_SIZE * 2; ++i) {
+            stream.write(i);
+        }
+
+        final byte[] chunk = assertFinishedStream(INITIAL_SIZE * 2, 1).get(0);
+        assertEquals(INITIAL_SIZE * 2, chunk.length);
+        for (int i = 0; i < INITIAL_SIZE * 2; ++i) {
+            assertEquals((byte) i, chunk[i]);
+        }
+    }
+
+    @Test
+    public void testGrowLargeWrite() throws IOException {
+        final byte[] array = createArray(INITIAL_SIZE * 2);
+        stream.write(array);
+        final byte[] chunk = assertFinishedStream(INITIAL_SIZE * 2, 1).get(0);
+        assertArrayEquals(array, chunk);
+    }
+
+    @Test
+    public void testTwoChunksWrite() throws IOException {
+        int size = MAX_ARRAY_SIZE + 1;
+        for (int i = 0; i < size; ++i) {
+            stream.write(i);
+        }
+
+        int counter = 0;
+        for (byte[] chunk : assertFinishedStream(size, 2)) {
+            for (byte actual : chunk) {
+                assertEquals((byte) counter++, actual);
+            }
+        }
+    }
+
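+    // Closes the stream, converts it to a ChunkedByteArray and asserts the total size and chunk count
+    // before returning the chunks for content verification.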
+    private List<byte[]> assertFinishedStream(final int expectedSize, final int expectedChunks) {
+        stream.close();
+        final ChunkedByteArray array = stream.toChunkedByteArray();
+        assertEquals(expectedSize, array.size());
+
+        final List<byte[]> chunks = array.getChunks();
+        assertEquals(expectedChunks, chunks.size());
+        return chunks;
+    }
+
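+    // Fills an array of the requested size with the low byte of each index, giving easily-verifiable content.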
+    private static byte[] createArray(final int size) {
+        final byte[] array = new byte[size];
+        for (int i = 0; i < size; ++i) {
+            array[i] = (byte) i;
+        }
+        return array;
+    }
+}
index a7abf7ca9740942285d8f14f13009ebc8ce3b58a..da5c5b6763b806540074be6b193a74ccbeb7ae98 100644 (file)
@@ -18,7 +18,6 @@ import com.google.common.base.Stopwatch;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.Arrays;
 import java.util.concurrent.TimeUnit;
 import org.junit.After;
@@ -52,7 +51,6 @@ public class FileBackedOutputStreamTest {
     @Before
     public void setup() {
         deleteTempFiles(TEMP_DIR);
-        FileBackedOutputStream.REFERENCE_CACHE.clear();
     }
 
     @After
@@ -76,8 +74,6 @@ public class FileBackedOutputStreamTest {
             assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
             assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
 
-            assertEquals("Reference cache size", 0, FileBackedOutputStream.REFERENCE_CACHE.size());
-
             fbos.cleanup();
         }
 
@@ -101,23 +97,17 @@ public class FileBackedOutputStreamTest {
             assertEquals("Temp file", tempFileName, findTempFileName(TEMP_DIR));
             assertEquals("Size", bytes.length, fbos.asByteSource().size());
 
-            InputStream inputStream = fbos.asByteSource().openStream();
-
-            assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
-
-            byte[] inBytes = new byte[bytes.length];
-            assertEquals("# bytes read", bytes.length, inputStream.read(inBytes));
-            assertArrayEquals("Read InputStream", bytes, inBytes);
-            assertEquals("End of stream", -1, inputStream.read());
+            try (var inputStream = fbos.asByteSource().openStream()) {
+                assertArrayEquals("Read bytes", bytes, fbos.asByteSource().read());
 
-            inputStream.close();
-
-            assertEquals("Reference cache size", 1, FileBackedOutputStream.REFERENCE_CACHE.size());
+                byte[] inBytes = new byte[bytes.length];
+                assertEquals("# bytes read", bytes.length, inputStream.read(inBytes));
+                assertArrayEquals("Read InputStream", bytes, inBytes);
+                assertEquals("End of stream", -1, inputStream.read());
+            }
 
             fbos.cleanup();
 
-            assertEquals("Reference cache size", 0, FileBackedOutputStream.REFERENCE_CACHE.size());
-
             assertNull("Found unexpected temp file", findTempFileName(TEMP_DIR));
         }
 
@@ -189,27 +179,27 @@ public class FileBackedOutputStreamTest {
         fail("Temp file was not deleted");
     }
 
-    static String findTempFileName(String dirPath) {
+    static String findTempFileName(final String dirPath) {
         String[] files = new File(dirPath).list();
         assertNotNull(files);
         assertTrue("Found more than one temp file: " + Arrays.toString(files), files.length < 2);
         return files.length == 1 ? files[0] : null;
     }
 
-    static boolean deleteFile(String file) {
+    static boolean deleteFile(final String file) {
         return new File(file).delete();
     }
 
-    static void deleteTempFiles(String path) {
+    static void deleteTempFiles(final String path) {
         String[] files = new File(path).list();
         if (files != null) {
-            for (String file: files) {
+            for (String file : files) {
                 deleteFile(path + File.separator + file);
             }
         }
     }
 
-    static void createDir(String path) {
+    static void createDir(final String path) {
         File dir = new File(path);
         if (!dir.exists() && !dir.mkdirs()) {
             throw new RuntimeException("Failed to create temp dir " + path);
index 0d6f5811238166a4727bb996252e8aed0fbd7a3c..71e1866f3268e4eca84c2b0ca06c8c2d609f41be 100644 (file)
@@ -46,7 +46,6 @@ public class SharedFileBackedOutputStreamTest {
     @Before
     public void setup() {
         FileBackedOutputStreamTest.deleteTempFiles(TEMP_DIR);
-        FileBackedOutputStream.REFERENCE_CACHE.clear();
     }
 
     @After
index 4441857f39c7fab27cd357f34ae5bcb06b993922..af0027bdfecbd0b36fd02dfa107ede79c3b8e53d 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.messaging;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -22,7 +22,7 @@ public class AbortSlicingTest {
     @Test
     public void testSerialization() {
         AbortSlicing expected = new AbortSlicing(new StringIdentifier("test"));
-        AbortSlicing cloned = (AbortSlicing) SerializationUtils.clone(expected);
+        AbortSlicing cloned = SerializationUtils.clone(expected);
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
     }
 }
index 17b54a045dfb74d9437e41606c97d4492fa9f3e5..128a0442e3d4112b4a14df7b9c6da061d3285eec 100644 (file)
@@ -105,7 +105,7 @@ public class MessageAssemblerTest extends AbstractMessagingTest {
 
             final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class);
             assertFailedMessageSliceReply(reply, IDENTIFIER, false);
-            assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause());
+            assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause());
 
             assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier));
             verify(mockFiledBackedStream).cleanup();
@@ -130,7 +130,7 @@ public class MessageAssemblerTest extends AbstractMessagingTest {
 
             final MessageSliceReply reply = testProbe.expectMsgClass(MessageSliceReply.class);
             assertFailedMessageSliceReply(reply, IDENTIFIER, false);
-            assertEquals("Failure cause", mockFailure, reply.getFailure().get().getCause());
+            assertEquals("Failure cause", mockFailure, reply.getFailure().orElseThrow().getCause());
 
             assertFalse("MessageAssembler did not remove state for " + identifier, assembler.hasState(identifier));
             verify(mockFiledBackedStream).cleanup();
@@ -173,11 +173,11 @@ public class MessageAssemblerTest extends AbstractMessagingTest {
         }
     }
 
-    private MessageAssembler newMessageAssembler(String logContext) {
+    private MessageAssembler newMessageAssembler(final String logContext) {
         return newMessageAssemblerBuilder(logContext).build();
     }
 
-    private Builder newMessageAssemblerBuilder(String logContext) {
+    private Builder newMessageAssemblerBuilder(final String logContext) {
         return MessageAssembler.builder().fileBackedStreamFactory(mockFiledBackedStreamFactory)
                 .assembledMessageCallback(mockAssembledMessageCallback).logContext(logContext);
     }
index 9c80033b92014bd08b43a4bbdfd4a321fc5dd780..c5dbcdd13cdf084493142859e85fdeefb334c42e 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.messaging;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -22,7 +22,7 @@ public class MessageSliceIdentifierTest {
     @Test
     public void testSerialization() {
         MessageSliceIdentifier expected = new MessageSliceIdentifier(new StringIdentifier("test"), 123L);
-        MessageSliceIdentifier cloned = (MessageSliceIdentifier) SerializationUtils.clone(expected);
+        MessageSliceIdentifier cloned = SerializationUtils.clone(expected);
         assertEquals("cloned", expected, cloned);
         assertEquals("getClientIdentifier", expected.getClientIdentifier(), cloned.getClientIdentifier());
         assertEquals("getSlicerId", expected.getSlicerId(), cloned.getSlicerId());
index 51c4479119f40618b0e15cc3bfd0744854569282..8b661f68d7e9f5275666a645edbd9f603753b42b 100644 (file)
@@ -16,7 +16,7 @@ import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -48,7 +48,7 @@ public class MessageSliceReplyTest {
     private void testSuccess() {
         MessageSliceReply expected = MessageSliceReply.success(new StringIdentifier("test"), 3,
                 TestProbe.apply(actorSystem).ref());
-        MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected);
+        MessageSliceReply cloned = SerializationUtils.clone(expected);
 
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
         assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
@@ -59,15 +59,15 @@ public class MessageSliceReplyTest {
     private void testFailure() {
         MessageSliceReply expected = MessageSliceReply.failed(new StringIdentifier("test"),
                 new MessageSliceException("mock", true), TestProbe.apply(actorSystem).ref());
-        MessageSliceReply cloned = (MessageSliceReply) SerializationUtils.clone(expected);
+        MessageSliceReply cloned = SerializationUtils.clone(expected);
 
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
         assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
         assertEquals("getSendTo", expected.getSendTo(), cloned.getSendTo());
         assertTrue("getFailure present", cloned.getFailure().isPresent());
-        assertEquals("getFailure message", expected.getFailure().get().getMessage(),
-                cloned.getFailure().get().getMessage());
-        assertEquals("getFailure isRetriable", expected.getFailure().get().isRetriable(),
-                cloned.getFailure().get().isRetriable());
+        assertEquals("getFailure message", expected.getFailure().orElseThrow().getMessage(),
+                cloned.getFailure().orElseThrow().getMessage());
+        assertEquals("getFailure isRetriable", expected.getFailure().orElseThrow().isRetriable(),
+                cloned.getFailure().orElseThrow().isRetriable());
     }
 }
index dc2e6de9d75a2fef8dff83b962751fd9008beb6b..afb764091ca43a63281e8b0db8c3d947905e3c51 100644 (file)
@@ -15,7 +15,7 @@ import akka.actor.ExtendedActorSystem;
 import akka.serialization.JavaSerializer;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -35,7 +35,7 @@ public class MessageSliceTest {
 
     @After
     public void tearDown() {
-        TestKit.shutdownActorSystem(actorSystem, Boolean.TRUE);
+        TestKit.shutdownActorSystem(actorSystem, true);
     }
 
     @Test
@@ -50,7 +50,7 @@ public class MessageSliceTest {
 
         MessageSlice expected = new MessageSlice(new StringIdentifier("test"), data, 2, 3, 54321,
                 TestProbe.apply(actorSystem).ref());
-        MessageSlice cloned = (MessageSlice) SerializationUtils.clone(expected);
+        MessageSlice cloned = SerializationUtils.clone(expected);
 
         assertEquals("getIdentifier", expected.getIdentifier(), cloned.getIdentifier());
         assertEquals("getSliceIndex", expected.getSliceIndex(), cloned.getSliceIndex());
index 61c06177991613d2013f2b1b4d0c96fc9e901664..63b37e12b0bc436708d4a9e84de0eb6998022400 100644 (file)
@@ -328,7 +328,7 @@ public class MessageSlicingIntegrationTest {
         assertEquals("Identifier", identifier, ((MessageSliceIdentifier)reply.getIdentifier())
                 .getClientIdentifier());
         assertEquals("Failure present", Boolean.TRUE, reply.getFailure().isPresent());
-        assertEquals("isRetriable", isRetriable, reply.getFailure().get().isRetriable());
+        assertEquals("isRetriable", isRetriable, reply.getFailure().orElseThrow().isRetriable());
     }
 
     static void assertMessageSlice(final MessageSlice sliceMessage, final Identifier identifier, final int sliceIndex,
index 7b70063afa9fad2c16fbb296214c958b8673cf59..5fb60e64b0b23546b684bb604c21845f133d1a3c 100644 (file)
@@ -13,7 +13,7 @@ import java.io.File;
 import java.io.IOException;
 import org.apache.commons.io.FileUtils;
 import org.junit.runner.RunWith;
-import org.scalatest.junit.JUnitRunner;
+import org.scalatestplus.junit.JUnitRunner;
 
 /**
  * Tests the LocalSnapshotStore using akka's standard test suite for snapshot store plugins via SnapshotStoreSpec.
index 7115dd84756e3b3ef64fec3679cb4645e172bd52..c9ab83e762977a5814d838e243c238b63ed87aa7 100644 (file)
@@ -32,17 +32,17 @@ import com.typesafe.config.ConfigFactory;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.MockitoAnnotations;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
 import scala.Option;
 
 /**
@@ -51,6 +51,7 @@ import scala.Option;
  *
  * @author Thomas Pantelis
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class LocalSnapshotStoreTest {
     private static final String PERSISTENCE_ID = "member-1-shard-default-config";
     private static final String PREFIX_BASED_SHARD_PERSISTENCE_ID = "member-1-shard-id-ints!-config";
@@ -74,7 +75,6 @@ public class LocalSnapshotStoreTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
         cleanSnapshotDir();
     }
 
@@ -195,9 +195,7 @@ public class LocalSnapshotStoreTest {
         }
     }
 
-    private static String toSnapshotName(final String persistenceId, final int seqNr, final int timestamp)
-            throws UnsupportedEncodingException {
-        final String encodedPersistenceId = URLEncoder.encode(persistenceId, StandardCharsets.UTF_8.name());
-        return "snapshot-" + encodedPersistenceId + "-" + seqNr + "-" + timestamp;
+    private static String toSnapshotName(final String persistenceId, final int seqNr, final int timestamp) {
+        return "snapshot-" + URLEncoder.encode(persistenceId, StandardCharsets.UTF_8) + "-" + seqNr + "-" + timestamp;
     }
 }
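
The Charset-based URLEncoder.encode overload (Java 10+) does not declare UnsupportedEncodingException, which is what allows the throws clause on toSnapshotName() to be dropped above. A small sketch under that assumption; the persistence-id value and sequence/timestamp numbers are illustrative only:

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    public class SnapshotNameDemo {
        public static void main(String[] args) {
            // encode(String, Charset) needs no checked-exception handling,
            // unlike the older encode(String, String) variant.
            String encoded = URLEncoder.encode("member-1-shard-id-ints!-config", StandardCharsets.UTF_8);
            System.out.println("snapshot-" + encoded + "-1-1000");
            // prints: snapshot-member-1-shard-id-ints%21-config-1-1000
        }
    }
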
index 0b41cdfa4ed8ea300b47222feb855c7fc49b7398..611bebfd61e6330edc29efb31664406e56edb002 100644 (file)
@@ -5,68 +5,61 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 
 import akka.dispatch.ExecutionContexts;
 import akka.dispatch.Futures;
-import com.google.common.io.ByteSource;
-import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.io.CharSource;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.io.IOException;
+import java.util.concurrent.ExecutionException;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.schema.provider.RemoteYangTextSourceProvider;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 
 public class RemoteSchemaProviderTest {
-
-    private static final SourceIdentifier ID = RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30"));
+    private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30");
 
     private RemoteSchemaProvider remoteSchemaProvider;
     private RemoteYangTextSourceProvider mockedRemoteSchemaRepository;
 
     @Before
     public void setUp() {
-        mockedRemoteSchemaRepository = Mockito.mock(RemoteYangTextSourceProvider.class);
-        ExecutionContexts.fromExecutorService(MoreExecutors.newDirectExecutorService());
+        mockedRemoteSchemaRepository = mock(RemoteYangTextSourceProvider.class);
         remoteSchemaProvider = new RemoteSchemaProvider(mockedRemoteSchemaRepository,
-                ExecutionContexts.fromExecutorService(MoreExecutors.newDirectExecutorService()));
+                ExecutionContexts.fromExecutor(MoreExecutors.directExecutor()));
     }
 
     @Test
-    public void getExistingYangTextSchemaSource() throws IOException, SchemaSourceException {
-        String source = "Test";
-        YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource(
-                ID, ByteSource.wrap(source.getBytes()));
-        YangTextSchemaSourceSerializationProxy sourceProxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
-        Mockito.when(mockedRemoteSchemaRepository.getYangTextSchemaSource(ID))
-            .thenReturn(Futures.successful(sourceProxy));
+    public void getExistingYangTextSchemaSource() throws IOException, InterruptedException, ExecutionException {
+        final var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test"));
+        doReturn(Futures.successful(new YangTextSchemaSourceSerializationProxy(schemaSource)))
+            .when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
 
-        YangTextSchemaSource providedSource = remoteSchemaProvider.getSource(ID).checkedGet();
-        assertEquals(providedSource.getIdentifier(), ID);
-        assertArrayEquals(providedSource.read(), schemaSource.read());
+        final var providedSource = remoteSchemaProvider.getSource(ID).get();
+        assertEquals(ID, providedSource.sourceId());
+        assertEquals(schemaSource.read(), providedSource.read());
     }
 
-    @Test(expected = SchemaSourceException.class)
-    public void getNonExistingSchemaSource() throws Exception {
-        Futures.failed(new Exception("halo"));
-
-        Mockito.when(mockedRemoteSchemaRepository.getYangTextSchemaSource(ID)).thenReturn(
-                Futures.failed(
-                        new SchemaSourceException("Source not provided")));
+    @Test
+    public void getNonExistingSchemaSource() throws InterruptedException {
+        final var exception = new SchemaSourceException(ID, "Source not provided");
+        doReturn(Futures.failed(exception)).when(mockedRemoteSchemaRepository).getYangTextSchemaSource(ID);
 
-        CheckedFuture<?, ?> sourceFuture = remoteSchemaProvider.getSource(ID);
+        final var sourceFuture = remoteSchemaProvider.getSource(ID);
         assertTrue(sourceFuture.isDone());
-        sourceFuture.checkedGet();
+
+        final var cause = assertThrows(ExecutionException.class, sourceFuture::get).getCause();
+        assertSame(exception, cause);
     }
 }
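
The failed-future assertion pattern used above (assertThrows plus cause inspection, replacing @Test(expected = ...)) can be shown in isolation. The CompletableFuture and IllegalStateException below are stand-ins, not the classes under test:

    import static org.junit.Assert.assertSame;
    import static org.junit.Assert.assertThrows;

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import org.junit.Test;

    public class FailedFutureAssertionDemo {
        @Test
        public void failureCauseIsExposed() {
            final var expected = new IllegalStateException("Source not provided");
            final var future = CompletableFuture.failedFuture(expected);

            // get() wraps the failure in an ExecutionException; unwrap it and compare identity.
            final var thrown = assertThrows(ExecutionException.class, future::get);
            assertSame(expected, thrown.getCause());
        }
    }
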
index 1fda858fdd59a5c8c60ebfb0077c6145aa07074a..a63be0aa00e752d44d3704b8d5c30d8105cfcb10 100644 (file)
@@ -7,75 +7,76 @@
  */
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
 
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
 import com.google.common.util.concurrent.Futures;
 import java.util.Collections;
 import java.util.Set;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaRepository;
 import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
-import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 import scala.concurrent.Await;
-import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class RemoteYangTextSourceProviderImplTest {
+    private static final SourceIdentifier ID = new SourceIdentifier("Test", "2015-10-30");
 
-    private static final SourceIdentifier ID = RevisionSourceIdentifier.create("Test", Revision.of("2015-10-30"));
+    @Mock
+    private SchemaRepository mockedLocalRepository;
 
     private RemoteYangTextSourceProviderImpl remoteRepository;
-    private SchemaRepository mockedLocalRepository;
     private final Set<SourceIdentifier> providedSources = Collections.singleton(ID);
 
     @Before
     public void setUp() {
-        mockedLocalRepository = Mockito.mock(SchemaRepository.class);
-
         remoteRepository = new RemoteYangTextSourceProviderImpl(mockedLocalRepository, providedSources);
     }
 
     @Test
     public void testGetExistingYangTextSchemaSource() throws Exception {
-        String source = "Test source.";
-        YangTextSchemaSource schemaSource = YangTextSchemaSource.delegateForByteSource(
-                ID, ByteSource.wrap(source.getBytes()));
-        Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn(
-                Futures.immediateFuture(schemaSource));
+        var schemaSource = new DelegatedYangTextSource(ID, CharSource.wrap("Test source."));
+
+        doReturn(Futures.immediateFuture(schemaSource)).when(mockedLocalRepository)
+            .getSchemaSource(ID, YangTextSource.class);
 
-        Future<YangTextSchemaSourceSerializationProxy> retrievedSourceFuture =
-                remoteRepository.getYangTextSchemaSource(ID);
+        var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID);
         assertTrue(retrievedSourceFuture.isCompleted());
-        YangTextSchemaSource resultSchemaSource = Await.result(retrievedSourceFuture,
-                FiniteDuration.Zero()).getRepresentation();
-        assertEquals(resultSchemaSource.getIdentifier(), schemaSource.getIdentifier());
-        assertArrayEquals(resultSchemaSource.read(), schemaSource.read());
+        var resultSchemaSource = Await.result(retrievedSourceFuture, FiniteDuration.Zero()).getRepresentation();
+        assertEquals(resultSchemaSource.sourceId(), schemaSource.sourceId());
+        assertEquals(resultSchemaSource.read(), schemaSource.read());
     }
 
-    @Test(expected = SchemaSourceException.class)
+    @Test
     public void testGetNonExistentYangTextSchemaSource() throws Exception {
-        Mockito.when(mockedLocalRepository.getSchemaSource(ID, YangTextSchemaSource.class)).thenReturn(
-                Futures.immediateFailedFuture(new SchemaSourceException("Source is not provided")));
+        final var exception = new SchemaSourceException(ID, "Source is not provided");
+
+        doReturn(Futures.immediateFailedFuture(exception)).when(mockedLocalRepository)
+            .getSchemaSource(ID, YangTextSource.class);
 
-        Future<YangTextSchemaSourceSerializationProxy> retrievedSourceFuture =
-                remoteRepository.getYangTextSchemaSource(ID);
+        var retrievedSourceFuture = remoteRepository.getYangTextSchemaSource(ID);
         assertTrue(retrievedSourceFuture.isCompleted());
-        Await.result(retrievedSourceFuture, FiniteDuration.Zero());
+
+        final var ex = assertThrows(SchemaSourceException.class,
+            () -> Await.result(retrievedSourceFuture, FiniteDuration.Zero()));
+        assertSame(ex, exception);
     }
 
     @Test
     public void testGetProvidedSources() throws Exception {
-        Set<SourceIdentifier> remoteProvidedSources = Await.result(remoteRepository
-                .getProvidedSources(), FiniteDuration.Zero());
+        var remoteProvidedSources = Await.result(remoteRepository.getProvidedSources(), FiniteDuration.Zero());
         assertEquals(providedSources, remoteProvidedSources);
     }
-
 }
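
The @RunWith(MockitoJUnitRunner.StrictStubs.class) plus @Mock combination used above replaces manual Mockito.mock() setup and fails the test when a stubbing is never exercised. A self-contained sketch with a plain List standing in for the mocked repository:

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.doReturn;

    import java.util.List;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.mockito.Mock;
    import org.mockito.junit.MockitoJUnitRunner;

    // StrictStubs reports stubbings the test never uses, catching stale setups early.
    @RunWith(MockitoJUnitRunner.StrictStubs.class)
    public class StrictStubsDemoTest {
        @Mock
        private List<String> mockedList;

        @Test
        public void stubbedCallIsUsed() {
            doReturn("value").when(mockedList).get(0);
            assertEquals("value", mockedList.get(0));
        }
    }
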
index b2a86e1c0ceef3b1c19f06bc320b7c1911b9c785..ced954640cd142fbb4f01b63231a61eb37162801 100644 (file)
@@ -5,61 +5,55 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.schema.provider.impl;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 
-import com.google.common.io.ByteSource;
+import com.google.common.io.CharSource;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
-import java.nio.charset.StandardCharsets;
 import org.junit.Before;
 import org.junit.Test;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.repo.api.RevisionSourceIdentifier;
-import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.api.source.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.source.YangTextSource;
+import org.opendaylight.yangtools.yang.model.spi.source.DelegatedYangTextSource;
 
 public class YangTextSourceSerializationProxyTest {
-
-    private YangTextSchemaSource schemaSource;
+    private YangTextSource schemaSource;
 
     @Before
     public void setUp() {
-        String source = "Test source.";
-        schemaSource = YangTextSchemaSource.delegateForByteSource(
-                RevisionSourceIdentifier.create("test", Revision.of("2015-10-30")),
-                ByteSource.wrap(source.getBytes(StandardCharsets.UTF_8)));
+        schemaSource = new DelegatedYangTextSource(new SourceIdentifier("test", "2015-10-30"),
+            CharSource.wrap("Test source."));
     }
 
-
     @Test
-    public void serializeAndDesrializeProxy() throws ClassNotFoundException, IOException {
-        YangTextSchemaSourceSerializationProxy proxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
+    public void serializeAndDeserializeProxy() throws ClassNotFoundException, IOException {
+        final var proxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
         ByteArrayOutputStream bos = new ByteArrayOutputStream();
         ObjectOutputStream oos = new ObjectOutputStream(bos);
 
         oos.writeObject(proxy);
 
-        ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
+        final byte[] bytes = bos.toByteArray();
+        assertEquals(323, bytes.length);
+
+        ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes));
 
-        YangTextSchemaSourceSerializationProxy deserializedProxy =
-                (YangTextSchemaSourceSerializationProxy) ois.readObject();
+        final var deserializedProxy = (YangTextSchemaSourceSerializationProxy) ois.readObject();
 
-        assertEquals(deserializedProxy.getRepresentation().getIdentifier(), proxy.getRepresentation().getIdentifier());
-        assertArrayEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read());
+        assertEquals(deserializedProxy.getRepresentation().sourceId(), proxy.getRepresentation().sourceId());
+        assertEquals(deserializedProxy.getRepresentation().read(), proxy.getRepresentation().read());
     }
 
     @Test
     public void testProxyEqualsBackingYangTextSource() throws IOException {
-        YangTextSchemaSourceSerializationProxy serializationProxy =
-                new YangTextSchemaSourceSerializationProxy(schemaSource);
+        final var serializationProxy = new YangTextSchemaSourceSerializationProxy(schemaSource);
 
-        assertEquals(serializationProxy.getRepresentation().getIdentifier(), schemaSource.getIdentifier());
-        assertArrayEquals(serializationProxy.getRepresentation().read(), schemaSource.read());
+        assertEquals(serializationProxy.getRepresentation().sourceId(), schemaSource.sourceId());
+        assertEquals(serializationProxy.getRepresentation().read(), schemaSource.read());
     }
 }
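
With the schema source now backed by a CharSource, read() yields the full text as a String, so plain assertEquals replaces the earlier byte-array comparison. A tiny Guava-only illustration of that assumption:

    import com.google.common.io.CharSource;
    import java.io.IOException;

    public class CharSourceDemo {
        public static void main(String[] args) throws IOException {
            CharSource source = CharSource.wrap("Test source.");
            // read() returns the wrapped characters as a String, enabling direct equality checks.
            System.out.println("Test source.".equals(source.read())); // true
        }
    }
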
index 83b1ece6f21b56410da628c28fecebb3c682f50d..ece191671c7f7782acbe8cd54ec7380230eaa64a 100644 (file)
@@ -3,5 +3,6 @@ akka {
       snapshot-store.local.class = "org.opendaylight.controller.cluster.persistence.LocalSnapshotStore"
       snapshot-store.plugin = akka.persistence.snapshot-store.local
       snapshot-store.local.dir = "target/snapshots"
+      snapshot-store.local.use-lz4-compression = false
     }
 }
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/resources/odl-ctlr1923.yang b/opendaylight/md-sal/sal-clustering-commons/src/test/resources/odl-ctlr1923.yang
new file mode 100644 (file)
index 0000000..9ad4f72
--- /dev/null
@@ -0,0 +1,43 @@
+module odl-ctrl1923 {
+  yang-version 1;
+  namespace "urn:odl-ctlr1923";
+  prefix "ctrl1923";
+
+  description "Sodium -> Magnesium uint translation";
+
+  grouping grp {
+    leaf a { type int8; }
+    leaf b { type int16; }
+    leaf c { type int32; }
+    leaf d { type int64; }
+    leaf e { type uint8; }
+    leaf f { type uint16; }
+    leaf g { type uint32; }
+    leaf h { type uint64; }
+  }
+
+  container cont {
+    uses grp;
+  }
+
+  list lst {
+    key "a b c d e f g h";
+    uses grp;
+  }
+
+  leaf-list lf-lst8 {
+    type uint8;
+  }
+
+  leaf-list lf-lst16 {
+    type uint16;
+  }
+
+  leaf-list lf-lst32 {
+    type uint32;
+  }
+
+  leaf-list lf-lst64 {
+    type uint64;
+  }
+}
index 52c413f673baeddd42e58627a69dabae39b666c9..4c6291230764d62531a6c194c435c11d59beac65 100644 (file)
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-clustering-config</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>jar</packaging>
   <description>Configuration files for md-sal clustering</description>
 
index a51e09f0ecfc283353d88931722cd9a353614d2f..0a71fd416b1a5d03c30db14e6bb70a0c889674c2 100644 (file)
@@ -3,29 +3,26 @@ odl-cluster-data {
   akka {
     remote {
       artery {
-        enabled = off
+        enabled = on
+        transport = tcp
         canonical.hostname = "127.0.0.1"
         canonical.port = 2550
       }
-      netty.tcp {
-        hostname = "127.0.0.1"
-        port = 2550
-      }
-      # when under load we might trip a false positive on the failure detector
-      # transport-failure-detector {
-        # heartbeat-interval = 4 s
-        # acceptable-heartbeat-pause = 16s
-      # }
     }
 
     cluster {
-      # Remove ".tcp" when using artery.
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
+      # Using artery.
+      seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550"]
 
       roles = [
         "member-1"
       ]
 
+      # when under load we might trip a false positive on the failure detector
+      # failure-detector {
+        # heartbeat-interval = 4 s
+        # acceptable-heartbeat-pause = 16s
+      # }
     }
 
     persistence {
@@ -34,16 +31,12 @@ odl-cluster-data {
       # The relative path is always relative to KARAF_HOME.
 
       # snapshot-store.local.dir = "target/snapshots"
-      # journal.leveldb.dir = "target/journal"
 
-      journal {
-        leveldb {
-          # Set native = off to use a Java-only implementation of leveldb.
-          # Note that the Java-only version is not currently considered by Akka to be production quality.
-
-          # native = off
-        }
-      }
+      # Use lz4 compression for LocalSnapshotStore snapshots
+      snapshot-store.local.use-lz4-compression = false
+      # Size of blocks for lz4 compression: 64KB, 256KB, 1MB or 4MB
+      snapshot-store.local.lz4-blocksize = 256KB
     }
+    disable-default-actor-system-quarantined-event-handling = "false"
   }
 }
index 250a8149ff1e0b77e6f203c275beb473c03d8c2e..b03a4a114e0f87a8cd06c913ebce9cb2ec4fb0fc 100644 (file)
@@ -4,6 +4,15 @@
 # the "shard-election-timeout-factor" property would be applied to both data stores whereas specifying
 # "operational.shard-election-timeout-factor" would only apply to the "operational" data store. Similarly,
 # specifying "config.shard-election-timeout-factor" would only apply to the "config" data store.
+# Overall flexibility goes even further, as these overrides can also be combined with per-shard specification,
+# so that you can also use:
+#   <shard-name>.<setting>
+#   <datastore-name>.<shard-name>.<setting>
+# for example:
+#   topology.shard-election-timeout-factor=2             # override both config/operational for topology shard
+#   config.topology.shard-election-timeout-factor=5      # override config for topology shard
+#   operational.topology.shard-election-timeout-factor=7 # override operational for topology shard
+#
 
 # The multiplication factor to be used to determine shard election timeout. The shard election timeout
 # is determined by multiplying shardHeartbeatIntervalInMillis with the shardElectionTimeoutFactor.
@@ -44,8 +53,13 @@ operational.persistent=false
 #shard-snapshot-batch-count=20000
 
 # The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+# Disabled if the direct threshold is enabled.
 #shard-snapshot-data-threshold-percentage=12
 
+# The max size of the in-memory journal (in MB); after reaching the limit, a snapshot will be taken. Should not be less than 1.
+# If set to 0, the direct threshold is disabled and the percentage is used instead.
+#shard-snapshot-data-threshold=0
+
 # The interval at which the leader of the shard will check if its majority followers are active and
 # term itself as isolated.
 #shard-isolated-leader-check-interval-in-millis=5000
@@ -63,18 +77,6 @@ operational.persistent=false
 # measures the latency for a commit and auto-adjusts the rate limit.
 #transaction-creation-initial-rate-limit=100
 
-# The maximum thread pool size for each shard's data store data change notification executor.
-#max-shard-data-change-executor-pool-size=20
-
-# The maximum queue size for each shard's data store data change notification executor.
-#max-shard-data-change-executor-queue-size=1000
-
-# The maximum queue size for each shard's data store data change listener.
-#max-shard-data-change-listener-queue-size=1000
-
-# The maximum queue size for each shard's data store executor.
-#max-shard-data-store-executor-queue-size=5000
-
 # A fully qualified java class name. The class should implement
 # org.opendaylight.controller.cluster.raft.policy.RaftPolicy. This java class should be
 # accessible to the distributed data store OSGi module so that it can be dynamically loaded via
@@ -84,12 +86,8 @@ operational.persistent=false
 #custom-raft-policy-implementation=
 
 # When fragmenting messages thru the akka remoting framework, this is the maximum size in bytes
-# for a message slice.
-#maximum-message-slice-size=20480000
-
-# Enable tell-based protocol between frontend (applications) and backend (shards). Using this protocol
-# should avoid AskTimeoutExceptions seen under heavy load. Defaults to false (use ask-based protocol).
-#use-tell-based-protocol=true
+# for a message slice. This needs to be below Akka's maximum-frame-size and defaults to 480KiB.
+maximum-message-slice-size=491520
 
 # Tune the maximum number of entries a follower is allowed to lag behind the leader before it is
 # considered out-of-sync. This flag may require tuning in face of a large number of small transactions.
@@ -99,3 +97,38 @@ operational.persistent=false
 # the stack trace of the creator of the Tx when there is an exception when the transaction is submitted 
 # (e.g. for a failed validation).  Defaults to false due to performance impact.
 #transaction-debug-context-enabled=true
+
+# Multiplier of shard-leader-election-timeout-in-seconds used for initial datastore convergence.
+# Each frontend datastore instance will wait the specified amount of time before becoming exposed
+# as a service. A value of 0 indicates waiting forever. Defaults to 3.
+#initial-settle-timeout-multiplier=3
+
+# Interval after which a snapshot should be taken during the recovery process.
+#recovery-snapshot-interval-seconds=0
+
+# Option to take a snapshot when the entire DataTree root or top-level container is overwritten
+snapshot-on-root-overwrite=false
+
+# Enable lz4 compression for snapshots sent from leader to followers
+#use-lz4-compression=true
+
+# Export snapshot and journal content after recovery; possible modes: off, json
+#
+# Journal Json structure:
+#       Entries : [
+#            Entry : [
+#                Node: [
+#                   Path : {},
+#                   ModificationType : {},
+#                   Data : {}
+#                ]
+#            ]
+#        ]
+#
+# Snapshot Json structure:
+#       RootNode : {}
+#
+export-on-recovery=off
+
+# Directory name for export files
+#recovery-export-base-dir=persistence-export
index 34b309b0b162d77f905bfede823f34f05711cd47..9834e08ea89b8776d0521fd3db15ebd210d0e6ad 100644 (file)
@@ -56,6 +56,11 @@ odl-cluster-data {
     loggers = ["akka.event.slf4j.Slf4jLogger"]
     logger-startup-timeout = 300s
 
+    # JFR requires boot delegation, which we do not have by default
+    java-flight-recorder {
+      enabled = false
+    }
+
     actor {
       warn-about-java-serializer-usage = off
       provider = "akka.cluster.ClusterActorRefProvider"
@@ -86,17 +91,23 @@ odl-cluster-data {
     }
     remote {
       log-remote-lifecycle-events = off
+      # Disable passive connections, as we are seeing issues
+      # with read-only associations
+      use-passive-connections = off
 
-      netty.tcp {
+      classic.netty.tcp {
         maximum-frame-size = 419430400
         send-buffer-size = 52428800
         receive-buffer-size = 52428800
       }
 
       artery {
+        enabled = on
+        transport = tcp
+
         advanced {
-          #maximum-frame-size = 256 KiB
-          #maximum-large-frame-size = 2 MiB
+          maximum-frame-size = 512 KiB
+          maximum-large-frame-size = 2 MiB
         }
       }
     }
@@ -110,24 +121,40 @@ odl-cluster-data {
       # This is crucial for correct behavior if you use Cluster Singleton or Cluster Sharding,
       # especially together with Akka Persistence.
 
-      #auto-down-unreachable-after = 30s
-
       allow-weakly-up-members = on
 
       use-dispatcher = cluster-dispatcher
 
       failure-detector.acceptable-heartbeat-pause = 3 s
+
+      distributed-data {
+        # How often the Replicator should send out gossip information.
+        # This value controls how quickly Entity Ownership Service data is replicated
+        # across cluster nodes.
+        gossip-interval = 100 ms
+
+        # How often the subscribers will be notified of changes, if any.
+        # This value controls how quickly Entity Ownership Service decisions are
+        # propagated within a node.
+        notify-subscribers-interval = 20 ms
+      }
+
+      downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
+
+      split-brain-resolver {
+        active-strategy = keep-majority
+        stable-after = 7s
+      }
     }
 
     persistence {
       journal {
-        plugin = akka.persistence.journal.leveldb
-
-        # The following activates the alternative segmented file journal. Each persistent actor
+        # The following activates the default segmented file journal. Each persistent actor
         # is stored in a separate directory, with multiple segment files. Segments are removed
-        # when they are not longer required.
+        # when they are no longer required.
         #
-        # plugin = akka.persistence.journal.segmented-file
+        plugin = akka.persistence.journal.segmented-file
+
         segmented-file {
           class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
           # Root directory for segmented journal storage
@@ -136,6 +163,30 @@ odl-cluster-data {
           max-entry-size = 16M
           # Maximum size of a segment
           max-segment-size = 128M
+          # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
+          # Set to <= 0 to flush immediately.
+          #max-unflushed-bytes = 1M
+          # Map each segment into memory. Defaults to true; use false to keep a heap-based
+          # buffer instead.
+          memory-mapped = true
+        }
+      }
+
+      # Journal configuration for shards that have persistence turned off. They still need to have a journal plugin
+      # configured, since they still need to store things in the journal occasionally, but having larger segment sizes
+      # would be wasteful.
+      non-persistent {
+        journal {
+          class = "org.opendaylight.controller.akka.segjournal.SegmentedFileJournal"
+          # Root directory for segmented journal storage
+          root-directory = "segmented-journal"
+          # Maximum size of a single entry in the segmented journal
+          max-entry-size = 512K
+          # Maximum size of a segment
+          max-segment-size = 1M
+          # Maximum number of bytes that are written without synchronizing storage. Defaults to max-entry-size.
+          # Set to <= 0 to flush immediately.
+          #max-unflushed-bytes = 128K
           # Map each segment into memory. Note that while this can improve performance,
           # it will also place additional burden on system resources.
           memory-mapped = false
diff --git a/opendaylight/md-sal/sal-common-api/pom.xml b/opendaylight/md-sal/sal-common-api/pom.xml
deleted file mode 100644 (file)
index 6ce5b18..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <version>1.10.0-SNAPSHOT</version>
-  <artifactId>sal-common-api</artifactId>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>concepts</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-common-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-lang3</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-</project>
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/MappingCheckedFuture.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/MappingCheckedFuture.java
deleted file mode 100644 (file)
index 7c0a777..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.util.concurrent.AbstractCheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.function.Function;
-
-/**
- * An implementation of CheckedFuture that provides similar behavior for the <code>get</code> methods
- * that the <code>checkedGet</code> methods provide.
- *
- * <p>For {@link CancellationException} and {@link InterruptedException}, the specified exception mapper
- * is invoked to translate them to the checked exception type.
- *
- * <p>For {@link ExecutionException}, the mapper is invoked to translate the cause to the checked exception
- * and a new ExecutionException is thrown with the translated cause.
- *
- * @author Thomas Pantelis
- *
- * @param <V> The result type returned by this Future's get method
- * @param <X> The checked exception type
- */
-public final class MappingCheckedFuture<V, X extends Exception> extends AbstractCheckedFuture<V, X> {
-    private final Function<Exception, X> mapper;
-
-    private MappingCheckedFuture(final ListenableFuture<V> delegate, final Function<Exception, X> mapper) {
-        super(delegate);
-        this.mapper = requireNonNull(mapper);
-    }
-
-    /**
-     * Creates a new <code>MappingCheckedFuture</code> that wraps the given {@link ListenableFuture}
-     * delegate.
-     *
-     * @param delegate the {@link ListenableFuture} to wrap
-     * @param mapper the mapping {@link Function} used to translate exceptions from the delegate
-     * @return a new <code>MappingCheckedFuture</code>
-     */
-    public static <V, X extends Exception> MappingCheckedFuture<V, X> create(
-            final ListenableFuture<V> delegate, final Function<Exception, X> mapper) {
-        return new MappingCheckedFuture<>(delegate, mapper);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:parameterName")
-    protected X mapException(final Exception e) {
-        return mapper.apply(e);
-    }
-
-    private ExecutionException wrapInExecutionException(final String message, final Exception ex) {
-        return new ExecutionException(message, mapException(ex));
-    }
-
-    @Override
-    public V get() throws InterruptedException, ExecutionException {
-        try {
-            return super.get();
-        } catch (final InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw wrapInExecutionException("Operation was interrupted", e);
-        } catch (final CancellationException e) {
-            throw wrapInExecutionException("Operation was cancelled", e);
-        } catch (final ExecutionException e) {
-            throw wrapInExecutionException(e.getMessage(), e);
-        }
-    }
-
-    @Override
-    public V get(final long timeout, final TimeUnit unit)
-            throws InterruptedException, ExecutionException, TimeoutException {
-        try {
-            return super.get(timeout, unit);
-        } catch (final InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw wrapInExecutionException("Operation was interrupted", e);
-        } catch (final CancellationException e) {
-            throw wrapInExecutionException("Operation was cancelled", e);
-        } catch (final ExecutionException e) {
-            throw wrapInExecutionException(e.getMessage(), e);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/RegistrationListener.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/RegistrationListener.java
deleted file mode 100644 (file)
index 3b3217d..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api;
-
-import java.util.EventListener;
-
-import org.opendaylight.yangtools.concepts.Registration;
-
-public interface RegistrationListener<T extends Registration> extends EventListener {
-
-    void onRegister(T registration);
-
-    void onUnregister(T registration);
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitCohort.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitCohort.java
deleted file mode 100644 (file)
index 6f81e94..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Three phase Commit Cohort for subtree, which is uniquely associated with user submitted transaction.
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncConfigurationCommitCohort<P extends Path<P>, D> {
-
-    /**
-     * Initiates a pre-commit of associated request
-     *
-     * <p>
-     * Implementation MUST NOT do any blocking calls during this callback, all
-     * pre-commit preparation SHOULD happen asynchronously and MUST result in
-     * completing returned future object.
-     *
-     * @param rebasedTransaction
-     *            Read-only view of transaction as if happened on top of actual
-     *            data store
-     * @return Future which is completed once pre-commit phase for this request
-     *         is finished.
-     */
-    ListenableFuture<Void> preCommit(AsyncReadTransaction<P, D> rebasedTransaction);
-
-    /**
-     * Initiates a commit phase of associated request
-     *
-     * <p>
-     * Implementation MUST NOT do any blocking calls during this callback, all
-     * commit finalization SHOULD happen asynchronously and MUST result in
-     * completing returned future object.
-     *
-     * @return Future which is completed once commit phase for associated
-     *         request is finished.
-     */
-    ListenableFuture<Void> commit();
-
-    /**
-     * Initiates abort phase of associated request
-     *
-     * <p>
-     * Implementation MUST NOT do any blocking calls during this callback, all
-     * commit finalization SHOULD happen asynchronously and MUST result in
-     * completing returned future object.
-     *
-     * @return Future which is completed once commit phase for associated
-     *         request is finished.
-     */
-    ListenableFuture<Void> abort();
-
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitCoordinator.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitCoordinator.java
deleted file mode 100644 (file)
index 4cc608c..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Three Phase Commit Coordinator with support of user-supplied commit cohorts
- * which participates in three-phase commit protocols.
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncConfigurationCommitCoordinator<P extends Path<P>, D> {
-
-    /**
-     * Register configuration commit handler for particular subtree.
-     *
-     * Configuration commit handler is invoked for all write transactions
-     * which modifies <code>subtree</code>
-     *
-     * @param subtree Subtree which configuration commit handler is interested it
-     * @param commitHandler Instance of user-provided commit handler
-     * @return Registration object representing this registration. Invoking {@link ObjectRegistration#close()}
-     *   will unregister configuration commit handler.
-     */
-    <C extends AsyncConfigurationCommitCohort<P, D>> ObjectRegistration<C> registerConfigurationCommitHandler(
-            P subtree, C commitHandler);
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitHandler.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncConfigurationCommitHandler.java
deleted file mode 100644 (file)
index 4ad4680..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * User-supplied participant in three-phase commit of transaction for configuration data tree.
- *
- * <p>
- * Client-supplied implementation of commit handler for subtree, which
- * is responsible for processing CAN-COMMIT phase of three-phase commit protocol
- * and return CommitCohort, which provides access to additional transitions
- * such as PRE-COMMIT, COMMIT and ABORT.
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncConfigurationCommitHandler<P extends Path<P>, D> {
-
-    /**
-     * Requests a can commit phase
-     *
-     * <p>
-     * Implementations SHOULD NOT do any blocking operation during
-     * processing this callback.
-     *
-     * <b>Implementation Notes</b>
-     * <ul>
-     * <li>Implementation are REQUIRED to use <code>request</code> object for any data related access</li>
-     * <li>Implementations SHOULD NOT use any other state stored outside configuration subtree for validation</li>
-     * <li>Validation should happen asynchronously, outside callback call by updating returned {@link CheckedFuture}
-     *     object.</li>
-     * <li>If validation (CAN_COMMIT) phase:
-     * <ul>
-     * <li><b>is successful</b> - invocation of {@link CheckedFuture#checkedGet()} on returned future MUST
-     *     return {@link AsyncConfigurationCommitCohort} associated with request.</li>
-     * <li><b>is unsuccessful</b> - invocation of {@link CheckedFuture#checkedGet()} must throw instance
-     *     of {@link DataValidationFailedException}
-     * with human readable explanaition of error condition.
-     * </li>
-     * </ul>
-     * </li>
-     * </ul>
-     * @param request
-     *            Commit Request submitted by client, which contains
-     *            information about modifications and read-only view as
-     *            if transaction happened.
-     * @return CheckedFuture which contains client-supplied implementation of {@link AsyncConfigurationCommitCohort}
-     *         associated with submitted request, if can commit phase is
-     *         successful, if can commit was unsuccessful, future must fail with
-     *         {@link TransactionCommitFailedException} exception.
-     */
-    CheckedFuture<AsyncConfigurationCommitCohort<P, D>, DataValidationFailedException> canCommit(
-            ConfigurationCommitRequest<P, D> request);
-
-    /**
-     * Commit Request as was submitted by client code
-     *
-     * <p>
-     * Commit Request contains list view of created / updated / removed
-     * path and read-only view of proposed client transaction,
-     * which may be used to retrieve modified or referenced data.
-     *
-     *
-     * @param <P>
-     *            Type of path (subtree identifier), which represents location
-     *            in tree
-     * @param <D>
-     *            Type of data (payload), which represents data payload
-     */
-    interface ConfigurationCommitRequest<P extends Path<P>, D> {
-
-        /**
-         * Read-only transaction which provides access only to configuration
-         * data tree as if submitted transaction successfully happened and
-         * no other concurrent modifications happened between allocation
-         * of client transactions and write of client transactions.
-         *
-         * <p>
-         * Implementations of Commit Handlers are REQUIRED to use this
-         * read-only view to access any data from configuration data tree,
-         * in order to capture them as preconditions for this transaction.
-         *
-         * @return Read-only transaction which provides access only to configuration
-         *     data tree as if submitted transaction successfully happened
-         */
-        AsyncReadTransaction<P, D> getReadOnlyView();
-
-        /**
-         * Returns iteration of paths, to data which was introduced by this transaction.
-         *
-         * @return Iteration of paths, which was introduced by this transaction.
-         */
-        Iterable<P> getCreatedPaths();
-
-        /**
-         * Returns iteration of paths, to data which was updated by this transaction.
-         *
-         * @return Iteration of paths, which was updated by this transaction.
-         */
-        Iterable<P> getUpdatedPaths();
-
-        /**
-         * Returns iteration of paths, to data which was removed by this transaction.
-         *
-         * @return Iteration of paths, which was removed by this transaction.
-         */
-        Iterable<P> getRemovedPaths();
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncDataBroker.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncDataBroker.java
deleted file mode 100644 (file)
index 26935f2..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Base interface that provides access to a conceptual data tree store and also provides the ability to
- * subscribe for changes to data under a given branch of the tree.
- *
- * <p>
- * All operations on the data tree are performed via one of the transactions:
- * <ul>
- * <li>Read-Only - allocated using {@link #newReadOnlyTransaction()}
- * <li>Write-Only - allocated using {@link #newWriteOnlyTransaction()}
- * <li>Read-Write - allocated using {@link #newReadWriteTransaction()}
- * </ul>
- *
- * <p>
- * These transactions provide a stable isolated view of data tree, which is
- * guaranteed to be not affected by other concurrent transactions, until
- * transaction is committed.
- *
- * <p>
- * For a detailed explanation of how transaction are isolated and how transaction-local
- * changes are committed to global data tree, see
- * {@link AsyncReadTransaction}, {@link AsyncWriteTransaction},
- * {@link AsyncReadWriteTransaction} and {@link AsyncWriteTransaction#submit()}.
- *
- *
- * <p>
- * It is strongly recommended to use the type of transaction, which
- * provides only the minimal capabilities you need. This allows for
- * optimizations at the data broker / data store level. For example,
- * implementations may optimize the transaction for reading if they know ahead
- * of time that you only need to read data - such as not keeping additional meta-data,
- * which may be required for write transactions.
- *
- * <p>
- * <b>Implementation Note:</b> This interface is not intended to be implemented
- * by users of MD-SAL, but only to be consumed by them.
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncDataBroker<P extends Path<P>, D> extends AsyncDataTransactionFactory<P, D> {
-
-    @Override
-    AsyncReadOnlyTransaction<P, D> newReadOnlyTransaction();
-
-    @Override
-    AsyncReadWriteTransaction<P, D> newReadWriteTransaction();
-
-    @Override
-    AsyncWriteTransaction<P, D> newWriteOnlyTransaction();
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncDataTransactionFactory.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncDataTransactionFactory.java
deleted file mode 100644 (file)
index 46f6ccc..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * A factory which allocates new transactions to operate on the data
- * tree.
- *
- * <p>
- * <b>Note:</b> This interface is not intended to be used directly, but rather
- * via subinterfaces which introduces additional semantics to allocated
- * transactions.
- * <ul>
- * <li> {@link AsyncDataBroker}
- * <li> {@link TransactionChain}
- * </ul>
- *
- * <p>
- * All operations on the data tree are performed via one of the transactions:
- * <ul>
- * <li>Read-Only - allocated using {@link #newReadOnlyTransaction()}
- * <li>Write-Only - allocated using {@link #newWriteOnlyTransaction()}
- * <li>Read-Write - allocated using {@link #newReadWriteTransaction()}
- * </ul>
- *
- * <p>
- * These transactions provides a stable isolated view of the data tree, which is
- * guaranteed to be not affected by other concurrent transactions, until
- * transaction is committed.
- *
- * <p>
- * For a detailed explanation of how transaction are isolated and how transaction-local
- * changes are committed to global data tree, see
- * {@link AsyncReadTransaction}, {@link AsyncWriteTransaction},
- * {@link AsyncReadWriteTransaction} and {@link AsyncWriteTransaction#submit()}.
- *
- * <p>
- * It is strongly recommended to use the type of transaction, which
- * provides only the minimal capabilities you need. This allows for
- * optimizations at the data broker / data store level. For example,
- * implementations may optimize the transaction for reading if they know ahead
- * of time that you only need to read data - such as not keeping additional meta-data,
- * which may be required for write transactions.
- *
- * <p>
- * <b>Implementation Note:</b> This interface is not intended to be implemented
- * by users of MD-SAL, but only to be consumed by them.
- *
- * @see AsyncDataBroker
- * @see TransactionChain
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncDataTransactionFactory<P extends Path<P>, D> {
-
-    /**
-     * Allocates a new read-only transaction which provides an immutable snapshot of the data tree.
-     *
-     * <p>
-     * The view of data tree is an immutable snapshot of current data tree state when
-     * transaction was allocated.
-     *
-     * @return new read-only transaction
-     */
-    AsyncReadOnlyTransaction<P, D> newReadOnlyTransaction();
-
-    /**
-     * Allocates new read-write transaction which provides a mutable view of the data
-     * tree.
-     *
-     * <p>
-     * Preconditions for mutation of data tree are captured from the snapshot of
-     * data tree state, when the transaction is allocated. If data was
-     * changed during transaction in an incompatible way then the commit of this transaction
-     * will fail. See {@link AsyncWriteTransaction#submit()} for more
-     * details about conflicting and not-conflicting changes and
-     * failure scenarios.
-     *
-     * @return new read-write transaction
-     */
-    AsyncReadWriteTransaction<P, D> newReadWriteTransaction();
-
-    /**
-     * Allocates new write-only transaction based on latest state of data
-     * tree.
-     *
-     * <p>
-     * Preconditions for mutation of data tree are captured from the snapshot of
-     * data tree state, when the transaction is allocated. If data was
-     * changed during transaction in an incompatible way then the commit of this transaction
-     * will fail. See {@link AsyncWriteTransaction#submit()} for more
-     * details about conflicting and not-conflicting changes and
-     * failure scenarios.
-     *
-     * <p>
-     * Since this transaction does not provide a view of the data it SHOULD BE
-     * used only by callers which are exclusive writers (exporters of data)
-     * to the subtree they modify. This prevents optimistic
-     * lock failures as described in {@link AsyncWriteTransaction#submit()}.
-     *
-     * <p>
-     * Exclusivity of writers to particular subtree SHOULD BE enforced by
-     * external locking mechanism.
-     *
-     * @return new write-only transaction
-     */
-    AsyncWriteTransaction<P, D> newWriteOnlyTransaction();
-
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadOnlyTransaction.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadOnlyTransaction.java
deleted file mode 100644 (file)
index ab4b44f..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Marker interface for a read-only view of the data tree.
- *
- * @see AsyncReadTransaction
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncReadOnlyTransaction<P extends Path<P>, D> extends AsyncReadTransaction<P, D>, AutoCloseable {
-
-    /**
-     * Closes this transaction and releases all resources associated with it.
-     *
-     */
-    @Override
-    void close();
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadTransaction.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadTransaction.java
deleted file mode 100644 (file)
index ae34213..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Marker interface for a stateful read view of the data tree.
- *
- * <p>
- * The view of the data tree is a stable point-in-time snapshot of the current data tree state
- * captured when the transaction was created. Its state and underlying data tree
- * are not affected by other concurrently running transactions.
- *
- * <p>
- * <b>Implementation Note:</b> This interface is not intended to be implemented
- * by users of MD-SAL, but only to be consumed by them.
- *
- * <h2>Transaction isolation example</h2>
- * Let's assume the initial state of the data tree for <code>PATH</code> is <code>A</code>.
- *
- * <pre>
- * txRead = broker.newReadOnlyTransaction();   // read Transaction is snapshot of data
- * txWrite = broker.newReadWriteTransaction(); // concurrent write transaction
- *
- * txRead.read(OPERATIONAL,PATH).get();        // will return Optional containing A
- * txWrite.put(OPERATIONAL,PATH,B);            // writes B to PATH
- *
- * txRead.read(OPERATIONAL,PATH).get();        // still returns Optional containing A
- *
- * txWrite.commit().get();                     // data tree is updated, PATH contains B
- * txRead.read(OPERATIONAL,PATH).get();        // still returns Optional containing A
- *
- * txAfterCommit = broker.newReadOnlyTransaction(); // read Transaction is snapshot of new state
- * txAfterCommit.read(OPERATIONAL,PATH).get(); // returns Optional containing B;
- * </pre>
- *
- * <p>
- * <b>Note:</b> the example contains blocking calls on futures only to illustrate
- * that an action happened after another asynchronous action. Use of the blocking call
- * {@link com.google.common.util.concurrent.ListenableFuture#get()} is discouraged for most
- * uses; you should use
- * {@link com.google.common.util.concurrent.Futures#addCallback(com.google.common.util.concurrent.ListenableFuture,
- *     com.google.common.util.concurrent.FutureCallback)}
- * or other functions from {@link com.google.common.util.concurrent.Futures} to
- * register more specific listeners.
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncReadTransaction<P extends Path<P>, D> extends AsyncTransaction<P, D> {
-
-}
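Since the note above discourages blocking on get(), a minimal sketch of the non-blocking alternative; txRead, OPERATIONAL and PATH are the placeholders from the example, MyData is a hypothetical payload type, and the read(...) signature returning a ListenableFuture of Optional is assumed from the concrete sub-interfaces:

    Futures.addCallback(txRead.read(OPERATIONAL, PATH), new FutureCallback<Optional<MyData>>() {
        @Override
        public void onSuccess(final Optional<MyData> result) {
            // the snapshot value captured when txRead was created, e.g. A in the example above
        }

        @Override
        public void onFailure(final Throwable cause) {
            // the read failed, typically with a ReadFailedException
        }
    }, MoreExecutors.directExecutor());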
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadWriteTransaction.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncReadWriteTransaction.java
deleted file mode 100644 (file)
index 4905e6b..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Transaction enabling a client to have combined read/write capabilities.
- *
- * <p>
- * The initial state of the write transaction is a stable snapshot of the current data tree
- * state captured when the transaction was created; its state and underlying
- * data tree are not affected by other concurrently running transactions.
- *
- * <p>
- * Write transactions are isolated from other concurrent write transactions. All
- * writes are local to the transaction and represent only a proposal of state
- * change for the data tree; they are not visible to any other concurrently running
- * transaction.
- *
- * <p>
- * Applications publish the changes proposed in the transaction by calling {@link #submit}
- * on the transaction. This seals the transaction
- * (preventing any further writes using this transaction) and submits it to be
- * processed and applied to the global conceptual data tree.
- *
- * <p>
- * The transaction commit may fail due to a concurrent transaction modifying and committing data in
- * an incompatible way. See {@link #submit()} for more concrete commit failure examples.
- *
- * <b>Implementation Note:</b> This interface is not intended to be implemented
- * by users of MD-SAL, but only to be consumed by them.
- *
- * <h2>Examples</h2>
- *
- * <h3>Transaction local state</h3>
- *
- * <p>
- * Let's assume the initial state of the data tree for <code>PATH</code> is <code>A</code>.
- *
- * <pre>
- * txWrite = broker.newReadWriteTransaction(); // concurrent write transaction
- *
- * txWrite.read(OPERATIONAL,PATH).get()        // will return Optional containing A
- * txWrite.put(OPERATIONAL,PATH,B);            // writes B to PATH
- * txWrite.read(OPERATIONAL,PATH).get()        // will return Optional Containing B
- *
- * txWrite.commit().get();                     // data tree is updated, PATH contains B
- *
- * tx1afterCommit = broker.newReadOnlyTransaction(); // read Transaction is snapshot of new state
- * tx1afterCommit.read(OPERATIONAL,PATH).get(); // returns Optional containing B
- * </pre>
- *
- * <p>
- * As you can see, a read-write transaction provides the same capabilities as an
- * {@link AsyncWriteTransaction}, but also allows reading the proposed changes as
- * if they had already happened.
- *
- * <h3>Transaction isolation (read transaction, read-write transaction)</h3> Let's
- * assume the initial state of the data tree for <code>PATH</code> is <code>A</code>.
- *
- * <pre>
- * txRead = broker.newReadOnlyTransaction();   // read Transaction is snapshot of data
- * txWrite = broker.newReadWriteTransaction(); // concurrent write transaction
- *
- * txRead.read(OPERATIONAL,PATH).get();        // will return Optional containing A
- * txWrite.read(OPERATIONAL,PATH).get()        // will return Optional containing A
- *
- * txWrite.put(OPERATIONAL,PATH,B);            // writes B to PATH
- * txWrite.read(OPERATIONAL,PATH).get()        // will return Optional Containing B
- *
- * txRead.read(OPERATIONAL,PATH).get();        // concurrent read transaction still returns
- *                                             // Optional containing A
- *
- * txWrite.commit().get();                     // data tree is updated, PATH contains B
- * txRead.read(OPERATIONAL,PATH).get();        // still returns Optional containing A
- *
- * tx1afterCommit = broker.newReadOnlyTransaction(); // read Transaction is snapshot of new state
- * tx1afterCommit.read(OPERATIONAL,PATH).get(); // returns Optional containing B
- * </pre>
- *
- * <h3>Transaction isolation (2 concurrent read-write transactions)</h3> Let's
- * assume the initial state of the data tree for <code>PATH</code> is <code>A</code>.
- *
- * <pre>
- * tx1 = broker.newReadWriteTransaction(); // read Transaction is snapshot of data
- * tx2 = broker.newReadWriteTransaction(); // concurrent write transaction
- *
- * tx1.read(OPERATIONAL,PATH).get();       // will return Optional containing A
- * tx2.read(OPERATIONAL,PATH).get()        // will return Optional containing A
- *
- * tx2.put(OPERATIONAL,PATH,B);            // writes B to PATH
- * tx2.read(OPERATIONAL,PATH).get()        // will return Optional Containing B
- *
- * tx1.read(OPERATIONAL,PATH).get();       // tx1 read-write transaction still sees Optional
- *                                         // containing A since it is isolated from tx2
- * tx1.put(OPERATIONAL,PATH,C);            // writes C to PATH
- * tx1.read(OPERATIONAL,PATH).get()        // will return Optional Containing C
- *
- * tx2.read(OPERATIONAL,PATH).get()        // tx2 read-write transaction still sees Optional
- *                                         // containing B since it is isolated from tx1
- *
- * tx2.commit().get();                     // data tree is updated, PATH contains B
- * tx1.read(OPERATIONAL,PATH).get();       // still returns Optional containing C since it is isolated from tx2
- *
- * tx1afterCommit = broker.newReadOnlyTransaction(); // read Transaction is snapshot of new state
- * tx1afterCommit.read(OPERATIONAL,PATH).get(); // returns Optional containing B
- *
- * tx1.commit();                           // will fail with OptimisticLockFailedException,
- *                                         // which means a concurrent transaction changed the same PATH
- *
- * </pre>
- *
- * <p>
- * <b>Note:</b> the examples contain blocking calls on futures only to illustrate
- * that an action happened after another asynchronous action. Use of the blocking call
- * {@link com.google.common.util.concurrent.ListenableFuture#get()} is discouraged for most uses; you should
- * use {@link com.google.common.util.concurrent.Futures#addCallback(com.google.common.util.concurrent.ListenableFuture,
- *      com.google.common.util.concurrent.FutureCallback)}
- * or other functions from {@link com.google.common.util.concurrent.Futures} to
- * register more specific listeners.
- *
- * @see AsyncReadTransaction
- * @see AsyncWriteTransaction
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncReadWriteTransaction<P extends Path<P>, D> extends AsyncReadTransaction<P, D>,
-        AsyncWriteTransaction<P, D> {
-
-}
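A minimal sketch of the combined contract in use: one object satisfies both parent interfaces, so reads made through the concrete sub-interface would already observe the transaction's own uncommitted writes; the helper method and its parameters are illustrative only:

    static <P extends Path<P>, D> void proposeAndCommit(final AsyncReadWriteTransaction<P, D> tx, final P path) {
        // reads made through the concrete sub-interface would already see this delete
        tx.delete(LogicalDatastoreType.CONFIGURATION, path);
        // seal the proposal; other transactions only observe it once the commit is applied
        tx.commit();
    }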
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncTransaction.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncTransaction.java
deleted file mode 100644 (file)
index 2fe3760..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * A common parent for all transactions which operate on a conceptual data tree.
- *
- * <p>
- * See derived transaction types for more concrete behavior:
- * <ul>
- * <li>{@link AsyncReadTransaction} - Read capabilities, user is able to read data from data tree</li>
- * <li>{@link AsyncWriteTransaction} - Write capabilities, user is able to propose changes to data tree</li>
- * <li>{@link AsyncReadWriteTransaction} - Read and Write capabilities, user is able to read state and to propose
- *     changes of state.</li>
- * </ul>
- *
- * <b>Implementation Note:</b> This interface is not intended to be implemented
- * by users of MD-SAL.
- *
- * @param <P> Type of path (subtree identifier), which represents location in tree
- * @param <D> Type of data (payload), which represents data payload
- */
-public interface AsyncTransaction<P extends Path<P>, D> extends Identifiable<Object> {
-
-    @Override
-    Object getIdentifier();
-}
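The identifier is only an opaque token; a minimal sketch of its typical diagnostic use, assuming an slf4j logger and a hypothetical enclosing class:

    // TransactionAuditor is a hypothetical enclosing class used only to host the logger.
    private static final Logger LOG = LoggerFactory.getLogger(TransactionAuditor.class);

    static void logSubmission(final AsyncTransaction<?, ?> tx) {
        // the identifier is useful only for equality checks and diagnostics
        LOG.debug("Submitting transaction {}", tx.getIdentifier());
    }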
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncWriteTransaction.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/AsyncWriteTransaction.java
deleted file mode 100644 (file)
index 70789cc..0000000
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.MappingCheckedFuture;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.concepts.Path;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-
-/**
- * Write transaction provides mutation capabilities for a data tree.
- *
- * <p>
- * The initial state of a write transaction is a stable snapshot of the current data tree,
- * captured when the transaction is created; its state and underlying
- * data tree are not affected by other concurrently running transactions.
- *
- * <p>
- * Write transactions are isolated from other concurrent write transactions. All
- * writes are local to the transaction and represent only a proposal of state
- * change for the data tree; they are not visible to any other concurrently running
- * transaction.
- *
- * <p>
- * Applications make changes to the local data tree in the transaction via the
- * <b>put</b>, <b>merge</b>, and <b>delete</b> operations.
- *
- * <h2>Put operation</h2>
- * Stores a piece of data at a specified path. This acts as an add / replace
- * operation, which is to say that the whole subtree will be replaced by the
- * specified data.
- *
- * <p>
- * Performing the following put operations:
- *
- * <pre>
- * 1) container { list [ a ] }
- * 2) container { list [ b ] }
- * </pre>
- *
- * <p>
- * will result in the following data being present:
- *
- * <pre>
- * container { list [ b ] }
- * </pre>
- * <h2>Merge operation</h2>
- * Merges a piece of data with the existing data at a specified path. Any pre-existing data
- * which is not explicitly overwritten will be preserved. This means that if you store a container,
- * its child lists will be merged.
- *
- * <p>
- * Performing the following merge operations:
- *
- * <pre>
- * 1) container { list [ a ] }
- * 2) container { list [ b ] }
- * </pre>
- *
- * <p>
- * will result in the following data being present:
- *
- * <pre>
- * container { list [ a, b ] }
- * </pre>
- *
- * <p>
- * This also means that storing the container will preserve any
- * augmentations which have been attached to it.
- *
- * <h2>Delete operation</h2>
- * Removes a piece of data from a specified path.
- *
- * <p>
- * After applying changes to the local data tree, applications publish the changes proposed in the
- * transaction by calling {@link #submit} on the transaction. This seals the transaction
- * (preventing any further writes using this transaction) and submits it to be
- * processed and applied to the global conceptual data tree.
- *
- * <p>
- * The transaction commit may fail due to a concurrent transaction modifying and committing data in
- * an incompatible way. See {@link #submit} for more concrete commit failure examples.
- *
- * <p>
- * <b>Implementation Note:</b> This interface is not intended to be implemented
- * by users of MD-SAL, but only to be consumed by them.
- *
- * @param <P>
- *            Type of path (subtree identifier), which represents location in
- *            tree
- * @param <D>
- *            Type of data (payload), which represents data payload
- */
-public interface AsyncWriteTransaction<P extends Path<P>, D> extends AsyncTransaction<P, D> {
-    /**
-     * Cancels the transaction.
-     *
-     * <p>
-     * A transaction can only be cancelled if its state is new or submitted.
-     *
-     * <p>
-     * Invoking cancel() on a failed or cancelled transaction will have no effect, and the
-     * transaction is considered cancelled.
-     *
-     * <p>
-     * Invoking cancel() on a finished transaction (the future returned by {@link #submit()} has already
-     * completed) will always fail (return false).
-     *
-     * @return <tt>false</tt> if the task could not be cancelled, typically because it has already completed
-     *         normally; <tt>true</tt> otherwise
-     */
-    boolean cancel();
-
-    /**
-     * Removes a piece of data from specified path. This operation does not fail
-     * if the specified path does not exist.
-     *
-     * @param store
-     *            Logical data store which should be modified
-     * @param path
-     *            Data object path
-     * @throws IllegalStateException
-     *             if the transaction has already been submitted or cancelled
-     */
-    void delete(LogicalDatastoreType store, P path);
-
-    /**
-     * Submits this transaction to be asynchronously applied to update the logical data tree.
-     * The returned CheckedFuture conveys the result of applying the data changes.
-     *
-     * <p>
-     * <b>Note:</b> It is strongly recommended to process the CheckedFuture result in an asynchronous
-     * manner rather than using the blocking get() method. See example usage below.
-     *
-     * <p>
-     * This call logically seals the transaction, which prevents the client from
-     * further changing data tree using this transaction. Any subsequent calls to
-     * {@link #delete(LogicalDatastoreType, Path)} will fail with
-     * {@link IllegalStateException}.
-     *
-     * <p>
-     * The transaction is marked as submitted and enqueued into the data store back-end for processing.
-     *
-     * <p>
-     * Whether or not the commit is successful is determined by versioning
-     * of the data tree and validation of registered commit participants
-     * ({@link AsyncConfigurationCommitHandler}) if the transaction changes the data tree.
-     *
-     * <p>
-     * The effects of a successful commit of data depend on data tree change listeners and commit participants
-     * ({@link AsyncConfigurationCommitHandler}) that are registered with the data broker.
-     *
-     * <h3>Example usage:</h3>
-     * <pre>
-     *  private void doWrite( final int tries ) {
-     *      WriteTransaction writeTx = dataBroker.newWriteOnlyTransaction();
-     *
-     *      MyDataObject data = ...;
-     *      InstanceIdentifier&lt;MyDataObject&gt; path = ...;
-     *      writeTx.put( LogicalDatastoreType.OPERATIONAL, path, data );
-     *
-     *      Futures.addCallback( writeTx.submit(), new FutureCallback&lt;Void&gt;() {
-     *          public void onSuccess( Void result ) {
-     *              // succeeded
-     *          }
-     *
-     *          public void onFailure( Throwable t ) {
-     *              if( t instanceof OptimisticLockFailedException ) {
-     *                  if( ( tries - 1 ) &gt; 0 ) {
-     *                      // do retry
-     *                      doWrite( tries - 1 );
-     *                  } else {
-     *                      // out of retries
-     *                  }
-     *              } else {
-     *                  // failed due to another type of TransactionCommitFailedException.
-     *              }
-     *          }
-     *      });
-     *  }
-     * ...
-     * doWrite( 2 );
-     * </pre>
-     * <h2>Failure scenarios</h2>
-     *
-     * <p>
-     * A transaction may fail for multiple reasons, such as:
-     * <ul>
-     * <li>Another transaction finished earlier and modified the same node in a
-     * non-compatible way (see below). In this case the returned future will fail with an
-     * {@link OptimisticLockFailedException}. It is the responsibility of the
-     * caller to create a new transaction and submit the same modification again in
-     * order to update the data tree. <i><b>Warning</b>: In most cases, retrying after an
-     * OptimisticLockFailedException will result in a high probability of success.
-     * However, there are scenarios, albeit unusual, where any number of retries will
-     * not succeed. Therefore it is strongly recommended to limit the number of retries (2 or 3)
-     * to avoid an endless loop.</i>
-     * </li>
-     * <li>The data change introduced by this transaction did not pass validation by
-     * commit handlers, or the data was incorrectly structured. The returned future will
-     * fail with a {@link DataValidationFailedException}. The user should not retry
-     * creating a new transaction with the same data, since it will probably fail again.
-     * </li>
-     * </ul>
-     *
-     * <h3>Change compatibility</h3>
-     *
-     * <p>
-     * There are several sets of changes which could be considered incompatible
-     * between two transactions which are derived from the same initial state.
-     * Rules for conflict detection apply recursively to each subtree level.
-     *
-     * <h4>Change compatibility of leafs, leaf-list items</h4>
-     *
-     * <p>
-     * The following table shows state changes and failures between two concurrent transactions
-     * which are based on the same initial state, where Tx 1 completes successfully
-     * before Tx 2 is submitted.
-     *
-     * <table summary="">
-     * <tr><th>Initial state</th><th>Tx 1</th><th>Tx 2</th><th>Result</th></tr>
-     * <tr><td>Empty</td><td>put(A,1)</td><td>put(A,2)</td><td>Tx 2 will fail, state is A=1</td></tr>
-     * <tr><td>Empty</td><td>put(A,1)</td><td>merge(A,2)</td><td>A=2</td></tr>
-     *
-     * <tr><td>Empty</td><td>merge(A,1)</td><td>put(A,2)</td><td>Tx 2 will fail, state is A=1</td></tr>
-     * <tr><td>Empty</td><td>merge(A,1)</td><td>merge(A,2)</td><td>A=2</td></tr>
-     *
-     *
-     * <tr><td>A=0</td><td>put(A,1)</td><td>put(A,2)</td><td>Tx 2 will fail, A=1</td></tr>
-     * <tr><td>A=0</td><td>put(A,1)</td><td>merge(A,2)</td><td>A=2</td></tr>
-     * <tr><td>A=0</td><td>merge(A,1)</td><td>put(A,2)</td><td>Tx 2 will fail, A=1</td></tr>
-     * <tr><td>A=0</td><td>merge(A,1)</td><td>merge(A,2)</td><td>A=2</td></tr>
-     *
-     * <tr><td>A=0</td><td>delete(A)</td><td>put(A,2)</td><td>Tx 2 will fail, A does not exist</td></tr>
-     * <tr><td>A=0</td><td>delete(A)</td><td>merge(A,2)</td><td>A=2</td></tr>
-     * </table>
-     *
-     * <h4>Change compatibility of subtrees</h4>
-     *
-     * <p>
-     * The following table shows state changes and failures between two concurrent transactions
-     * which are based on the same initial state, where Tx 1 completes successfully
-     * before Tx 2 is submitted.
-     *
-     * <table summary="">
-     * <tr><th>Initial state</th><th>Tx 1</th><th>Tx 2</th><th>Result</th></tr>
-     *
-     * <tr><td>Empty</td><td>put(TOP,[])</td><td>put(TOP,[])</td><td>Tx 2 will fail, state is TOP=[]</td></tr>
-     * <tr><td>Empty</td><td>put(TOP,[])</td><td>merge(TOP,[])</td><td>TOP=[]</td></tr>
-     *
-     * <tr><td>Empty</td><td>put(TOP,[FOO=1])</td><td>put(TOP,[BAR=1])</td><td>Tx 2 will fail, state is TOP=[FOO=1]
-     * </td></tr>
-     * <tr><td>Empty</td><td>put(TOP,[FOO=1])</td><td>merge(TOP,[BAR=1])</td><td>TOP=[FOO=1,BAR=1]</td></tr>
-     *
-     * <tr><td>Empty</td><td>merge(TOP,[FOO=1])</td><td>put(TOP,[BAR=1])</td><td>Tx 2 will fail, state is TOP=[FOO=1]
-     * </td></tr>
-     * <tr><td>Empty</td><td>merge(TOP,[FOO=1])</td><td>merge(TOP,[BAR=1])</td><td>TOP=[FOO=1,BAR=1]</td></tr>
-     *
-     * <tr><td>TOP=[]</td><td>put(TOP,[FOO=1])</td><td>put(TOP,[BAR=1])</td><td>Tx 2 will fail, state is TOP=[FOO=1]
-     * </td></tr>
-     * <tr><td>TOP=[]</td><td>put(TOP,[FOO=1])</td><td>merge(TOP,[BAR=1])</td><td>state is TOP=[FOO=1,BAR=1]</td></tr>
-     * <tr><td>TOP=[]</td><td>merge(TOP,[FOO=1])</td><td>put(TOP,[BAR=1])</td><td>Tx 2 will fail, state is TOP=[FOO=1]
-     * </td></tr>
-     * <tr><td>TOP=[]</td><td>merge(TOP,[FOO=1])</td><td>merge(TOP,[BAR=1])</td><td>state is TOP=[FOO=1,BAR=1]</td></tr>
-     * <tr><td>TOP=[]</td><td>delete(TOP)</td><td>put(TOP,[BAR=1])</td><td>Tx 2 will fail, state is empty store
-     * </td></tr>
-     * <tr><td>TOP=[]</td><td>delete(TOP)</td><td>merge(TOP,[BAR=1])</td><td>state is TOP=[BAR=1]</td></tr>
-     *
-     * <tr><td>TOP=[]</td><td>put(TOP/FOO,1)</td><td>put(TOP/BAR,1)</td><td>state is TOP=[FOO=1,BAR=1]</td></tr>
-     * <tr><td>TOP=[]</td><td>put(TOP/FOO,1)</td><td>merge(TOP/BAR,1)</td><td>state is TOP=[FOO=1,BAR=1]</td></tr>
-     * <tr><td>TOP=[]</td><td>merge(TOP/FOO,1)</td><td>put(TOP/BAR,1)</td><td>state is TOP=[FOO=1,BAR=1]</td></tr>
-     * <tr><td>TOP=[]</td><td>merge(TOP/FOO,1)</td><td>merge(TOP/BAR,1)</td><td>state is TOP=[FOO=1,BAR=1]</td></tr>
-     * <tr><td>TOP=[]</td><td>delete(TOP)</td><td>put(TOP/BAR,1)</td><td>Tx 2 will fail, state is empty store</td></tr>
-     * <tr><td>TOP=[]</td><td>delete(TOP)</td><td>merge(TOP/BAR,1)</td><td>Tx 2 will fail, state is empty store
-     * </td></tr>
-     *
-     * <tr><td>TOP=[FOO=1]</td><td>put(TOP/FOO,2)</td><td>put(TOP/BAR,1)</td><td>state is TOP=[FOO=2,BAR=1]</td></tr>
-     * <tr><td>TOP=[FOO=1]</td><td>put(TOP/FOO,2)</td><td>merge(TOP/BAR,1)</td><td>state is TOP=[FOO=2,BAR=1]</td></tr>
-     * <tr><td>TOP=[FOO=1]</td><td>merge(TOP/FOO,2)</td><td>put(TOP/BAR,1)</td><td>state is TOP=[FOO=2,BAR=1]</td></tr>
-     * <tr><td>TOP=[FOO=1]</td><td>merge(TOP/FOO,2)</td><td>merge(TOP/BAR,1)</td><td>state is TOP=[FOO=2,BAR=1]
-     * </td></tr>
-     * <tr><td>TOP=[FOO=1]</td><td>delete(TOP/FOO)</td><td>put(TOP/BAR,1)</td><td>state is TOP=[BAR=1]</td></tr>
-     * <tr><td>TOP=[FOO=1]</td><td>delete(TOP/FOO)</td><td>merge(TOP/BAR,1)</td><td>state is TOP=[BAR=1]</td></tr>
-     * </table>
-     *
-     *
-     * <h3>Examples of failure scenarios</h3>
-     *
-     * <h4>Conflict of two transactions</h4>
-     *
-     * <p>
-     * This example illustrates two concurrent transactions which are derived from the
-     * same initial state of the data tree and propose conflicting modifications.
-     *
-     * <pre>
-     * txA = broker.newWriteOnlyTransaction(); // allocates new transaction, data tree is empty
-     * txB = broker.newWriteOnlyTransaction(); // allocates new transaction, data tree is empty
-     *
-     * txA.put(CONFIGURATION, PATH, A);    // writes to PATH value A
-     * txB.put(CONFIGURATION, PATH, B);    // writes to PATH value B
-     *
-     * ListenableFuture futureA = txA.submit(); // transaction A is sealed and submitted
-     * ListenableFuture futureB = txB.submit(); // transaction B is sealed and submitted
-     * </pre>
-     *
-     * <p>
-     * Commit of transaction A will be processed asynchronously and data tree
-     * will be updated to contain value <code>A</code> for <code>PATH</code>.
-     * Returned {@link ListenableFuture} will successfully complete once
-     * state is applied to data tree.
-     *
-     * <p>
-     * Commit of transaction B will fail, because the previous transaction also
-     * modified the same path concurrently. The state introduced by transaction B
-     * will not be applied. Returned {@link ListenableFuture} object will fail
-     * with an {@link OptimisticLockFailedException}, which indicates to the
-     * client that a concurrent transaction prevented the submitted transaction from being
-     * applied.
-     * <br>
-     * @return a CheckedFuture containing the result of the commit. The Future completes once the
-     *         commit operation is complete. A successful commit returns nothing. On failure,
-     *         the Future will fail with a {@link TransactionCommitFailedException} or an exception
-     *         derived from TransactionCommitFailedException.
-     *
-     * @throws IllegalStateException
-     *             if the transaction is not new
-     * @deprecated Use {@link #commit()} instead.
-     */
-    @Deprecated
-    default CheckedFuture<Void, TransactionCommitFailedException> submit() {
-        return MappingCheckedFuture.create(commit().transform(ignored -> null, MoreExecutors.directExecutor()),
-                SUBMIT_EXCEPTION_MAPPER);
-    }
-
-    /**
-     * Submits this transaction to be asynchronously applied to update the logical data tree. The returned
-     * {@link FluentFuture} conveys the result of applying the data changes.
-     *
-     * <p>
-     * This call logically seals the transaction, which prevents the client from further changing the data tree using
-     * this transaction. Any subsequent calls to <code>put(LogicalDatastoreType, Path, Object)</code>,
-     * <code>merge(LogicalDatastoreType, Path, Object)</code>, <code>delete(LogicalDatastoreType, Path)</code> will fail
-     * with {@link IllegalStateException}. The transaction is marked as submitted and enqueued into the data store
-     * back-end for processing.
-     *
-     * <p>
-     * Whether or not the commit is successful is determined by versioning of the data tree and validation of registered
-     * commit participants if the transaction changes the data tree.
-     *
-     * <p>
-     * The effects of a successful commit of data depend on listeners and commit participants that are registered
-     * with the data broker.
-     *
-     * <p>
-     * A successful commit produces an implementation-specific {@link CommitInfo} structure, which is used to
-     * communicate post-condition information to the caller. Such information can contain a commit-id, timing
-     * information or any other information the implementation wishes to share.
-     *
-     * @return a FluentFuture containing the result of the commit. The Future completes once the commit
-     *         operation is complete. A successful commit returns nothing. On failure, the Future will fail with a
-     *         {@link TransactionCommitFailedException} or an exception derived from TransactionCommitFailedException.
-     * @throws IllegalStateException if the transaction is already committed or was canceled.
-     */
-    @NonNull FluentFuture<? extends @NonNull CommitInfo> commit();
-
-    /**
-     * This only exists for reuse by the deprecated {@link #submit} method and is not intended for general use.
-     */
-    @Deprecated
-    ExceptionMapper<TransactionCommitFailedException> SUBMIT_EXCEPTION_MAPPER =
-        new ExceptionMapper<TransactionCommitFailedException>("submit", TransactionCommitFailedException.class) {
-            @Override
-            protected TransactionCommitFailedException newWithCause(final String message, final Throwable cause) {
-                return new TransactionCommitFailedException(message, cause);
-            }
-        };
-}
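With submit() deprecated in favour of commit(), a minimal sketch of the recommended asynchronous result handling; the helper method, the delete() used as the sample write and the executor choice are illustrative:

    static <P extends Path<P>, D> void commitAsync(final AsyncWriteTransaction<P, D> tx, final P path) {
        tx.delete(LogicalDatastoreType.CONFIGURATION, path);
        tx.commit().addCallback(new FutureCallback<CommitInfo>() {
            @Override
            public void onSuccess(final CommitInfo result) {
                // the change has been applied to the conceptual data tree
            }

            @Override
            public void onFailure(final Throwable cause) {
                // TransactionCommitFailedException or a subclass such as OptimisticLockFailedException
            }
        }, MoreExecutors.directExecutor());
    }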
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataReader.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataReader.java
deleted file mode 100644 (file)
index 18ae7d3..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Reader for reading YANG subtrees based on their path.
- *
- * <p>
- * The reader is requested to return the object at the specified path and all its subnodes
- * known to the reader, or null if the node is not found in this reader.
- *
- * @param <P> Path Type
- * @param <D> Data Type
- * @deprecated Replaced by org.opendaylight.controller.sal.core.spi.data.DOMStore contract.
- */
-@Deprecated
-public interface DataReader<P extends Path<P>, D> {
-
-    /**
-     * Reads data from the Operational data store located at the provided path.
-     *
-     * @param path Path to data
-     * @return the data
-     */
-    D readOperationalData(P path);
-
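-    /**
-     * Reads data from the Configuration data store located at the provided path.
-     *
-     * @param path Path to data
-     * @return the data
-     */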
-    D readConfigurationData(P path);
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataStoreUnavailableException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataStoreUnavailableException.java
deleted file mode 100644 (file)
index ebbd116..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-/**
- * This exception occurs if the datastore is temporarily unavailable.
- * A retry of the transaction may succeed after a period of time.
- */
-public class DataStoreUnavailableException extends Exception {
-    private static final long serialVersionUID = 1L;
-
-    public DataStoreUnavailableException(String message, Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataValidationFailedException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/DataValidationFailedException.java
deleted file mode 100644 (file)
index a744548..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import com.google.common.base.Preconditions;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.opendaylight.yangtools.concepts.Path;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-
-/**
- * Failure of asynchronous transaction commit caused by invalid data.
- *
- * <p>
- * This exception is raised and returned when a transaction commit
- * failed because the data submitted via the transaction is invalid.
- *
- * <p>
- * Clients are usually not able to recover from this error condition by
- * retrying the same transaction, since the data introduced by this transaction
- * is invalid.
- */
-public class DataValidationFailedException extends TransactionCommitFailedException {
-
-    private static final long serialVersionUID = 1L;
-
-    @SuppressFBWarnings("SE_BAD_FIELD")
-    private final Path<?> path;
-
-    private final Class<? extends Path<?>> pathType;
-
-    public <P extends Path<P>> DataValidationFailedException(final Class<P> pathType, final P path,
-                                                             final String message, final Throwable cause) {
-        super(message, cause, RpcResultBuilder.newError(ErrorType.APPLICATION, "invalid-value", message, null,
-                                                        path != null ? path.toString() : null, cause));
-        this.pathType = Preconditions.checkNotNull(pathType, "path type must not be null");
-        this.path = Preconditions.checkNotNull(path, "path must not be null.");
-    }
-
-    public <P extends Path<P>> DataValidationFailedException(final Class<P> pathType, final P path,
-                                                              final String message) {
-        this(pathType, path, message, null);
-    }
-
-    public final Path<?> getPath() {
-        return path;
-    }
-
-    public final Class<? extends Path<?>> getPathType() {
-        return pathType;
-    }
-}
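A minimal sketch of how a commit-failure callback might inspect this exception; the surrounding FutureCallback and the slf4j LOG field are assumed:

    @Override
    public void onFailure(final Throwable cause) {
        if (cause instanceof DataValidationFailedException) {
            final DataValidationFailedException validation = (DataValidationFailedException) cause;
            LOG.warn("Rejected invalid data at {} (path type {})", validation.getPath(), validation.getPathType());
            // do not resubmit the same data: it will fail validation again
        }
    }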
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/LogicalDatastoreType.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/LogicalDatastoreType.java
deleted file mode 100644 (file)
index ed99552..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.eclipse.jdt.annotation.NonNullByDefault;
-
-@NonNullByDefault
-public enum LogicalDatastoreType {
-    /**
-     * Logical datastore representing the operational state of the system
-     * and its components.
-     *
-     * <p>
-     * This datastore is used to describe the operational state of
-     * the system and its operation-related data.
-     */
-    OPERATIONAL {
-        @Override
-        public org.opendaylight.mdsal.common.api.LogicalDatastoreType toMdsal() {
-            return org.opendaylight.mdsal.common.api.LogicalDatastoreType.OPERATIONAL;
-        }
-    },
-    /**
-     * Logical datastore representing the configuration state of the system
-     * and its components.
-     *
-     * <p>
-     * This datastore is used to describe the intended state of
-     * the system and the intended operation mode.
-     */
-    CONFIGURATION {
-        @Override
-        public org.opendaylight.mdsal.common.api.LogicalDatastoreType toMdsal() {
-            return org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION;
-        }
-    };
-
-    /**
-     * Convert this logical datastore type to its MD-SAL counterpart.
-     *
-     * @return MD-SAL counterpart of this type.
-     */
-    public abstract org.opendaylight.mdsal.common.api.LogicalDatastoreType toMdsal();
-
-    /**
-     * Convert MD-SAL logical datastore type to this counterpart.
-     *
-     * @param type MD-SAL counterpart of this type.
-     * @return Corresponding value in this type.
-     */
-    public static LogicalDatastoreType fromMdsal(final org.opendaylight.mdsal.common.api.LogicalDatastoreType type) {
-        switch (type) {
-            case CONFIGURATION:
-                return CONFIGURATION;
-            case OPERATIONAL:
-                return OPERATIONAL;
-            default:
-                throw new IllegalArgumentException("Unhandled type " + type);
-        }
-    }
-}
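A minimal sketch of the conversion round-trip used while code migrates between the controller and mdsal enums:

    static LogicalDatastoreType roundTrip(final LogicalDatastoreType type) {
        final org.opendaylight.mdsal.common.api.LogicalDatastoreType mdsalType = type.toMdsal();
        // fromMdsal() throws IllegalArgumentException for values it does not recognize
        return LogicalDatastoreType.fromMdsal(mdsalType);
    }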
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/OptimisticLockFailedException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/OptimisticLockFailedException.java
deleted file mode 100644 (file)
index 6c7c54f..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-
-/**
- * Failure of asynchronous transaction commit caused by failure
- * of optimistic locking.
- *
- * <p>
- * This exception is raised and returned when a transaction commit
- * failed because another transaction finished successfully
- * and modified the same data as the failed transaction.
- *
- * <p>
- * Clients may recover from this error condition by
- * retrieving the current state and submitting a new, updated
- * transaction.
- */
-public class OptimisticLockFailedException extends TransactionCommitFailedException {
-
-    private static final long serialVersionUID = 1L;
-
-    public OptimisticLockFailedException(final String message, final Throwable cause) {
-        super(message, cause, RpcResultBuilder.newError(ErrorType.APPLICATION, "resource-denied",
-                                                        message, null, null, cause));
-    }
-
-    public OptimisticLockFailedException(final String message) {
-        this(message, null);
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/ReadFailedException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/ReadFailedException.java
deleted file mode 100644 (file)
index 8d6b999..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-import org.opendaylight.yangtools.yang.common.OperationFailedException;
-import org.opendaylight.yangtools.yang.common.RpcError;
-
-/**
- * An exception for a failed read.
- */
-public class ReadFailedException extends OperationFailedException {
-
-    private static final long serialVersionUID = 1L;
-
-    public static final ExceptionMapper<ReadFailedException> MAPPER =
-        new ExceptionMapper<ReadFailedException>("read", ReadFailedException.class) {
-            @Override
-            protected ReadFailedException newWithCause(String message, Throwable cause) {
-                return new ReadFailedException(message, cause);
-            }
-    };
-
-    public ReadFailedException(String message, RpcError... errors) {
-        super(message, errors);
-    }
-
-    public ReadFailedException(String message, Throwable cause, RpcError... errors) {
-        super(message, cause, errors);
-    }
-}
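A minimal sketch of the intended use of MAPPER: wrapping an asynchronous read so its failures surface as ReadFailedException, here via MappingCheckedFuture in the same way the deprecated submit() path wraps commit failures; the helper method is illustrative:

    static <T> CheckedFuture<T, ReadFailedException> mapRead(final ListenableFuture<T> readFuture) {
        // any failure cause is re-wrapped as a ReadFailedException by the shared MAPPER
        return MappingCheckedFuture.create(readFuture, ReadFailedException.MAPPER);
    }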
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChain.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChain.java
deleted file mode 100644 (file)
index c469104..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * A chain of transactions. Transactions in a chain need to be committed in
- * sequence and each transaction should see the effects of previous committed transactions
- * as they occurred. A chain makes no guarantees of atomicity across the chained transactions -
- * the transactions are committed as soon as possible in the order that they were submitted.
- *
- * <p>
- * This behaviour is different from the default AsyncDataBroker, where a
- * transaction is always created from the current global state, not taking into
- * account any transactions previously committed by the calling thread. Due to
- * the asynchronous nature of transaction submission this can lead to surprising
- * results. If a thread executes the following sequence sufficiently quickly:
- *
- * <p>
- * AsyncWriteTransaction t1 = broker.newWriteOnlyTransaction();
- * t1.put(id, data);
- * t1.submit();
- *
- * <p>
- * AsyncReadTransaction t2 = broker.newReadOnlyTransaction();
- * Optional&lt;?&gt; maybeData = t2.read(id).get();
- *
- * <p>
- * it may happen that it sees maybeData.isPresent() == false, simply because
- * t1 has not completed the process of being applied and t2 is actually
- * allocated from the previous state. This is obviously bad for users who create
- * incremental state in the datastore and actually read what they write in
- * subsequent transactions.
- *
- * <p>
- * Using a TransactionChain instead of a broker solves this particular problem,
- * and leads to expected behavior: t2 will always see the data written in t1
- * present.
- */
-public interface TransactionChain<P extends Path<P>, D> extends AutoCloseable,
-        AsyncDataTransactionFactory<P, D> {
-
-    /**
-     * Create a new read only transaction which will continue the chain.
-     *
-     * <p>
-     * The previous write transaction has to be either SUBMITTED
-     * ({@link AsyncWriteTransaction#submit submit} was invoked) or CANCELLED
-     * ({@link #close close} was invoked).
-     *
-     * <p>
-     * The returned read-only transaction presents an isolated view of the data if the previous
-     * write transaction was successful - in other words, this read-only transaction will see the
-     * state changes made by the previous write transaction in the chain. However, state which
-     * was introduced by other transactions outside this transaction chain after creation of
-     * the previous transaction is not visible.
-     *
-     * @return New transaction in the chain.
-     * @throws IllegalStateException
-     *             if the previous transaction was not SUBMITTED or CANCELLED.
-     * @throws TransactionChainClosedException
-     *             if the chain has been closed.
-     */
-    @Override
-    AsyncReadOnlyTransaction<P, D> newReadOnlyTransaction();
-
-    /**
-     * Create a new read-write transaction which will continue the chain.
-     *
-     * <p>
-     * The previous write transaction has to be either SUBMITTED
-     * ({@link AsyncWriteTransaction#submit submit} was invoked) or CANCELLED
-     * ({@link #close close} was invoked).
-     *
-     * <p>
-     * The returned read-write transaction presents an isolated view of the data if the previous
-     * write transaction was successful - in other words, this read-write transaction will see the
-     * state changes made by the previous write transaction in the chain. However, state which
-     * was introduced by other transactions outside this transaction chain after creation of
-     * the previous transaction is not visible.
-     *
-     * <p>
-     * Committing this read-write transaction using {@link AsyncWriteTransaction#submit submit}
-     * will submit the state changes in this transaction to be visible to any subsequent
-     * transaction in this chain and also to any transaction outside this chain.
-     *
-     * @return New transaction in the chain.
-     * @throws IllegalStateException
-     *             if the previous transaction was not SUBMITTED or CANCELLED.
-     * @throws TransactionChainClosedException
-     *             if the chain has been closed.
-     */
-    @Override
-    AsyncReadWriteTransaction<P, D> newReadWriteTransaction();
-
-    /**
-     * Create a new write-only transaction which will continue the chain.
-     *
-     * <p>
-     * The previous write transaction has to be either SUBMITTED
-     * ({@link AsyncWriteTransaction#submit submit} was invoked) or CANCELLED
-     * ({@link #close close} was invoked).
-     *
-     * <p>
-     * The returned write-only transaction presents an isolated view of the data if the previous
-     * write transaction was successful - in other words, this write-only transaction will see the
-     * state changes made by the previous write transaction in the chain. However, state which
-     * was introduced by other transactions outside this transaction chain after creation of
-     * the previous transaction is not visible.
-     *
-     * <p>
-     * Committing this write-only transaction using {@link AsyncWriteTransaction#submit submit}
-     * will submit the state changes in this transaction to be visible to any subsequent
-     * transaction in this chain and also to any transaction outside this chain.
-     *
-     * @return New transaction in the chain.
-     * @throws IllegalStateException
-     *             if the previous transaction was not SUBMITTED or CANCELLED.
-     * @throws TransactionChainClosedException
-     *             if the chain has been closed.
-     */
-    @Override
-    AsyncWriteTransaction<P, D> newWriteOnlyTransaction();
-
-    @Override
-    void close();
-}
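A minimal sketch of the t1/t2 scenario above expressed with a chain, so the follow-up transaction is guaranteed to observe the earlier write; the factory, the listener bodies and the use of delete() as the sample write are illustrative (put()/merge() live on the concrete sub-interfaces):

    static <P extends Path<P>, D> void chained(final TransactionChainFactory<P, D> factory, final P id) {
        final TransactionChain<P, D> chain = factory.createTransactionChain(new TransactionChainListener() {
            @Override
            public void onTransactionChainFailed(final TransactionChain<?, ?> failed,
                    final AsyncTransaction<?, ?> tx, final Throwable cause) {
                failed.close(); // as recommended: close the chain once it has failed
            }

            @Override
            public void onTransactionChainSuccessful(final TransactionChain<?, ?> succeeded) {
                // every chained transaction has been applied
            }
        });

        final AsyncWriteTransaction<P, D> t1 = chain.newWriteOnlyTransaction();
        t1.delete(LogicalDatastoreType.OPERATIONAL, id); // any write; put() would be used the same way
        t1.commit();                                     // t1 is submitted, so the next chained transaction may be allocated

        final AsyncReadOnlyTransaction<P, D> t2 = chain.newReadOnlyTransaction(); // guaranteed to see t1's changes
        // ... read through the concrete sub-interface, then release the transaction and the chain
        t2.close();
        chain.close();
    }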
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainClosedException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainClosedException.java
deleted file mode 100644 (file)
index 5e1b35d..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-/**
- * Exception thrown when an attempt is made to open a new transaction in a closed
- * chain.
- */
-public final class TransactionChainClosedException extends IllegalStateException {
-    private static final long serialVersionUID = 1L;
-
-    public TransactionChainClosedException(final String message) {
-        super(message);
-    }
-
-    public TransactionChainClosedException(final String message, final Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainFactory.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainFactory.java
deleted file mode 100644 (file)
index 470e611..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-/**
- * Interface for creating transaction chains.
- */
-public interface TransactionChainFactory<P extends Path<P>, D> {
-
-    /**
-     * Create a new transaction chain. The chain will be initialized to read
-     * from its backing datastore, with no outstanding transaction. Listener
-     * will be registered to handle chain-level events.
-     *
-     * @param listener Transaction chain event listener
-     * @return A new transaction chain.
-     */
-    TransactionChain<P, D> createTransactionChain(TransactionChainListener listener);
-}
-
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainListener.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionChainListener.java
deleted file mode 100644 (file)
index 52b0812..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import java.util.EventListener;
-
-/**
- * Listener for transaction chain events.
- */
-public interface TransactionChainListener extends EventListener {
-    /**
-     * Invoked when a transaction in the chain fails. All other transactions are automatically cancelled by the time
-     * this notification is invoked. Implementations should invoke chain.close() to close the chain.
-     *
-     * @param chain Transaction chain which failed
-     * @param transaction Transaction which caused the chain to fail
-     * @param cause The cause of transaction failure
-     */
-    void onTransactionChainFailed(TransactionChain<?, ?> chain, AsyncTransaction<?, ?> transaction, Throwable cause);
-
-    /**
-     * Invoked when a transaction chain is completed. A transaction chain is considered completed when it has been
-     * closed and all its instructions have completed successfully.
-     *
-     * @param chain Transaction chain which completed
-     */
-    void onTransactionChainSuccessful(TransactionChain<?, ?> chain);
-}
-
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java
deleted file mode 100644 (file)
index 8d9d1c2..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import com.google.common.base.Supplier;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-
-/**
- * A type of TransactionCommitFailedException that indicates a situation that would result in a
- * threading deadlock. This can occur if a caller that submits a write transaction tries to perform
- * a blocking call via one of the <code>get</code> methods on the returned ListenableFuture. Callers
- * should process the commit result asynchronously (via Futures#addCallback) to ensure deadlock
- * won't occur.
- *
- * @author Thomas Pantelis
- */
-public class TransactionCommitDeadlockException extends TransactionCommitFailedException {
-    private static final long serialVersionUID = 1L;
-    private static final String DEADLOCK_MESSAGE =
-            "An attempt to block on a ListenableFuture via a get method from a write "
-            + "transaction submit was detected that would result in deadlock. The commit "
-            + "result must be obtained asynchronously, e.g. via Futures#addCallback, to avoid deadlock.";
-    private static final RpcError DEADLOCK_RPCERROR = RpcResultBuilder.newError(ErrorType.APPLICATION,
-            "lock-denied", DEADLOCK_MESSAGE);
-
-    public static final Supplier<Exception> DEADLOCK_EXCEPTION_SUPPLIER =
-        () -> new TransactionCommitDeadlockException(DEADLOCK_MESSAGE, DEADLOCK_RPCERROR);
-
-    public TransactionCommitDeadlockException(final String message, final RpcError... errors) {
-        super(message, errors);
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitFailedException.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitFailedException.java
deleted file mode 100644 (file)
index d92eed5..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.data;
-
-import org.opendaylight.yangtools.yang.common.OperationFailedException;
-import org.opendaylight.yangtools.yang.common.RpcError;
-
-/**
- * Failed commit of asynchronous transaction. This exception is raised and returned when transaction commit failed.
- */
-public class TransactionCommitFailedException extends OperationFailedException {
-
-    private static final long serialVersionUID = 1L;
-
-    public TransactionCommitFailedException(final String message, final RpcError... errors) {
-        this(message, null, errors);
-    }
-
-    public TransactionCommitFailedException(final String message, final Throwable cause,
-                                            final RpcError... errors) {
-        super(message, cause, errors);
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/notify/NotificationPublishService.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/notify/NotificationPublishService.java
deleted file mode 100644 (file)
index 4a3d866..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.notify;
-
-import java.util.concurrent.ExecutorService;
-
-/**
- * Interface for publishing YANG-modeled notifications.
- *
- * <p>
- * Users of this interface can publish any YANG-modeled notification which will
- * be delivered to all subscribed listeners.
- *
- * <p>
- * The preferred way of publishing notifications is by invoking {@link #publish(Object)}.
- *
- * <p>You may consider using {@link #publish(Object, ExecutorService)} if and only if
- * your use-case requires a customized execution policy or run-to-completion delivery
- * inside the process.
- *
- * <p>
- * The metadata required to deliver a notification to the correct listeners is
- * extracted from the published notification.
- *
- * <p>
- * FIXME: Consider clarification of execution/delivery policy, how it will be
- * affected by Actor model and cluster-wide notifications.
- *
- * @param <N>
- *            the type of notifications
- */
-public interface NotificationPublishService<N> {
-
-    /**
-     * Publishes a notification and notifies subscribed listeners. All listener
-     * notifications are done via a default executor.
-     *
-     * <p>
-     * <b>Note:</b> This call will block when the default executor is saturated
-     * and the notification queue for this executor is full.
-     *
-     * @param notification
-     *            the notification to publish.
-     */
-    void publish(N notification);
-
-    /**
-     * Publishes a notification and notifies subscribed listeners. All listener
-     * notifications are done via the provided executor.
-     *
-     * <p>
-     * <b>Note:</b> Use only if necessary. Consider using
-     * {@link #publish(Object)} for most use-cases.
-     *
-     * <p>
-     * By using this method you could customize execution policy of listeners present
-     * inside process (e.g. using  single-threaded executor or even same-thread executor
-     * delivery.
-     *
-     * <p>
-     * This executor is used only for inside-process notification deliveries.
-     *
-     * @param notification
-     *            the notification to publish.
-     * @param executor
-     *            the executor that will be used to deliver notifications to
-     *            subscribed listeners.
-     */
-    void publish(N notification, ExecutorService executor);
-}
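
A short sketch of how the two publish variants above might be used; the helper class, its method
name and the single-threaded executor policy are assumptions made for illustration only:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    final class PublishExample {
        static <N> void publishBoth(final NotificationPublishService<N> service, final N notification) {
            // Preferred: deliver via the service's default executor.
            service.publish(notification);

            // Only when a custom in-process delivery policy is really needed:
            ExecutorService singleThread = Executors.newSingleThreadExecutor();
            try {
                service.publish(notification, singleThread);
            } finally {
                singleThread.shutdown();
            }
        }
    }
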
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/notify/NotificationSubscriptionService.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/notify/NotificationSubscriptionService.java
deleted file mode 100644 (file)
index a83610f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.notify;
-
-import java.util.EventListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-public interface NotificationSubscriptionService<T, N, L extends EventListener> {
-
-    ListenerRegistration<L> registerNotificationListener(T type, L listener);
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChange.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChange.java
deleted file mode 100644 (file)
index 1575cc5..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.routing;
-
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Event representing a change in the RPC routing table.
- *
- *
- * @param <C> Type, which is used to represent Routing context.
- * @param <P> Type of data tree path, which is used to identify route.
- */
-public interface RouteChange<C, P> {
-
-    /**
-     * Returns a map of removed routes in associated routing contexts.
-     *
-     * <p>
-     * This map represents routes which were withdrawn from the broker's local
-     * routing table; the broker may need to forward RPCs to another broker
-     * in order to process RPC requests.
-     *
-     * @return Map of contexts and removed routes
-     */
-    Map<C, Set<P>> getRemovals();
-
-    /**
-     * Returns a map of announced routes in associated routing contexts.
-     *
-     * <p>
-     * This map represents routes which were announced by the broker
-     * and are present in the broker's local routing table. These routes
-     * are processed by implementations which are registered
-     * to the originating broker.
-     *
-     * @return Map of contexts and announced routes
-     */
-    Map<C, Set<P>> getAnnouncements();
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChangeListener.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChangeListener.java
deleted file mode 100644 (file)
index 7283cd4..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.routing;
-
-import java.util.EventListener;
-
-/**
- * Listener which is interested in receiving {@link RouteChange} events for its local broker.
- *
- * <p>
- * Listeners are registered via {@link RouteChangePublisher#registerRouteChangeListener(RouteChangeListener)}.
- *
- * @param <C> Type, which is used to represent Routing context.
- * @param <P> Type of data tree path, which is used to identify route.
- */
-public interface RouteChangeListener<C, P> extends EventListener {
-
-    /**
-     * Callback which is invoked when the local RPC routing table changes.
-     *
-     * @param change Event representing change in local RPC routing table.
-     */
-    void onRouteChange(RouteChange<C, P> change);
-}
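
A minimal sketch of a listener consuming the two maps described by RouteChange above; the class
name and the println-based handling are illustrative only:

    import java.util.Map;
    import java.util.Set;

    final class LoggingRouteChangeListener<C, P> implements RouteChangeListener<C, P> {
        @Override
        public void onRouteChange(final RouteChange<C, P> change) {
            // Routes newly announced into the local routing table.
            for (Map.Entry<C, Set<P>> entry : change.getAnnouncements().entrySet()) {
                System.out.println("Announced in context " + entry.getKey() + ": " + entry.getValue());
            }
            // Routes withdrawn from the local routing table.
            for (Map.Entry<C, Set<P>> entry : change.getRemovals().entrySet()) {
                System.out.println("Removed in context " + entry.getKey() + ": " + entry.getValue());
            }
        }
    }
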
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChangePublisher.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RouteChangePublisher.java
deleted file mode 100644 (file)
index dc6b6dd..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.routing;
-
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-/**
- * Publishes changes in the local RPC routing table to registered listeners.
- *
- * @param <C> Type, which is used to represent Routing context.
- * @param <P> Type of data tree path, which is used to identify route.
- */
-public interface RouteChangePublisher<C,P> {
-
-    <L extends RouteChangeListener<C,P>> ListenerRegistration<L> registerRouteChangeListener(L listener);
-}
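
A small sketch of wiring a listener to a RouteChangePublisher; only registerRouteChangeListener()
and ListenerRegistration come from the APIs above, the helper class and method are assumptions:

    import org.opendaylight.yangtools.concepts.ListenerRegistration;

    final class RouteChangeSubscription {
        // The returned registration should be retained; calling close() on it stops delivery.
        static <C, P, L extends RouteChangeListener<C, P>> ListenerRegistration<L> subscribe(
                final RouteChangePublisher<C, P> publisher, final L listener) {
            return publisher.registerRouteChangeListener(listener);
        }
    }
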
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RoutedRegistration.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RoutedRegistration.java
deleted file mode 100644 (file)
index 7b1cdea..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.routing;
-
-import org.opendaylight.yangtools.concepts.Path;
-import org.opendaylight.yangtools.concepts.Registration;
-
-/**
- * Base interface for a routed RPC implementation registration.
- *
- * @param <C> the context type used for routing
- * @param <P> the path identifier type
- * @param <S> the RPC implementation type
- */
-public interface RoutedRegistration<C, P extends Path<P>, S> extends Registration {
-
-    /**
-     * Registers the RPC implementation associated with this registration for the given path
-     * identifier and context.
-     *
-     * @param context the context used for routing RPCs to this implementation.
-     * @param path the path identifier for which to register.
-     */
-    void registerPath(C context, P path);
-
-    /**
-     * Unregisters the RPC implementation associated with this registration for the given path
-     * identifier and context.
-     *
-     * @param context the context used for routing RPCs to this implementation.
-     * @param path the path identifier for which to unregister.
-     */
-    void unregisterPath(C context, P path);
-
-    @Override
-    void close();
-}
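
A sketch of the intended life-cycle of a RoutedRegistration: paths are announced as served
instances appear, withdrawn as they disappear, and the registration is closed on shutdown.
Obtaining the registration itself is broker-specific and assumed here; the helper class and
method names are illustrative:

    import org.opendaylight.yangtools.concepts.Path;

    final class RoutedLifecycleExample {
        static <C, P extends Path<P>, S> void manage(final RoutedRegistration<C, P, S> registration,
                final C context, final P path) {
            // Announce a path this implementation can serve ...
            registration.registerPath(context, path);

            // ... withdraw it when the backing instance goes away ...
            registration.unregisterPath(context, path);

            // ... and close the registration entirely on shutdown.
            registration.close();
        }
    }
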
diff --git a/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RoutingTable.java b/opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/routing/RoutingTable.java
deleted file mode 100644 (file)
index 9e72b68..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api.routing;
-
-import java.util.Map;
-
-import org.opendaylight.yangtools.concepts.Path;
-
-public interface RoutingTable<C, P extends Path<P>, T> {
-
-    C getIdentifier();
-
-    T getDefaultRoute();
-
-    Map<P,T> getRoutes();
-
-    T getRoute(P path);
-}
diff --git a/opendaylight/md-sal/sal-common-api/src/test/java/org/opendaylight/controller/md/sal/common/api/MappingCheckedFutureTest.java b/opendaylight/md-sal/sal-common-api/src/test/java/org/opendaylight/controller/md/sal/common/api/MappingCheckedFutureTest.java
deleted file mode 100644 (file)
index bee3060..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.api;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import org.junit.Test;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-
-/**
- * Unit tests for MappingCheckedFuture.
- *
- * @author Thomas Pantelis
- */
-public class MappingCheckedFutureTest {
-
-    interface FutureInvoker {
-        void invokeGet(CheckedFuture<?,?> future) throws Exception;
-
-        Throwable extractWrappedTestEx(Exception from);
-    }
-
-    static class TestException extends Exception {
-        private static final long serialVersionUID = 1L;
-
-        TestException(final String message, final Throwable cause) {
-            super(message, cause);
-        }
-    }
-
-    static final ExceptionMapper<TestException> MAPPER = new ExceptionMapper<TestException>(
-                                                                      "Test", TestException.class) {
-
-        @Override
-        protected TestException newWithCause(final String message, final Throwable cause) {
-            return new TestException(message, cause);
-        }
-    };
-
-    static final FutureInvoker GET = new FutureInvoker() {
-        @Override
-        public void invokeGet(final CheckedFuture<?, ?> future) throws Exception {
-            future.get();
-        }
-
-        @Override
-        public Throwable extractWrappedTestEx(final Exception from) {
-            if (from instanceof ExecutionException) {
-                return from.getCause();
-            }
-
-            return from;
-        }
-    };
-
-    static final FutureInvoker TIMED_GET = new FutureInvoker() {
-        @Override
-        public void invokeGet(final CheckedFuture<?, ?> future) throws Exception {
-            future.get(1, TimeUnit.HOURS);
-        }
-
-        @Override
-        public Throwable extractWrappedTestEx(final Exception from) {
-            if (from instanceof ExecutionException) {
-                return from.getCause();
-            }
-
-            return from;
-        }
-    };
-
-    static final FutureInvoker CHECKED_GET = new FutureInvoker() {
-        @Override
-        public void invokeGet(final CheckedFuture<?,?> future) throws Exception {
-            future.checkedGet();
-        }
-
-        @Override
-        public Throwable extractWrappedTestEx(final Exception from) {
-            return from;
-        }
-    };
-
-    static final FutureInvoker TIMED_CHECKED_GET = new FutureInvoker() {
-        @Override
-        public void invokeGet(final CheckedFuture<?,?> future) throws Exception {
-            future.checkedGet(50, TimeUnit.MILLISECONDS);
-        }
-
-        @Override
-        public Throwable extractWrappedTestEx(final Exception from) {
-            return from;
-        }
-    };
-
-    @Test
-    public void testGet() throws Exception {
-        SettableFuture<String> delegate = SettableFuture.create();
-        MappingCheckedFuture<String,TestException> future = MappingCheckedFuture.create(delegate, MAPPER);
-        delegate.set("test");
-        assertEquals("get", "test", future.get());
-    }
-
-    @Test
-    public void testGetWithExceptions() throws Exception {
-        testExecutionException(GET, new RuntimeException());
-        testExecutionException(GET, new TestException("mock", null));
-        testCancellationException(GET);
-        testInterruptedException(GET);
-    }
-
-    @Test
-    public void testTimedGet() throws Exception {
-        SettableFuture<String> delegate = SettableFuture.create();
-        MappingCheckedFuture<String,TestException> future = MappingCheckedFuture.create(delegate, MAPPER);
-        delegate.set("test");
-        assertEquals("get", "test", future.get(50, TimeUnit.MILLISECONDS));
-    }
-
-    @Test
-    public void testTimedGetWithExceptions() throws Exception {
-        testExecutionException(TIMED_GET, new RuntimeException());
-        testCancellationException(TIMED_GET);
-        testInterruptedException(TIMED_GET);
-    }
-
-    @Test
-    public void testCheckedGetWithExceptions() throws Exception {
-        testExecutionException(CHECKED_GET, new RuntimeException());
-        testCancellationException(CHECKED_GET);
-        testInterruptedException(CHECKED_GET);
-    }
-
-    @Test
-    public void testTimedCheckedWithExceptions() throws Exception {
-        testExecutionException(TIMED_CHECKED_GET, new RuntimeException());
-        testCancellationException(TIMED_CHECKED_GET);
-        testInterruptedException(TIMED_CHECKED_GET);
-    }
-
-    @SuppressWarnings("checkstyle:illegalCatch")
-    private static void testExecutionException(final FutureInvoker invoker, final Throwable cause) {
-        SettableFuture<String> delegate = SettableFuture.create();
-        MappingCheckedFuture<String, TestException> mappingFuture = MappingCheckedFuture.create(delegate, MAPPER);
-
-        delegate.setException(cause);
-
-        try {
-            invoker.invokeGet(mappingFuture);
-            fail("Expected exception thrown");
-        } catch (Exception e) {
-            Throwable expectedTestEx = invoker.extractWrappedTestEx(e);
-            assertNotNull("Expected returned exception is null", expectedTestEx);
-            assertEquals("Exception type", TestException.class, expectedTestEx.getClass());
-
-            if (cause instanceof TestException) {
-                assertNull("Expected null cause", expectedTestEx.getCause());
-            } else {
-                assertSame("TestException cause", cause, expectedTestEx.getCause());
-            }
-        }
-    }
-
-    @SuppressWarnings("checkstyle:illegalCatch")
-    private static void testCancellationException(final FutureInvoker invoker) {
-        SettableFuture<String> delegate = SettableFuture.create();
-        MappingCheckedFuture<String, TestException> mappingFuture = MappingCheckedFuture.create(delegate, MAPPER);
-
-        mappingFuture.cancel(false);
-
-        try {
-            invoker.invokeGet(mappingFuture);
-            fail("Expected exception thrown");
-        } catch (Exception e) {
-            Throwable expectedTestEx = invoker.extractWrappedTestEx(e);
-            assertNotNull("Expected returned exception is null", expectedTestEx);
-            assertEquals("Exception type", TestException.class, expectedTestEx.getClass());
-            assertEquals("TestException cause type", CancellationException.class, expectedTestEx.getCause().getClass());
-        }
-    }
-
-    @SuppressWarnings("checkstyle:illegalCatch")
-    private static void testInterruptedException(final FutureInvoker invoker) throws Exception {
-        SettableFuture<String> delegate = SettableFuture.create();
-        final MappingCheckedFuture<String, TestException> mappingFuture = MappingCheckedFuture.create(delegate, MAPPER);
-
-        final AtomicReference<AssertionError> assertError = new AtomicReference<>();
-        final CountDownLatch doneLatch = new CountDownLatch(1);
-        Thread thread = new Thread() {
-            @Override
-            public void run() {
-                try {
-                    doInvoke();
-                } catch (AssertionError e) {
-                    assertError.set(e);
-                } finally {
-                    doneLatch.countDown();
-                }
-            }
-
-            void doInvoke() {
-                try {
-                    invoker.invokeGet(mappingFuture);
-                    fail("Expected exception thrown");
-                } catch (Exception e) {
-                    Throwable expectedTestEx = invoker.extractWrappedTestEx(e);
-                    assertNotNull("Expected returned exception is null", expectedTestEx);
-                    assertEquals("Exception type", TestException.class, expectedTestEx.getClass());
-                    assertEquals("TestException cause type", InterruptedException.class,
-                                  expectedTestEx.getCause().getClass());
-                }
-            }
-        };
-        thread.start();
-
-        thread.interrupt();
-        assertTrue("get call completed", doneLatch.await(5, TimeUnit.SECONDS));
-
-        if (assertError.get() != null) {
-            throw assertError.get();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-impl/pom.xml b/opendaylight/md-sal/sal-common-impl/pom.xml
deleted file mode 100644 (file)
index f11c53e..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-common-impl</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-model-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-parser-impl</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.felix</groupId>
-        <artifactId>maven-bundle-plugin</artifactId>
-        <configuration>
-          <instructions>
-            <Export-Package>org.opendaylight.controller.md.sal.common.impl,
-                            org.opendaylight.controller.md.sal.common.impl.*</Export-Package>
-          </instructions>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizationException.java b/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizationException.java
deleted file mode 100644 (file)
index daecfdb..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.impl.util.compat;
-
-public class DataNormalizationException extends Exception {
-    private static final long serialVersionUID = 1L;
-
-    public DataNormalizationException(String message) {
-        super(message);
-    }
-
-    public DataNormalizationException(String message, Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizationOperation.java b/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizationOperation.java
deleted file mode 100644 (file)
index e2a1d55..0000000
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.impl.util.compat;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.AugmentationSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.AugmentationTarget;
-import org.opendaylight.yangtools.yang.model.api.CaseSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ChoiceSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaNode;
-import org.opendaylight.yangtools.yang.model.util.EffectiveAugmentationSchema;
-
-@Deprecated
-public abstract class DataNormalizationOperation<T extends PathArgument> implements Identifiable<T> {
-
-    private final T identifier;
-    private final Optional<DataSchemaNode> dataSchemaNode;
-
-    @Override
-    public T getIdentifier() {
-        return identifier;
-    }
-
-    protected DataNormalizationOperation(final T identifier, final SchemaNode schema) {
-        this.identifier = identifier;
-        if (schema instanceof DataSchemaNode) {
-            this.dataSchemaNode = Optional.of((DataSchemaNode) schema);
-        } else {
-            this.dataSchemaNode = Optional.absent();
-        }
-    }
-
-    public boolean isMixin() {
-        return false;
-    }
-
-
-    public boolean isKeyedEntry() {
-        return false;
-    }
-
-    protected Set<QName> getQNameIdentifiers() {
-        return Collections.singleton(identifier.getNodeType());
-    }
-
-    public abstract DataNormalizationOperation<?> getChild(PathArgument child) throws DataNormalizationException;
-
-    public abstract DataNormalizationOperation<?> getChild(QName child) throws DataNormalizationException;
-
-
-    public abstract boolean isLeaf();
-
-    public Optional<DataSchemaNode> getDataSchemaNode() {
-        // FIXME
-        return dataSchemaNode;
-    }
-
-    private abstract static class SimpleTypeNormalization<T extends PathArgument>
-            extends DataNormalizationOperation<T> {
-
-        protected SimpleTypeNormalization(final T identifier, final DataSchemaNode potential) {
-            super(identifier,potential);
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final PathArgument child) {
-            return null;
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final QName child) {
-            return null;
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return null;
-        }
-
-        @Override
-        public boolean isLeaf() {
-            return true;
-        }
-    }
-
-    private static final class LeafNormalization extends SimpleTypeNormalization<NodeIdentifier> {
-
-        protected LeafNormalization(final LeafSchemaNode potential) {
-            super(new NodeIdentifier(potential.getQName()),potential);
-        }
-
-    }
-
-    private static final class LeafListEntryNormalization extends SimpleTypeNormalization<NodeWithValue> {
-
-        LeafListEntryNormalization(final LeafListSchemaNode potential) {
-            super(new NodeWithValue(potential.getQName(), null),potential);
-        }
-
-        @Override
-        public boolean isKeyedEntry() {
-            return true;
-        }
-    }
-
-    private abstract static class CompositeNodeNormalizationOperation<T extends PathArgument>
-            extends DataNormalizationOperation<T> {
-
-        protected CompositeNodeNormalizationOperation(final T identifier, final DataSchemaNode schema) {
-            super(identifier,schema);
-        }
-
-        @Override
-        public boolean isLeaf() {
-            return false;
-        }
-
-
-    }
-
-    private abstract static class DataContainerNormalizationOperation<T extends PathArgument>
-            extends CompositeNodeNormalizationOperation<T> {
-
-        private final DataNodeContainer schema;
-        private final Map<QName, DataNormalizationOperation<?>> byQName;
-        private final Map<PathArgument, DataNormalizationOperation<?>> byArg;
-
-        protected DataContainerNormalizationOperation(final T identifier, final DataNodeContainer schema,
-                final DataSchemaNode node) {
-            super(identifier,node);
-            this.schema = schema;
-            this.byArg = new ConcurrentHashMap<>();
-            this.byQName = new ConcurrentHashMap<>();
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final PathArgument child) throws DataNormalizationException {
-            DataNormalizationOperation<?> potential = byArg.get(child);
-            if (potential != null) {
-                return potential;
-            }
-            potential = fromLocalSchema(child);
-            return register(potential);
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final QName child) throws DataNormalizationException {
-            DataNormalizationOperation<?> potential = byQName.get(child);
-            if (potential != null) {
-                return potential;
-            }
-            potential = fromLocalSchemaAndQName(schema, child);
-            return register(potential);
-        }
-
-        private DataNormalizationOperation<?> fromLocalSchema(final PathArgument child)
-                throws DataNormalizationException {
-            if (child instanceof AugmentationIdentifier) {
-                return fromSchemaAndQNameChecked(schema, ((AugmentationIdentifier) child).getPossibleChildNames()
-                        .iterator().next());
-            }
-            return fromSchemaAndQNameChecked(schema, child.getNodeType());
-        }
-
-        protected DataNormalizationOperation<?> fromLocalSchemaAndQName(final DataNodeContainer schema2,
-                final QName child) throws DataNormalizationException {
-            return fromSchemaAndQNameChecked(schema2, child);
-        }
-
-        private DataNormalizationOperation<?> register(final DataNormalizationOperation<?> potential) {
-            if (potential != null) {
-                byArg.put(potential.getIdentifier(), potential);
-                for (final QName qname : potential.getQNameIdentifiers()) {
-                    byQName.put(qname, potential);
-                }
-            }
-            return potential;
-        }
-
-    }
-
-    private static final class ListItemNormalization extends
-            DataContainerNormalizationOperation<NodeIdentifierWithPredicates> {
-
-        protected ListItemNormalization(final NodeIdentifierWithPredicates identifier, final ListSchemaNode schema) {
-            super(identifier, schema, schema);
-        }
-
-        @Override
-        @SuppressFBWarnings("BC_UNCONFIRMED_CAST")
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            final DataContainerNodeAttrBuilder<NodeIdentifierWithPredicates, MapEntryNode> builder = Builders
-                    .mapEntryBuilder().withNodeIdentifier((NodeIdentifierWithPredicates) currentArg);
-            for (final Entry<QName, Object> keyValue :
-                    ((NodeIdentifierWithPredicates) currentArg).getKeyValues().entrySet()) {
-                builder.addChild(Builders.leafBuilder()
-                        //
-                        .withNodeIdentifier(new NodeIdentifier(keyValue.getKey())).withValue(keyValue.getValue())
-                        .build());
-            }
-            return builder.build();
-        }
-
-
-        @Override
-        public boolean isKeyedEntry() {
-            return true;
-        }
-    }
-
-    private static final class UnkeyedListItemNormalization
-            extends DataContainerNormalizationOperation<NodeIdentifier> {
-
-        protected UnkeyedListItemNormalization(final ListSchemaNode schema) {
-            super(new NodeIdentifier(schema.getQName()), schema,schema);
-        }
-
-        @Override
-        @SuppressFBWarnings("BC_UNCONFIRMED_CAST")
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.unkeyedListEntryBuilder().withNodeIdentifier((NodeIdentifier) currentArg).build();
-        }
-
-    }
-
-    private static final class ContainerNormalization extends DataContainerNormalizationOperation<NodeIdentifier> {
-
-        protected ContainerNormalization(final ContainerSchemaNode schema) {
-            super(new NodeIdentifier(schema.getQName()),schema, schema);
-        }
-
-        @Override
-        @SuppressFBWarnings("BC_UNCONFIRMED_CAST")
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.containerBuilder().withNodeIdentifier((NodeIdentifier) currentArg).build();
-        }
-
-    }
-
-    private abstract static class MixinNormalizationOp<T extends PathArgument>
-            extends CompositeNodeNormalizationOperation<T> {
-
-        protected MixinNormalizationOp(final T identifier, final DataSchemaNode schema) {
-            super(identifier,schema);
-        }
-
-        @Override
-        public final boolean isMixin() {
-            return true;
-        }
-
-    }
-
-
-    private static final class OrderedLeafListMixinNormalization extends UnorderedLeafListMixinNormalization {
-        OrderedLeafListMixinNormalization(final LeafListSchemaNode potential) {
-            super(potential);
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.orderedLeafSetBuilder().withNodeIdentifier(getIdentifier()).build();
-        }
-    }
-
-    private static class UnorderedLeafListMixinNormalization extends MixinNormalizationOp<NodeIdentifier> {
-
-        private final DataNormalizationOperation<?> innerOp;
-
-        UnorderedLeafListMixinNormalization(final LeafListSchemaNode potential) {
-            super(new NodeIdentifier(potential.getQName()),potential);
-            innerOp = new LeafListEntryNormalization(potential);
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.leafSetBuilder().withNodeIdentifier(getIdentifier()).build();
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final PathArgument child) {
-            if (child instanceof NodeWithValue) {
-                return innerOp;
-            }
-            return null;
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final QName child) {
-            if (getIdentifier().getNodeType().equals(child)) {
-                return innerOp;
-            }
-            return null;
-        }
-    }
-
-    private static final class AugmentationNormalization
-            extends DataContainerNormalizationOperation<AugmentationIdentifier> {
-
-        AugmentationNormalization(final AugmentationSchemaNode augmentation, final DataNodeContainer schema) {
-            super(augmentationIdentifierFrom(augmentation), augmentationProxy(augmentation,schema),null);
-        }
-
-        @Override
-        public boolean isMixin() {
-            return true;
-        }
-
-
-
-        @Override
-        protected DataNormalizationOperation<?> fromLocalSchemaAndQName(final DataNodeContainer schema,
-                final QName child) {
-            final Optional<DataSchemaNode> potential = findChildSchemaNode(schema, child);
-            if (!potential.isPresent()) {
-                return null;
-            }
-
-            final DataSchemaNode result = potential.get();
-            // We try to look up if this node was added by augmentation
-            if (schema instanceof DataSchemaNode && result.isAugmenting()) {
-                return fromAugmentation(schema, (AugmentationTarget) schema, result);
-            }
-            return fromDataSchemaNode(result);
-        }
-
-        @Override
-        protected Set<QName> getQNameIdentifiers() {
-            return getIdentifier().getPossibleChildNames();
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.augmentationBuilder().withNodeIdentifier(getIdentifier()).build();
-        }
-
-    }
-
-    private static class UnorderedMapMixinNormalization extends MixinNormalizationOp<NodeIdentifier> {
-
-        private final ListItemNormalization innerNode;
-
-        UnorderedMapMixinNormalization(final ListSchemaNode list) {
-            super(new NodeIdentifier(list.getQName()),list);
-            this.innerNode = new ListItemNormalization(new NodeIdentifierWithPredicates(list.getQName(),
-                    Collections.<QName, Object>emptyMap()), list);
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.mapBuilder().withNodeIdentifier(getIdentifier()).build();
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final PathArgument child) {
-            if (child.getNodeType().equals(getIdentifier().getNodeType())) {
-                return innerNode;
-            }
-            return null;
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final QName child) {
-            if (getIdentifier().getNodeType().equals(child)) {
-                return innerNode;
-            }
-            return null;
-        }
-
-    }
-
-
-    private static class UnkeyedListMixinNormalization extends MixinNormalizationOp<NodeIdentifier> {
-
-        private final UnkeyedListItemNormalization innerNode;
-
-        UnkeyedListMixinNormalization(final ListSchemaNode list) {
-            super(new NodeIdentifier(list.getQName()),list);
-            this.innerNode = new UnkeyedListItemNormalization(list);
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.unkeyedListBuilder().withNodeIdentifier(getIdentifier()).build();
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final PathArgument child) {
-            if (child.getNodeType().equals(getIdentifier().getNodeType())) {
-                return innerNode;
-            }
-            return null;
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final QName child) {
-            if (getIdentifier().getNodeType().equals(child)) {
-                return innerNode;
-            }
-            return null;
-        }
-
-    }
-
-    private static final class OrderedMapMixinNormalization extends UnorderedMapMixinNormalization {
-
-        OrderedMapMixinNormalization(final ListSchemaNode list) {
-            super(list);
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.orderedMapBuilder().withNodeIdentifier(getIdentifier()).build();
-        }
-
-    }
-
-    private static class ChoiceNodeNormalization extends MixinNormalizationOp<NodeIdentifier> {
-
-        private final ImmutableMap<QName, DataNormalizationOperation<?>> byQName;
-        private final ImmutableMap<PathArgument, DataNormalizationOperation<?>> byArg;
-
-        protected ChoiceNodeNormalization(final ChoiceSchemaNode schema) {
-            super(new NodeIdentifier(schema.getQName()),schema);
-            final ImmutableMap.Builder<QName, DataNormalizationOperation<?>> byQNameBuilder = ImmutableMap.builder();
-            final ImmutableMap.Builder<PathArgument, DataNormalizationOperation<?>> byArgBuilder =
-                    ImmutableMap.builder();
-
-            for (final CaseSchemaNode caze : schema.getCases().values()) {
-                for (final DataSchemaNode cazeChild : caze.getChildNodes()) {
-                    final DataNormalizationOperation<?> childOp = fromDataSchemaNode(cazeChild);
-                    byArgBuilder.put(childOp.getIdentifier(), childOp);
-                    for (final QName qname : childOp.getQNameIdentifiers()) {
-                        byQNameBuilder.put(qname, childOp);
-                    }
-                }
-            }
-            byQName = byQNameBuilder.build();
-            byArg = byArgBuilder.build();
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final PathArgument child) {
-            return byArg.get(child);
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final QName child) {
-            return byQName.get(child);
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return Builders.choiceBuilder().withNodeIdentifier(getIdentifier()).build();
-        }
-    }
-
-    private static class AnyXmlNormalization extends DataNormalizationOperation<NodeIdentifier> {
-
-        protected AnyXmlNormalization(final AnyXmlSchemaNode schema) {
-            super(new NodeIdentifier(schema.getQName()), schema);
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final PathArgument child) {
-            return null;
-        }
-
-        @Override
-        public DataNormalizationOperation<?> getChild(final QName child) {
-            return null;
-        }
-
-        @Override
-        public boolean isLeaf() {
-            return false;
-        }
-
-        @Override
-        public NormalizedNode<?, ?> createDefault(final PathArgument currentArg) {
-            return null;
-        }
-    }
-
-    private static Optional<DataSchemaNode> findChildSchemaNode(final DataNodeContainer parent,
-            final QName child) {
-        DataSchemaNode potential = parent.getDataChildByName(child);
-        if (potential == null) {
-            final Iterable<ChoiceSchemaNode> choices = FluentIterable.from(parent.getChildNodes())
-                    .filter(ChoiceSchemaNode.class);
-            potential = findChoice(choices, child);
-        }
-        return Optional.fromNullable(potential);
-    }
-
-    private static DataNormalizationOperation<?> fromSchemaAndQNameChecked(final DataNodeContainer schema,
-            final QName child) throws DataNormalizationException {
-
-        final Optional<DataSchemaNode> potential = findChildSchemaNode(schema, child);
-        if (!potential.isPresent()) {
-            throw new DataNormalizationException(String.format(
-                    "Supplied QName %s is not valid according to schema %s, potential children nodes: %s", child,
-                    schema,schema.getChildNodes()));
-        }
-
-        final DataSchemaNode result = potential.get();
-        // We try to look up if this node was added by augmentation
-        if (schema instanceof DataSchemaNode && result.isAugmenting()) {
-            return fromAugmentation(schema, (AugmentationTarget) schema, result);
-        }
-        return fromDataSchemaNode(result);
-    }
-
-    private static ChoiceSchemaNode findChoice(final Iterable<ChoiceSchemaNode> choices, final QName child) {
-        ChoiceSchemaNode foundChoice = null;
-        choiceLoop: for (final ChoiceSchemaNode choice : choices) {
-            for (final CaseSchemaNode caze : choice.getCases().values()) {
-                if (findChildSchemaNode(caze, child).isPresent()) {
-                    foundChoice = choice;
-                    break choiceLoop;
-                }
-            }
-        }
-        return foundChoice;
-    }
-
-    public static AugmentationIdentifier augmentationIdentifierFrom(final AugmentationSchemaNode augmentation) {
-        final ImmutableSet.Builder<QName> potentialChildren = ImmutableSet.builder();
-        for (final DataSchemaNode child : augmentation.getChildNodes()) {
-            potentialChildren.add(child.getQName());
-        }
-        return new AugmentationIdentifier(potentialChildren.build());
-    }
-
-    private static DataNodeContainer augmentationProxy(final AugmentationSchemaNode augmentation,
-            final DataNodeContainer schema) {
-        final Set<DataSchemaNode> children = new HashSet<>();
-        for (final DataSchemaNode augNode : augmentation.getChildNodes()) {
-            children.add(schema.getDataChildByName(augNode.getQName()));
-        }
-        return new EffectiveAugmentationSchema(augmentation, children);
-    }
-
-    /**
-     * Returns a DataNormalizationOperation for the provided child node.
-     *
-     * <p>
-     * If the supplied child was added by augmentation, this operation returns
-     * a DataNormalizationOperation for that augmentation;
-     * otherwise it returns a DataNormalizationOperation for the child,
-     * as obtained via {@link #fromDataSchemaNode(DataSchemaNode)}.
-     */
-    private static DataNormalizationOperation<?> fromAugmentation(final DataNodeContainer parent,
-            final AugmentationTarget parentAug, final DataSchemaNode child) {
-        AugmentationSchemaNode augmentation = null;
-        for (final AugmentationSchemaNode aug : parentAug.getAvailableAugmentations()) {
-            final DataSchemaNode potential = aug.getDataChildByName(child.getQName());
-            if (potential != null) {
-                augmentation = aug;
-                break;
-            }
-
-        }
-        if (augmentation != null) {
-            return new AugmentationNormalization(augmentation, parent);
-        } else {
-            return fromDataSchemaNode(child);
-        }
-    }
-
-    public static DataNormalizationOperation<?> fromDataSchemaNode(final DataSchemaNode potential) {
-        if (potential instanceof ContainerSchemaNode) {
-            return new ContainerNormalization((ContainerSchemaNode) potential);
-        } else if (potential instanceof ListSchemaNode) {
-
-            return fromListSchemaNode((ListSchemaNode) potential);
-        } else if (potential instanceof LeafSchemaNode) {
-            return new LeafNormalization((LeafSchemaNode) potential);
-        } else if (potential instanceof ChoiceSchemaNode) {
-            return new ChoiceNodeNormalization((ChoiceSchemaNode) potential);
-        } else if (potential instanceof LeafListSchemaNode) {
-            return fromLeafListSchemaNode((LeafListSchemaNode) potential);
-        } else if (potential instanceof AnyXmlSchemaNode) {
-            return new AnyXmlNormalization((AnyXmlSchemaNode) potential);
-        }
-        return null;
-    }
-
-    private static DataNormalizationOperation<?> fromListSchemaNode(final ListSchemaNode potential) {
-        final List<QName> keyDefinition = potential.getKeyDefinition();
-        if (keyDefinition == null || keyDefinition.isEmpty()) {
-            return new UnkeyedListMixinNormalization(potential);
-        }
-        if (potential.isUserOrdered()) {
-            return new OrderedMapMixinNormalization(potential);
-        }
-        return new UnorderedMapMixinNormalization(potential);
-    }
-
-    private static DataNormalizationOperation<?> fromLeafListSchemaNode(final LeafListSchemaNode potential) {
-        if (potential.isUserOrdered()) {
-            return new OrderedLeafListMixinNormalization(potential);
-        }
-        return new UnorderedLeafListMixinNormalization(potential);
-    }
-
-
-    public static DataNormalizationOperation<?> from(final SchemaContext ctx) {
-        return new ContainerNormalization(ctx);
-    }
-
-    public abstract NormalizedNode<?, ?> createDefault(PathArgument currentArg);
-}
diff --git a/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizer.java b/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataNormalizer.java
deleted file mode 100644 (file)
index df34b63..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.impl.util.compat;
-
-import static com.google.common.base.Preconditions.checkArgument;
-
-import com.google.common.collect.ImmutableList;
-import java.util.Iterator;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * Deprecated.
- *
- * @deprecated This class provides compatibility between XML semantics
- *     and {@link org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree}
- */
-@Deprecated
-public class DataNormalizer {
-
-    private final DataNormalizationOperation<?> operation;
-
-    public DataNormalizer(final SchemaContext ctx) {
-        operation = DataNormalizationOperation.from(ctx);
-    }
-
-    public YangInstanceIdentifier toNormalized(final YangInstanceIdentifier legacy) {
-        ImmutableList.Builder<PathArgument> normalizedArgs = ImmutableList.builder();
-
-        DataNormalizationOperation<?> currentOp = operation;
-        Iterator<PathArgument> arguments = legacy.getPathArguments().iterator();
-
-        try {
-            while (arguments.hasNext()) {
-                PathArgument legacyArg = arguments.next();
-                currentOp = currentOp.getChild(legacyArg);
-                checkArgument(currentOp != null,
-                        "Legacy Instance Identifier %s is not correct. Normalized Instance Identifier so far %s",
-                        legacy, normalizedArgs.build());
-                while (currentOp.isMixin()) {
-                    normalizedArgs.add(currentOp.getIdentifier());
-                    currentOp = currentOp.getChild(legacyArg.getNodeType());
-                }
-                normalizedArgs.add(legacyArg);
-            }
-        } catch (DataNormalizationException e) {
-            throw new IllegalArgumentException(String.format("Failed to normalize path %s", legacy), e);
-        }
-
-        return YangInstanceIdentifier.create(normalizedArgs.build());
-    }
-
-    public DataNormalizationOperation<?> getOperation(final YangInstanceIdentifier legacy)
-            throws DataNormalizationException {
-        DataNormalizationOperation<?> currentOp = operation;
-
-        for (PathArgument pathArgument : legacy.getPathArguments()) {
-            currentOp = currentOp.getChild(pathArgument);
-        }
-        return currentOp;
-    }
-
-    public YangInstanceIdentifier toLegacy(final YangInstanceIdentifier normalized) throws DataNormalizationException {
-        ImmutableList.Builder<PathArgument> legacyArgs = ImmutableList.builder();
-        DataNormalizationOperation<?> currentOp = operation;
-        for (PathArgument normalizedArg : normalized.getPathArguments()) {
-            currentOp = currentOp.getChild(normalizedArg);
-            if (!currentOp.isMixin()) {
-                legacyArgs.add(normalizedArg);
-            }
-        }
-        return YangInstanceIdentifier.create(legacyArgs.build());
-    }
-
-    public DataNormalizationOperation<?> getRootOperation() {
-        return operation;
-    }
-}
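
A brief sketch of the translation DataNormalizer performs; the SchemaContext and the legacy
identifier are assumed to be supplied by the caller, and the helper class is illustrative:

    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.model.api.SchemaContext;

    final class NormalizationExample {
        static YangInstanceIdentifier roundTrip(final SchemaContext ctx, final YangInstanceIdentifier legacy)
                throws DataNormalizationException {
            final DataNormalizer normalizer = new DataNormalizer(ctx);
            // toNormalized() inserts the mixin nodes (choice, map and leaf-list wrappers) the legacy form omits ...
            final YangInstanceIdentifier normalized = normalizer.toNormalized(legacy);
            // ... and toLegacy() strips them again, yielding the original-style identifier.
            return normalizer.toLegacy(normalized);
        }
    }
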
diff --git a/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataSchemaContainerProxy.java b/opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/util/compat/DataSchemaContainerProxy.java
deleted file mode 100644 (file)
index 132cc6e..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.common.impl.util.compat;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.GroupingDefinition;
-import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.UsesNode;
-
-class DataSchemaContainerProxy implements DataNodeContainer {
-
-    private final Set<DataSchemaNode> realChildSchemas;
-    private final Map<QName, DataSchemaNode> mappedChildSchemas;
-
-    DataSchemaContainerProxy(final Set<DataSchemaNode> realChildSchema) {
-        realChildSchemas = realChildSchema;
-        mappedChildSchemas = new HashMap<>();
-        for (DataSchemaNode schema : realChildSchemas) {
-            mappedChildSchemas.put(schema.getQName(), schema);
-        }
-    }
-
-    @Override
-    public Optional<DataSchemaNode> findDataChildByName(final QName name) {
-        return Optional.ofNullable(mappedChildSchemas.get(name));
-    }
-
-    @Override
-    public Set<DataSchemaNode> getChildNodes() {
-        return realChildSchemas;
-    }
-
-    @Override
-    public Set<GroupingDefinition> getGroupings() {
-        return Collections.emptySet();
-    }
-
-    @Override
-    public Set<TypeDefinition<?>> getTypeDefinitions() {
-        return Collections.emptySet();
-    }
-
-    @Override
-    public Set<UsesNode> getUses() {
-        return Collections.emptySet();
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-impl/src/test/resources/normalization-test.yang b/opendaylight/md-sal/sal-common-impl/src/test/resources/normalization-test.yang
deleted file mode 100644 (file)
index 6df5306..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-module normalization-test {
-    yang-version 1;
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:normalization:test";
-    prefix "norm-test";
-
-    revision "2014-03-13" {
-        description "Initial revision.";
-    }
-
-    grouping outer-grouping {
-    }
-
-    container test {
-        list outer-list {
-            key id;
-            leaf id {
-                type uint16;
-            }
-            choice outer-choice {
-                case one {
-                    leaf one {
-                        type string;
-                    }
-                }
-                case two-three {
-                    leaf two {
-                        type string;
-                    }
-                    leaf three {
-                        type string;
-                    }
-               }
-           }
-           list inner-list {
-                key name;
-                ordered-by user;
-
-                leaf name {
-                    type string;
-                }
-                leaf value {
-                    type string;
-                }
-            }
-        }
-
-        list unkeyed-list {
-            leaf name {
-                type string;
-            }
-        }
-
-        leaf-list unordered-leaf-list {
-            type string;
-        }
-
-        leaf-list ordered-leaf-list {
-            ordered-by user;
-            type string;
-        }
-
-        container outer-container {
-        }
-
-        anyxml any-xml-data;
-    }
-
-    augment /norm-test:test/norm-test:outer-container {
-
-        leaf augmented-leaf {
-           type string;
-        }
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-common-util/pom.xml b/opendaylight/md-sal/sal-common-util/pom.xml
index cc6e3371765bc75f4b97ac6b3b2573308908df38..a0bf479f16c90da69b96431ebb19a2ae2b6316f7 100644 (file)
@@ -4,31 +4,21 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-common-util</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-api</artifactId>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>org.eclipse.jdt.annotation</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>concepts</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-util</artifactId>
+      <artifactId>util</artifactId>
     </dependency>
   </dependencies>
 
diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/AbstractMXBean.java
index 0ce05ebd1e315b05ed91c521b2a7569809fa12eb..53d7a2f22a95b65d003a4a2d7e836bc100162143 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.md.sal.common.util.jmx;
 
-import com.google.common.annotations.Beta;
 import java.lang.management.ManagementFactory;
 import javax.management.InstanceAlreadyExistsException;
 import javax.management.InstanceNotFoundException;
@@ -32,9 +31,7 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-@Beta
 public abstract class AbstractMXBean {
-
     private static final Logger LOG = LoggerFactory.getLogger(AbstractMXBean.class);
 
     public static final String BASE_JMX_PREFIX = "org.opendaylight.controller:";
@@ -89,7 +86,7 @@ public abstract class AbstractMXBean {
         boolean registered = false;
         try {
             // Object to identify MBean
-            final ObjectName mbeanName = this.getMBeanObjectName();
+            final ObjectName mbeanName = getMBeanObjectName();
 
             LOG.debug("Register MBean {}", mbeanName);
 
@@ -110,7 +107,7 @@ public abstract class AbstractMXBean {
             LOG.debug("MBean {} registered successfully", mbeanName.getCanonicalName());
         } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException
                 | MalformedObjectNameException e) {
-            LOG.error("registration failed {}", e);
+            LOG.error("registration failed", e);
         }
         return registered;
     }
@@ -129,16 +126,13 @@ public abstract class AbstractMXBean {
      * @return true is successfully unregistered, false otherwise.
      */
     public boolean unregisterMBean() {
-        boolean unregister = false;
         try {
-            ObjectName mbeanName = this.getMBeanObjectName();
-            unregisterMBean(mbeanName);
-            unregister = true;
+            unregisterMBean(getMBeanObjectName());
+            return true;
         } catch (MBeanRegistrationException | InstanceNotFoundException | MalformedObjectNameException e) {
-            LOG.debug("Failed when unregistering MBean {}", e);
+            LOG.debug("Failed when unregistering MBean", e);
+            return false;
         }
-
-        return unregister;
     }
 
     private void unregisterMBean(ObjectName mbeanName) throws MBeanRegistrationException,
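The two hunks above also show the intended SLF4J idiom: pass the exception as the last argument rather than as a format parameter. For context, a minimal sketch of how a subclass typically uses AbstractMXBean; the RequestCounter names are invented here, and the (name, type, category) constructor is assumed from its use elsewhere in this module.

import java.util.concurrent.atomic.AtomicLong;
import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;

// Hypothetical MXBean contract, not part of this patch.
interface RequestCounterMXBean {
    long getRequestCount();
}

// Sketch of a subclass: construct, registerMBean(), and unregisterMBean() on close().
final class RequestCounter extends AbstractMXBean implements RequestCounterMXBean, AutoCloseable {
    private final AtomicLong count = new AtomicLong();

    RequestCounter() {
        super("RequestCounter", "RuntimeBean", "Counters");
    }

    void recordRequest() {
        count.incrementAndGet();
    }

    @Override
    public long getRequestCount() {
        return count.get();
    }

    @Override
    public void close() {
        unregisterMBean();
    }
}

A caller would create the instance, call registerMBean() once, and later close() it to drop the JMX registration.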
diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerMXBean.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerMXBean.java
deleted file mode 100644 (file)
index dd89e00..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.common.util.jmx;
-
-import java.util.List;
-
-import org.opendaylight.yangtools.util.concurrent.ListenerNotificationQueueStats;
-
-/**
- * MXBean interface for {@link org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager} statistic metrics.
- *
- * @author Thomas Pantelis
- */
-public interface QueuedNotificationManagerMXBean {
-
-    /**
-     * Returns a list of stat instances for each current listener notification task in progress.
-     */
-    List<ListenerNotificationQueueStats> getCurrentListenerQueueStats();
-
-    /**
-     * Returns the configured maximum listener queue size.
-     */
-    int getMaxListenerQueueSize();
-}
diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerMXBeanImpl.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerMXBeanImpl.java
deleted file mode 100644 (file)
index 0f324cb..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.common.util.jmx;
-
-import com.google.common.base.Preconditions;
-import java.util.List;
-import org.opendaylight.yangtools.util.concurrent.ListenerNotificationQueueStats;
-import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
-
-/**
- * Implementation of the QueuedNotificationManagerMXBean interface.
- *
- * <p>
- * This class is not intended for use outside of MD-SAL and its part of private
- * implementation (still exported as public to be reused across MD-SAL implementation
- * components) and may be removed in subsequent
- * releases.
- *
- * @author Thomas Pantelis
- */
-public class QueuedNotificationManagerMXBeanImpl extends AbstractMXBean
-                                                 implements QueuedNotificationManagerMXBean {
-
-    private final QueuedNotificationManager<?,?> manager;
-
-    public QueuedNotificationManagerMXBeanImpl(QueuedNotificationManager<?,?> manager,
-            String beanName, String beanType, String beanCategory) {
-        super(beanName, beanType, beanCategory);
-        this.manager = Preconditions.checkNotNull(manager);
-    }
-
-    @Override
-    public List<ListenerNotificationQueueStats> getCurrentListenerQueueStats() {
-        return manager.getListenerNotificationQueueStats();
-    }
-
-    @Override
-    public int getMaxListenerQueueSize() {
-        return manager.getMaxQueueCapacity();
-    }
-
-    public QueuedNotificationManagerStats toQueuedNotificationManagerStats() {
-        return new QueuedNotificationManagerStats(getMaxListenerQueueSize(), getCurrentListenerQueueStats());
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerStats.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/QueuedNotificationManagerStats.java
deleted file mode 100644 (file)
index 1cc62f4..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.common.util.jmx;
-
-import java.beans.ConstructorProperties;
-import java.util.List;
-
-import org.opendaylight.yangtools.util.concurrent.ListenerNotificationQueueStats;
-
-/**
- * A bean class that holds various QueuedNotificationManager statistic metrics. This class is
- * suitable for mapping to the MXBean CompositeDataSupport type.
- *
- * <p>
- * This class is not intended for use outside of MD-SAL and its part of private
- * implementation (still exported as public to be reused across MD-SAL implementation
- * components) and may be removed in subsequent
- * releases.
- * @author Thomas Pantelis
- * @see QueuedNotificationManagerMXBeanImpl
- */
-public class QueuedNotificationManagerStats {
-
-    private final int maxListenerQueueSize;
-    private final List<ListenerNotificationQueueStats> currentListenerQueueStats;
-
-    @ConstructorProperties({"maxListenerQueueSize","currentListenerQueueStats"})
-    public QueuedNotificationManagerStats(int maxListenerQueueSize,
-            List<ListenerNotificationQueueStats> currentListenerQueueStats) {
-        this.maxListenerQueueSize = maxListenerQueueSize;
-        this.currentListenerQueueStats = currentListenerQueueStats;
-    }
-
-    public List<ListenerNotificationQueueStats> getCurrentListenerQueueStats() {
-        return currentListenerQueueStats;
-    }
-
-    public int getMaxListenerQueueSize() {
-        return maxListenerQueueSize;
-    }
-}
diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStats.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStats.java
index 0a766c013d541eca8f6eaef67d05b0bb62e19578..2e8b8376d69ba6c232b6e199f784b21c273f2d11 100644 (file)
@@ -8,7 +8,7 @@
 
 package org.opendaylight.controller.md.sal.common.util.jmx;
 
-import java.beans.ConstructorProperties;
+import javax.management.ConstructorParameters;
 
 /**
  * A bean class that holds various thread executor statistic metrics. This class is suitable for
@@ -33,7 +33,7 @@ public class ThreadExecutorStats {
     private final Long largestQueueSize;
     private final Long rejectedTaskCount;
 
-    @ConstructorProperties({"activeThreadCount","currentThreadPoolSize","largestThreadPoolSize",
+    @ConstructorParameters({"activeThreadCount","currentThreadPoolSize","largestThreadPoolSize",
         "maxThreadPoolSize","currentQueueSize","largestQueueSize","maxQueueSize",
         "completedTaskCount","totalTaskCount","rejectedTaskCount"})
     public ThreadExecutorStats(long activeThreadCount, long currentThreadPoolSize,
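javax.management.ConstructorParameters (Java 9+) is the java.management-native counterpart of java.beans.ConstructorProperties for MXBean CompositeData mapping, which is what this hunk switches to. A self-contained sketch of the pattern; the bean and its fields are invented for illustration:

import java.util.List;
import javax.management.ConstructorParameters;

// Hypothetical stats bean: the names in @ConstructorParameters must match the
// properties derived from the getters so the MXBean framework can reconstruct
// instances from CompositeData.
public class ExampleQueueStats {
    private final int maxQueueSize;
    private final List<String> queueNames;

    @ConstructorParameters({"maxQueueSize", "queueNames"})
    public ExampleQueueStats(final int maxQueueSize, final List<String> queueNames) {
        this.maxQueueSize = maxQueueSize;
        this.queueNames = queueNames;
    }

    public int getMaxQueueSize() {
        return maxQueueSize;
    }

    public List<String> getQueueNames() {
        return queueNames;
    }
}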
diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/sal/common/util/Arguments.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/sal/common/util/Arguments.java
deleted file mode 100644 (file)
index d09024a..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.common.util;
-
-public final class Arguments {
-
-    private Arguments() {
-        throw new UnsupportedOperationException("Utility class");
-    }
-
-    /**
-     * Checks if value is instance of provided class.
-     *
-     * @param value Value to check
-     * @param type Type to check
-     * @return Reference which was checked
-     */
-    @SuppressWarnings("unchecked")
-    public static <T> T checkInstanceOf(Object value, Class<T> type) {
-        if (!type.isInstance(value)) {
-            throw new IllegalArgumentException(String.format("Value %s is not of type %s", value, type));
-        }
-        return (T) value;
-    }
-}
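The deleted helper performs an isInstance() check followed by an unchecked cast. Purely for reference, the same behaviour can be written without @SuppressWarnings("unchecked") by using Class.cast(); the class name below is invented:

// Illustrative only; not part of this patch.
final class TypeChecks {
    private TypeChecks() {
        // utility class
    }

    static <T> T checkInstanceOf(final Object value, final Class<T> type) {
        if (!type.isInstance(value)) {
            throw new IllegalArgumentException("Value " + value + " is not of type " + type);
        }
        // Class.cast() performs the checked cast the removed helper did with an unchecked cast.
        return type.cast(value);
    }
}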
diff --git a/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/sal/common/util/NoopAutoCloseable.java b/opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/sal/common/util/NoopAutoCloseable.java
deleted file mode 100644 (file)
index 7f95f96..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.common.util;
-
-/**
- * An AutoCloseable that does nothing.
- *
- * @author Thomas Pantelis
- */
-public final class NoopAutoCloseable implements AutoCloseable {
-    public static final NoopAutoCloseable INSTANCE = new NoopAutoCloseable();
-
-    private NoopAutoCloseable() {
-    }
-
-    @Override
-    public void close() {
-    }
-}
diff --git a/opendaylight/md-sal/sal-connector-api/pom.xml b/opendaylight/md-sal/sal-connector-api/pom.xml
deleted file mode 100644 (file)
index 8de0258..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-connector-api</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>yang-binding</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-model-api</artifactId>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/BindingAwareRpcRouter.java b/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/BindingAwareRpcRouter.java
deleted file mode 100644 (file)
index 22366f8..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connector.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.concurrent.Future;
-import org.opendaylight.yangtools.concepts.Immutable;
-
-public interface BindingAwareRpcRouter extends RpcRouter<String, String, String, byte[]> {
-
-    @Override
-    Future<org.opendaylight.controller.sal.connector.api.RpcRouter.RpcReply<byte[]>> sendRpc(
-            RpcRequest<String, String, String, byte[]> input);
-
-    @SuppressFBWarnings("EI_EXPOSE_REP2")
-    class BindingAwareRequest implements RpcRequest<String, String, String, byte[]>, Immutable {
-
-        private final BindingAwareRouteIdentifier routingInformation;
-        private final byte[] payload;
-
-        public BindingAwareRequest(BindingAwareRouteIdentifier routingInformation, byte[] payload) {
-            this.routingInformation = routingInformation;
-            this.payload = payload;
-        }
-
-        @Override
-        public BindingAwareRouteIdentifier getRoutingInformation() {
-            return this.routingInformation;
-        }
-
-        @Override
-        @SuppressFBWarnings("EI_EXPOSE_REP")
-        public byte[] getPayload() {
-            return payload;
-        }
-    }
-
-    class BindingAwareRouteIdentifier implements RouteIdentifier<String, String, String>, Immutable {
-
-        private final String type;
-        private final String route;
-        private final String content;
-
-        public BindingAwareRouteIdentifier(String type, String route, String content) {
-            this.type = type;
-            this.route = route;
-            this.content = content;
-        }
-
-        /**
-         * Java class name of Rpc Context.
-         */
-        @Override
-        public String getContext() {
-            return this.content;
-        }
-
-        /**
-         * String representation of route e.g. node-id
-         *
-         */
-        @Override
-        public String getRoute() {
-            return this.route;
-        }
-
-        /**
-         * Java class name of Rpc Type e.g org.opendaylight.AddFlowInput.
-         */
-        @Override
-        public String getType() {
-            return this.type;
-        }
-
-        @Override
-        public int hashCode() {
-            final int prime = 31;
-            int result = 1;
-            result = prime * result + (content == null ? 0 : content.hashCode());
-            result = prime * result + (route == null ? 0 : route.hashCode());
-            result = prime * result + (type == null ? 0 : type.hashCode());
-            return result;
-        }
-
-        @Override
-        public boolean equals(Object obj) {
-            if (this == obj) {
-                return true;
-            }
-            if (obj == null) {
-                return false;
-            }
-            if (getClass() != obj.getClass()) {
-                return false;
-            }
-            BindingAwareRouteIdentifier other = (BindingAwareRouteIdentifier) obj;
-            if (content == null) {
-                if (other.content != null) {
-                    return false;
-                }
-            } else if (!content.equals(other.content)) {
-                return false;
-            }
-            if (route == null) {
-                if (other.route != null) {
-                    return false;
-                }
-            } else if (!route.equals(other.route)) {
-                return false;
-            }
-            if (type == null) {
-                if (other.type != null) {
-                    return false;
-                }
-            } else if (!type.equals(other.type)) {
-                return false;
-            }
-            return true;
-        }
-    }
-}
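Most of the deleted BindingAwareRouteIdentifier is hand-rolled, null-safe equals()/hashCode() boilerplate. For comparison only, the same contract expressed with java.util.Objects; RouteKey is a made-up stand-in, not part of the codebase:

import java.util.Objects;

// Illustrative value class with the same equality semantics as the removed identifier.
final class RouteKey {
    private final String type;
    private final String route;
    private final String content;

    RouteKey(final String type, final String route, final String content) {
        this.type = type;
        this.route = route;
        this.content = content;
    }

    @Override
    public int hashCode() {
        return Objects.hash(content, route, type);
    }

    @Override
    public boolean equals(final Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof RouteKey)) {
            return false;
        }
        final RouteKey other = (RouteKey) obj;
        return Objects.equals(content, other.content) && Objects.equals(route, other.route)
            && Objects.equals(type, other.type);
    }
}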
diff --git a/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/BindingAwareZeroMqRpcRouter.java b/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/BindingAwareZeroMqRpcRouter.java
deleted file mode 100644 (file)
index 27e403e..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connector.api;
-
-import java.util.concurrent.Future;
-
-public class BindingAwareZeroMqRpcRouter implements BindingAwareRpcRouter {
-
-    BindingAwareRpcRouter mdSalRouter;
-
-    public BindingAwareRpcRouter getMdSalRouter() {
-        return mdSalRouter;
-    }
-
-
-    public void setMdSalRouter(BindingAwareRpcRouter mdSalRouter) {
-        this.mdSalRouter = mdSalRouter;
-    }
-
-
-    @Override
-    public Future<RpcReply<byte[]>> sendRpc(RpcRequest<String, String, String, byte[]> input) {
-        // Write message down to the wire
-        return null;
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/ConnectorListener.java b/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/ConnectorListener.java
deleted file mode 100644 (file)
index 5f99add..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connector.api;
-
-import java.util.Set;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public interface ConnectorListener {
-    void onPrefixesAnnounced(Set<YangInstanceIdentifier> prefixes);
-
-    void onPrefixesWithdrawn(Set<YangInstanceIdentifier> prefixes);
-}
diff --git a/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/RpcRouter.java b/opendaylight/md-sal/sal-connector-api/src/main/java/org/opendaylight/controller/sal/connector/api/RpcRouter.java
deleted file mode 100644 (file)
index c56560a..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connector.api;
-
-import java.util.concurrent.Future;
-
-/**
- * Interface for an RPC router.
- *
- * @author ttkacik
- *
- * @param <C> Routing Context Identifier
- * @param <R> Route Type
- * @param <T> Rpc Type
- * @param <D> Data Type
- */
-public interface RpcRouter<C,T,R,D> {
-    Future<RpcReply<D>> sendRpc(RpcRequest<C, T, R, D> input);
-
-
-    /**
-     * Interface for an RPC request.
-     *
-     * @param <C> Routing Context Identifier
-     * @param <R> Route Type
-     * @param <T> Rpc Type
-     * @param <D> Data Type
-     */
-    interface RpcRequest<C,T,R,D> {
-
-        RouteIdentifier<C,T,R> getRoutingInformation();
-
-        D getPayload();
-    }
-
-    interface RouteIdentifier<C,T,R> {
-
-        C getContext(); // defines a routing table (e.g. NodeContext)
-
-        T getType(); // rpc type
-
-        R getRoute(); // e.g. (node identity)
-    }
-
-    interface RpcReply<D> {
-        D getPayload();
-    }
-}
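The Javadoc above fixes the four type parameters (routing context, rpc type, route, data). A toy implementation, purely to illustrate the generic contract; the deleted production routers were meant to forward requests to remote nodes rather than answer locally:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import org.opendaylight.controller.sal.connector.api.RpcRouter;

// Echoes the request payload back as the reply; for illustration only.
final class EchoRpcRouter implements RpcRouter<String, String, String, byte[]> {
    @Override
    public Future<RpcReply<byte[]>> sendRpc(final RpcRequest<String, String, String, byte[]> input) {
        final RpcReply<byte[]> reply = input::getPayload;
        return CompletableFuture.completedFuture(reply);
    }
}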
diff --git a/opendaylight/md-sal/sal-distributed-datastore/pom.xml b/opendaylight/md-sal/sal-distributed-datastore/pom.xml
index 19bd47553a5f4f006ce3c929596f820758d03769..2d397cdcf5b5534e5c98e36f6f30421163221392 100644 (file)
@@ -4,33 +4,21 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
   <artifactId>sal-distributed-datastore</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
-  <dependencyManagement>
-    <dependencies>
-      <dependency>
-        <groupId>tech.pantheon.triemap</groupId>
-        <artifactId>bom</artifactId>
-        <version>1.0.4</version>
-        <scope>import</scope>
-        <type>pom</type>
-      </dependency>
-    </dependencies>
-  </dependencyManagement>
-
   <dependencies>
-    <!-- Java -->
     <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <optional>true</optional>
     </dependency>
+
+    <!-- Java -->
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-simple</artifactId>
 
     <dependency>
       <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
-    </dependency>
-
-    <!-- Akka -->
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-actor_2.12</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-cluster_2.12</artifactId>
+      <artifactId>org.osgi.framework</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-osgi_2.12</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-persistence_2.12</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-remote_2.12</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
     </dependency>
     <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-slf4j_2.12</artifactId>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.metatype.annotations</artifactId>
     </dependency>
+
+    <!-- Akka -->
     <dependency>
       <groupId>org.scala-lang.modules</groupId>
-      <artifactId>scala-java8-compat_2.12</artifactId>
+      <artifactId>scala-java8-compat_2.13</artifactId>
     </dependency>
     <dependency>
       <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-testkit_2.12</artifactId>
-      <scope>test</scope>
+      <artifactId>akka-testkit_2.13</artifactId>
     </dependency>
 
     <!-- Scala -->
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>cds-dom-api</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>cds-mgmt-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-akka-raft-example</artifactId>
       <type>test-jar</type>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-clustering-commons</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-api</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-common-util</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-compat</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-akka-segmented-journal</artifactId>
 
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-api</artifactId>
+      <artifactId>mdsal-binding-dom-codec-api</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-eos-dom-api</artifactId>
+      <artifactId>mdsal-common-api</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-spi</artifactId>
+      <artifactId>mdsal-dom-api</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-broker</artifactId>
+      <artifactId>mdsal-dom-spi</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-codec</artifactId>
+      <artifactId>mdsal-dom-broker</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-spi</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>concepts</artifactId>
     </dependency>
-
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>concepts</artifactId>
+      <artifactId>util</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-impl</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-tree-ri</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-codec-binfmt</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.yangtools</groupId>
       <artifactId>yang-data-codec-xml</artifactId>
     </dependency>
     <dependency>
-      <groupId>tech.pantheon.triemap</groupId>
-      <artifactId>triemap</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-codec-gson</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-model-util</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-text</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-inmemory-datastore</artifactId>
-    </dependency>
 
     <dependency>
-      <groupId>io.atomix</groupId>
-      <artifactId>atomix-storage</artifactId>
-      <version>3.1.5</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>io.atomix</groupId>
-      <artifactId>atomix-utils</artifactId>
-      <version>3.1.5</version>
+      <groupId>org.awaitility</groupId>
+      <artifactId>awaitility</artifactId>
       <scope>test</scope>
     </dependency>
     <dependency>
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-test-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-dom-codec</artifactId>
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-test-utils</artifactId>
     </dependency>
   </dependencies>
 
         <configuration>
           <instructions>
             <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
+
             <Export-Package>
                 org.opendaylight.controller.cluster.datastore;
                 org.opendaylight.controller.cluster.datastore.config;
                 org.opendaylight.controller.cluster.datastore.exceptions;
+                org.opendaylight.controller.cluster.datastore.identifiers;
                 org.opendaylight.controller.cluster.datastore.messages;
+                org.opendaylight.controller.cluster.datastore.modification;
                 org.opendaylight.controller.cluster.datastore.persisted;
                 org.opendaylight.controller.cluster.datastore.utils;
                 org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
       </plugin>
 
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
           <execution>
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <artifactId>maven-source-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar-no-fork</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
     </plugins>
   </build>
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/impl/ActorSystemProviderImpl.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/impl/ActorSystemProviderImpl.java
index 0d43250f5d42c2c67370f8e83d01f3aeaa82bb9f..4868d04f7d8df9188b4b324d5cd9176d132f9e1e 100644 (file)
@@ -9,8 +9,12 @@ package org.opendaylight.controller.cluster.akka.impl;
 
 import akka.actor.ActorSystem;
 import akka.actor.Props;
+import akka.actor.Terminated;
+import akka.dispatch.OnComplete;
 import com.typesafe.config.Config;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.ActorSystemProviderListener;
 import org.opendaylight.controller.cluster.common.actor.QuarantinedMonitorActor;
@@ -20,13 +24,15 @@ import org.opendaylight.yangtools.util.ListenerRegistry;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
+import scala.concurrent.ExecutionContext;
+import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
 public class ActorSystemProviderImpl implements ActorSystemProvider, AutoCloseable {
     private static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
     private static final Logger LOG = LoggerFactory.getLogger(ActorSystemProviderImpl.class);
 
-    private final ActorSystem actorSystem;
+    private final @NonNull ActorSystem actorSystem;
     private final ListenerRegistry<ActorSystemProviderListener> listeners = ListenerRegistry.create();
 
     public ActorSystemProviderImpl(
@@ -34,7 +40,6 @@ public class ActorSystemProviderImpl implements ActorSystemProvider, AutoCloseab
         LOG.info("Creating new ActorSystem");
 
         actorSystem = ActorSystem.create(ACTOR_SYSTEM_NAME, akkaConfig, classLoader);
-
         actorSystem.actorOf(Props.create(TerminationMonitor.class), TerminationMonitor.ADDRESS);
         actorSystem.actorOf(quarantinedMonitorActorProps, QuarantinedMonitorActor.ADDRESS);
     }
@@ -50,15 +55,29 @@ public class ActorSystemProviderImpl implements ActorSystemProvider, AutoCloseab
         return listeners.register(listener);
     }
 
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void close() {
+    public Future<Terminated> asyncClose() {
         LOG.info("Shutting down ActorSystem");
 
-        try {
-            Await.result(actorSystem.terminate(), FiniteDuration.create(10, TimeUnit.SECONDS));
-        } catch (final Exception e) {
-            LOG.warn("Error awaiting actor termination", e);
-        }
+        final Future<Terminated> ret = actorSystem.terminate();
+        ret.onComplete(new OnComplete<Terminated>() {
+            @Override
+            public void onComplete(final Throwable failure, final Terminated success) throws Throwable {
+                if (failure != null) {
+                    LOG.warn("ActorSystem failed to shut down", failure);
+                } else {
+                    LOG.info("ActorSystem shut down");
+                }
+            }
+        }, ExecutionContext.global());
+        return ret;
+    }
+
+    public void close(final FiniteDuration wait) throws TimeoutException, InterruptedException {
+        Await.result(asyncClose(), wait);
+    }
+
+    @Override
+    public void close() throws TimeoutException, InterruptedException {
+        close(FiniteDuration.create(10, TimeUnit.SECONDS));
     }
 }
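The new asyncClose() lets callers react to ActorSystem termination without blocking, while close(FiniteDuration) and close() keep the old synchronous behaviour with an explicit bound. A hypothetical caller-side sketch (the helper class is invented here):

import akka.actor.Terminated;
import akka.dispatch.OnComplete;
import org.opendaylight.controller.cluster.akka.impl.ActorSystemProviderImpl;
import scala.concurrent.ExecutionContext;

// Runs follow-up cleanup once the ActorSystem has terminated, without blocking the caller.
final class ShutdownHelper {
    private ShutdownHelper() {
        // utility class
    }

    static void shutdownAsync(final ActorSystemProviderImpl provider, final Runnable afterShutdown) {
        provider.asyncClose().onComplete(new OnComplete<Terminated>() {
            @Override
            public void onComplete(final Throwable failure, final Terminated done) {
                afterShutdown.run();
            }
        }, ExecutionContext.global());
    }
}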
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/OSGiActorSystemProvider.java
new file mode 100644 (file)
index 0000000..60a72b0
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.akka.osgi.impl;
+
+import akka.actor.ActorSystem;
+import java.util.concurrent.TimeoutException;
+import org.opendaylight.controller.cluster.ActorSystemProvider;
+import org.opendaylight.controller.cluster.ActorSystemProviderListener;
+import org.opendaylight.controller.cluster.akka.impl.ActorSystemProviderImpl;
+import org.opendaylight.controller.cluster.akka.impl.AkkaConfigFactory;
+import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.osgi.framework.BundleContext;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Await;
+import scala.concurrent.duration.Duration;
+
+@Component(immediate = true)
+public final class OSGiActorSystemProvider implements ActorSystemProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(OSGiActorSystemProvider.class);
+
+    private ActorSystemProviderImpl delegate;
+
+    @Activate
+    public OSGiActorSystemProvider(@Reference final AkkaConfigurationReader reader, final BundleContext bundleContext) {
+        LOG.info("Actor System provider starting");
+        final var akkaConfig = AkkaConfigFactory.createAkkaConfig(reader);
+        delegate = new ActorSystemProviderImpl(BundleClassLoaderFactory.createClassLoader(bundleContext),
+            QuarantinedMonitorActorPropsFactory.createProps(bundleContext, akkaConfig), akkaConfig);
+        LOG.info("Actor System provider started");
+    }
+
+    @Deactivate
+    void deactivate() throws TimeoutException, InterruptedException {
+        LOG.info("Actor System provider stopping");
+        Await.result(delegate.asyncClose(), Duration.Inf());
+        delegate = null;
+        LOG.info("Actor System provider stopped");
+    }
+
+    @Override
+    public ActorSystem getActorSystem() {
+        return delegate.getActorSystem();
+    }
+
+    @Override
+    public ListenerRegistration<ActorSystemProviderListener> registerActorSystemProviderListener(
+            final ActorSystemProviderListener listener) {
+        return delegate.registerActorSystemProviderListener(listener);
+    }
+}
+
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/QuarantinedMonitorActorPropsFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/akka/osgi/impl/QuarantinedMonitorActorPropsFactory.java
index 75872f4ddbf202d3ed5f4d68b22fe523072572f8..eb98c4f3399973d17f57facb9c1961aceb3ad1db 100644 (file)
@@ -8,6 +8,8 @@
 package org.opendaylight.controller.cluster.akka.osgi.impl;
 
 import akka.actor.Props;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigException;
 import org.opendaylight.controller.cluster.common.actor.QuarantinedMonitorActor;
 import org.osgi.framework.BundleContext;
 import org.slf4j.Logger;
@@ -16,11 +18,23 @@ import org.slf4j.LoggerFactory;
 public final class QuarantinedMonitorActorPropsFactory {
     private static final Logger LOG = LoggerFactory.getLogger(QuarantinedMonitorActorPropsFactory.class);
 
+    private static final String DEFAULT_HANDLING_DISABLED =
+        "akka.disable-default-actor-system-quarantined-event-handling";
+
     private QuarantinedMonitorActorPropsFactory() {
 
     }
 
-    public static Props createProps(final BundleContext bundleContext) {
+    public static Props createProps(final BundleContext bundleContext, final Config akkaConfig) {
+        try {
+            if (akkaConfig.getBoolean(DEFAULT_HANDLING_DISABLED)) {
+                LOG.info("{} was set, default handling is disabled", DEFAULT_HANDLING_DISABLED);
+                return QuarantinedMonitorActor.props(() -> { });
+            }
+        } catch (ConfigException configEx) {
+            LOG.info("Akka config doesn't contain property {}. Therefore default handling will be used",
+                DEFAULT_HANDLING_DISABLED);
+        }
         return QuarantinedMonitorActor.props(() -> {
             // restart the entire karaf container
             LOG.warn("Restarting karaf container");
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java
deleted file mode 100644 (file)
index f059676..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap.Builder;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory<DOMStore> implements DOMDataBroker {
-
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class);
-
-    private final AtomicLong txNum = new AtomicLong();
-    private final AtomicLong chainNum = new AtomicLong();
-    private final ClassToInstanceMap<DOMDataBrokerExtension> extensions;
-
-    private volatile AutoCloseable closeable;
-
-    protected AbstractDOMBroker(final Map<LogicalDatastoreType, DOMStore> datastores) {
-        super(datastores);
-
-        Builder<DOMDataBrokerExtension> extBuilder = ImmutableClassToInstanceMap.builder();
-        if (isSupported(datastores, DOMStoreTreeChangePublisher.class)) {
-            extBuilder.put(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
-                @Override
-                public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(
-                        final DOMDataTreeIdentifier treeId, final L listener) {
-                    DOMStore store = getDOMStore(treeId.getDatastoreType());
-                    return ((DOMStoreTreeChangePublisher) store).registerTreeChangeListener(
-                            treeId.getRootIdentifier(), listener);
-                }
-            });
-        }
-
-        if (isSupported(datastores, DOMDataTreeCommitCohortRegistry.class)) {
-            extBuilder.put(DOMDataTreeCommitCohortRegistry.class, new DOMDataTreeCommitCohortRegistry() {
-                @Override
-                public <T extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<T> registerCommitCohort(
-                        final DOMDataTreeIdentifier path, final T cohort) {
-                    DOMStore store = getDOMStore(path.getDatastoreType());
-                    return ((DOMDataTreeCommitCohortRegistry) store).registerCommitCohort(path, cohort);
-                }
-            });
-        }
-
-        extensions = extBuilder.build();
-    }
-
-    private static boolean isSupported(final Map<LogicalDatastoreType, DOMStore> datastores,
-            final Class<?> expDOMStoreInterface) {
-        return datastores.values().stream().allMatch(expDOMStoreInterface::isInstance);
-    }
-
-    public void setCloseable(final AutoCloseable closeable) {
-        this.closeable = closeable;
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void close() {
-        super.close();
-
-        if (closeable != null) {
-            try {
-                closeable.close();
-            } catch (Exception e) {
-                LOG.debug("Error closing instance", e);
-            }
-        }
-    }
-
-    @Override
-    protected Object newTransactionIdentifier() {
-        return "DOM-" + txNum.getAndIncrement();
-    }
-
-    @Override
-    public ClassToInstanceMap<DOMDataBrokerExtension> getExtensions() {
-        return extensions;
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) {
-        checkNotClosed();
-
-        final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains =
-                new EnumMap<>(LogicalDatastoreType.class);
-        for (Map.Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
-            backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
-        }
-
-        final long chainId = chainNum.getAndIncrement();
-        LOG.debug("Transaction chain {} created with listener {}, backing store chains {}", chainId, listener,
-                backingChains);
-        return new DOMBrokerTransactionChain(chainId, backingChains, this, listener);
-    }
-
-    private DOMStore getDOMStore(final LogicalDatastoreType type) {
-        DOMStore store = getTxFactories().get(type);
-        checkState(store != null, "Requested logical data store is not available.");
-        return store;
-    }
-}
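The deleted broker exposes its optional capabilities through a ClassToInstanceMap keyed by extension interface, so callers can look them up type-safely. A minimal, self-contained sketch of that registry idiom; the Extension and PingExtension types are invented for illustration:

import com.google.common.collect.ClassToInstanceMap;
import com.google.common.collect.ImmutableClassToInstanceMap;

// Capability interfaces are keyed by their Class, keeping lookups type-safe.
final class ExtensionRegistryExample {
    interface Extension {
    }

    interface PingExtension extends Extension {
        String ping();
    }

    private ExtensionRegistryExample() {
        // utility class
    }

    static ClassToInstanceMap<Extension> buildExtensions() {
        return ImmutableClassToInstanceMap.<Extension>builder()
            .put(PingExtension.class, () -> "pong")
            .build();
    }

    static String usePing(final ClassToInstanceMap<Extension> extensions) {
        final PingExtension ping = extensions.getInstance(PingExtension.class);
        return ping == null ? "unavailable" : ping.ping();
    }
}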
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerTransaction.java
deleted file mode 100644 (file)
index 2655b61..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-
-public abstract class AbstractDOMBrokerTransaction<T extends DOMStoreTransaction> implements DOMDataTreeTransaction {
-
-    private final EnumMap<LogicalDatastoreType, T> backingTxs;
-    private final Object identifier;
-    private final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories;
-
-    /**
-     * Creates new composite Transactions.
-     *
-     * @param identifier Identifier of transaction.
-     */
-    protected AbstractDOMBrokerTransaction(final Object identifier,
-            Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
-        this.identifier = requireNonNull(identifier, "Identifier should not be null");
-        this.storeTxFactories = requireNonNull(storeTxFactories, "Store Transaction Factories should not be null");
-        this.backingTxs = new EnumMap<>(LogicalDatastoreType.class);
-    }
-
-    /**
-     * Returns subtransaction associated with supplied key.
-     *
-     * @param key the data store type key
-     * @return the subtransaction
-     * @throws NullPointerException
-     *             if key is null
-     * @throws IllegalArgumentException
-     *             if no subtransaction is associated with key.
-     */
-    protected final T getSubtransaction(final LogicalDatastoreType key) {
-        requireNonNull(key, "key must not be null.");
-
-        T ret = backingTxs.get(key);
-        if (ret == null) {
-            ret = createTransaction(key);
-            backingTxs.put(key, ret);
-        }
-        checkArgument(ret != null, "No subtransaction associated with %s", key);
-        return ret;
-    }
-
-    protected abstract T createTransaction(LogicalDatastoreType key);
-
-    /**
-     * Returns immutable Iterable of all subtransactions.
-     *
-     */
-    protected Collection<T> getSubtransactions() {
-        return backingTxs.values();
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return identifier;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    protected void closeSubtransactions() {
-        /*
-         * We share one exception for all failures, which are added
-         * as supressedExceptions to it.
-         */
-        IllegalStateException failure = null;
-        for (T subtransaction : backingTxs.values()) {
-            try {
-                subtransaction.close();
-            } catch (Exception e) {
-                // If we did not allocated failure we allocate it
-                if (failure == null) {
-                    failure = new IllegalStateException("Uncaught exception occured during closing transaction", e);
-                } else {
-                    // We update it with additional exceptions, which occurred during error.
-                    failure.addSuppressed(e);
-                }
-            }
-        }
-        // If we have failure, we throw it at after all attempts to close.
-        if (failure != null) {
-            throw failure;
-        }
-    }
-
-    protected DOMStoreTransactionFactory getTxFactory(LogicalDatastoreType type) {
-        return storeTxFactories.get(type);
-    }
-
-    @Override
-    public final String toString() {
-        return addToStringAttributes(MoreObjects.toStringHelper(this).omitNullValues()).toString();
-    }
-
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("identifier", identifier);
-    }
-}
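closeSubtransactions() above keeps the first failure as the primary exception and attaches later ones via addSuppressed(), so no failure is silently dropped. The same pattern in a generic, standalone form (class name invented):

import java.util.Collection;

// Close everything, remember the first failure, attach the rest as suppressed exceptions.
final class Closeables {
    private Closeables() {
        // utility class
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    static void closeAll(final Collection<? extends AutoCloseable> closeables) {
        IllegalStateException failure = null;
        for (AutoCloseable closeable : closeables) {
            try {
                closeable.close();
            } catch (Exception e) {
                if (failure == null) {
                    failure = new IllegalStateException("Uncaught exception occurred during close", e);
                } else {
                    failure.addSuppressed(e);
                }
            }
        }
        if (failure != null) {
            throw failure;
        }
    }
}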
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransaction.java
deleted file mode 100644 (file)
index 1d661eb..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractDOMBrokerWriteTransaction<T extends DOMStoreWriteTransaction>
-        extends AbstractDOMBrokerTransaction<T> implements DOMDataTreeWriteTransaction {
-
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<AbstractDOMBrokerWriteTransaction, AbstractDOMTransactionFactory>
-            IMPL_UPDATER = AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class,
-                    AbstractDOMTransactionFactory.class, "commitImpl");
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<AbstractDOMBrokerWriteTransaction, Future> FUTURE_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(AbstractDOMBrokerWriteTransaction.class, Future.class,
-                    "commitFuture");
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBrokerWriteTransaction.class);
-    private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
-
-    /**
-     * Implementation of real commit. It also acts as an indication that
-     * the transaction is running -- which we flip atomically using
-     * {@link #IMPL_UPDATER}.
-     */
-    private volatile AbstractDOMTransactionFactory<?> commitImpl;
-
-    /**
-     * Future task of transaction commit. It starts off as null, but is
-     * set appropriately on {@link #submit()} and {@link #cancel()} via
-     * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
-     * <p/>
-     * Lazy set is safe for use because it is only referenced to in the
-     * {@link #cancel()} slow path, where we will busy-wait for it. The
-     * fast path gets the benefit of a store-store barrier instead of the
-     * usual store-load barrier.
-     */
-    private volatile Future<?> commitFuture;
-
-    protected AbstractDOMBrokerWriteTransaction(final Object identifier,
-            final Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories,
-            final AbstractDOMTransactionFactory<?> commitImpl) {
-        super(identifier, storeTxFactories);
-        this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        checkRunning(commitImpl);
-        checkInstanceIdentifierReferencesData(path,data);
-        getSubtransaction(store).write(path, data);
-    }
-
-    private static void checkInstanceIdentifierReferencesData(final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        Preconditions.checkArgument(data != null, "Attempted to store null data at %s", path);
-        final PathArgument lastArg = path.getLastPathArgument();
-        Preconditions.checkArgument(
-                lastArg == data.getIdentifier() || lastArg != null && lastArg.equals(data.getIdentifier()),
-                "Instance identifier references %s but data identifier is %s", lastArg, data);
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        checkRunning(commitImpl);
-        getSubtransaction(store).delete(path);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        checkRunning(commitImpl);
-        checkInstanceIdentifierReferencesData(path, data);
-        getSubtransaction(store).merge(path, data);
-    }
-
-    @Override
-    public boolean cancel() {
-        final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
-        if (impl != null) {
-            LOG.trace("Transaction {} cancelled before submit", getIdentifier());
-            FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
-            closeSubtransactions();
-            return true;
-        }
-
-        // The transaction is in process of being submitted or cancelled. Busy-wait
-        // for the corresponding future.
-        Future<?> future;
-        do {
-            future = commitFuture;
-        }
-        while (future == null);
-
-        return future.cancel(false);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public FluentFuture<? extends CommitInfo> commit() {
-        final AbstractDOMTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
-        checkRunning(impl);
-
-        final Collection<T> txns = getSubtransactions();
-        final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
-
-        FluentFuture<? extends CommitInfo> ret;
-        try {
-            for (final T txn : txns) {
-                cohorts.add(txn.ready());
-            }
-
-            ret = impl.commit(this, cohorts);
-        } catch (RuntimeException e) {
-            ret = FluentFuture.from(Futures.immediateFailedFuture(
-                    TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e)));
-        }
-        FUTURE_UPDATER.lazySet(this, ret);
-        return ret;
-    }
-
-    private void checkRunning(final AbstractDOMTransactionFactory<?> impl) {
-        Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
-    }
-
-    @Override
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return super.addToStringAttributes(toStringHelper).add("running", commitImpl == null);
-    }
-}
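For reference, the once-only handoff documented in the removed class above boils down to an AtomicReferenceFieldUpdater.getAndSet() race: whichever of commit() or cancel() first nulls the field wins and the loser observes null. A minimal standalone sketch of that pattern follows; the OnceOnly class and its Runnable payload are illustrative only and are not part of this change.

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

final class OnceOnly {
    private static final AtomicReferenceFieldUpdater<OnceOnly, Runnable> IMPL_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(OnceOnly.class, Runnable.class, "impl");

    // Holds the real commit action until either commit() or cancel() claims it.
    private volatile Runnable impl;

    OnceOnly(final Runnable impl) {
        this.impl = impl;
    }

    boolean commit() {
        final Runnable claimed = IMPL_UPDATER.getAndSet(this, null);
        if (claimed == null) {
            return false;   // lost the race: already committed or cancelled
        }
        claimed.run();      // won the race: perform the real commit exactly once
        return true;
    }

    boolean cancel() {
        // Same atomic claim; a non-null result means no commit will ever run.
        return IMPL_UPDATER.getAndSet(this, null) != null;
    }
}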
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMTransactionFactory.java
deleted file mode 100644 (file)
index 91ca744..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-
-public abstract class AbstractDOMTransactionFactory<T extends DOMStoreTransactionFactory> implements AutoCloseable {
-    @SuppressWarnings("rawtypes")
-    private static final AtomicIntegerFieldUpdater<AbstractDOMTransactionFactory> UPDATER =
-            AtomicIntegerFieldUpdater.newUpdater(AbstractDOMTransactionFactory.class, "closed");
-    private final Map<LogicalDatastoreType, T> storeTxFactories;
-    private volatile int closed = 0;
-
-    protected AbstractDOMTransactionFactory(final Map<LogicalDatastoreType, T> txFactories) {
-        this.storeTxFactories = new EnumMap<>(txFactories);
-    }
-
-    /**
-     * Implementations must return unique identifier for each and every call of
-     * this method.
-     *
-     * @return new Unique transaction identifier.
-     */
-    protected abstract Object newTransactionIdentifier();
-
-    /**
-     * Submits a transaction asynchronously for commit.
-     *
-     * @param transaction the transaction to submit
-     * @param cohorts the associated cohorts
-     * @return a resulting Future
-     */
-    protected abstract FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction transaction,
-            Collection<DOMStoreThreePhaseCommitCohort> cohorts);
-
-    /**
-     * Creates a new read-only transaction.
-     *
-     * @return the transaction instance
-     */
-    public final DOMDataTreeReadTransaction newReadOnlyTransaction() {
-        checkNotClosed();
-
-        return new DOMBrokerReadOnlyTransaction(newTransactionIdentifier(), storeTxFactories);
-    }
-
-
-    /**
-     * Creates a new write-only transaction.
-     *
-     * @return the transaction instance
-     */
-    public final DOMDataTreeWriteTransaction newWriteOnlyTransaction() {
-        checkNotClosed();
-
-        return new DOMBrokerWriteOnlyTransaction(newTransactionIdentifier(), storeTxFactories, this);
-    }
-
-
-    /**
-     * Creates a new read-write transaction.
-     *
-     * @return the transaction instance
-     */
-    public final DOMDataTreeReadWriteTransaction newReadWriteTransaction() {
-        checkNotClosed();
-
-        return new DOMBrokerReadWriteTransaction(newTransactionIdentifier(), storeTxFactories, this);
-    }
-
-    /**
-     * Convenience accessor of backing factories intended to be used only by
-     * finalization of this class.
-     *
-     * <b>Note:</b>
-     * Finalization of this class may want to access other functionality of
-     * supplied Transaction factories.
-     *
-     * @return Map of backing transaction factories.
-     */
-    public final Map<LogicalDatastoreType, T> getTxFactories() {
-        return storeTxFactories;
-    }
-
-    /**
-     * Checks if instance is not closed.
-     *
-     * @throws IllegalStateException If instance of this class was closed.
-     *
-     */
-    protected final void checkNotClosed() {
-        Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
-    }
-
-    @Override
-    public void close() {
-        final boolean success = UPDATER.compareAndSet(this, 0, 1);
-        Preconditions.checkState(success, "Transaction factory was already closed");
-    }
-}
index ea6948f85984fae1224ebd3dd2c8e7a5925222ad..9ee43f20bb585031911ca0209bebbe083f2fce5a 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
 
@@ -22,7 +23,7 @@ abstract class AbstractShardedTransaction implements DOMStoreTransaction {
     private final ClientTransaction tx;
 
     AbstractShardedTransaction(final ClientTransaction tx) {
-        this.tx = Preconditions.checkNotNull(tx);
+        this.tx = requireNonNull(tx);
     }
 
     @Override
index de4ae324892f9ec7d75ea27c2aba59e8d8dbe749..274c07e99bdbd34ca03065c4d046e5fae0d32296 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.databroker;
 import com.google.common.util.concurrent.FluentFuture;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -31,14 +31,14 @@ final class ClientBackedReadTransaction extends ClientBackedTransaction<ClientSn
     @SuppressWarnings("unused")
     private volatile ClientBackedTransactionChain parent;
 
-    ClientBackedReadTransaction(final ClientSnapshot delegate, @Nullable final ClientBackedTransactionChain parent,
-        @Nullable final Throwable allocationContext) {
+    ClientBackedReadTransaction(final ClientSnapshot delegate, final @Nullable ClientBackedTransactionChain parent,
+            final @Nullable Throwable allocationContext) {
         super(delegate, allocationContext);
         this.parent = parent;
     }
 
     @Override
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
+    public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
         return delegate().read(path);
     }
 
index cfc251d6c89e3b303171670d8a282f838e1cb6d0..861dcd3a7daa781f7dc673d0368e8edab4165c24 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.cluster.databroker;
 
 import com.google.common.util.concurrent.FluentFuture;
 import java.util.Optional;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -23,12 +23,12 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 final class ClientBackedReadWriteTransaction extends ClientBackedWriteTransaction
         implements DOMStoreReadWriteTransaction {
 
-    ClientBackedReadWriteTransaction(final ClientTransaction delegate, @Nullable final Throwable allocationContext) {
+    ClientBackedReadWriteTransaction(final ClientTransaction delegate, final @Nullable Throwable allocationContext) {
         super(delegate, allocationContext);
     }
 
     @Override
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
+    public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
         return delegate().read(path);
     }
 
index 4d979cdf96774915289026f2c14ce391eaee5249..a0f369638d50e17aaa7882fec9137bab9c48cae9 100644 (file)
@@ -7,17 +7,15 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
-import com.google.common.base.Preconditions;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import static java.util.Objects.requireNonNull;
+
+import java.lang.ref.Cleaner;
+import java.lang.ref.Cleaner.Cleanable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.databroker.actors.dds.AbstractClientHandle;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
+import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,51 +28,43 @@ import org.slf4j.LoggerFactory;
  */
 abstract class ClientBackedTransaction<T extends AbstractClientHandle<?>> extends
         AbstractDOMStoreTransaction<TransactionIdentifier> {
-    private static final class Finalizer extends FinalizablePhantomReference<ClientBackedTransaction<?>> {
-        private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue();
-        private static final Set<Finalizer> FINALIZERS = ConcurrentHashMap.newKeySet();
-        private static final Logger LOG = LoggerFactory.getLogger(Finalizer.class);
-
+    private static final class Cleanup implements Runnable {
         private final AbstractClientHandle<?> transaction;
         private final Throwable allocationContext;
 
-        private Finalizer(final ClientBackedTransaction<?> referent, final AbstractClientHandle<?> transaction,
-                final Throwable allocationContext) {
-            super(referent, QUEUE);
-            this.transaction = Preconditions.checkNotNull(transaction);
+        Cleanup(final AbstractClientHandle<?> transaction, final Throwable allocationContext) {
+            this.transaction = transaction;
             this.allocationContext = allocationContext;
         }
 
-        @Nonnull
-        static <T extends AbstractClientHandle<?>> T recordTransaction(
-                @Nonnull final ClientBackedTransaction<T> referent, @Nonnull final T transaction,
-                @Nullable final Throwable allocationContext) {
-            FINALIZERS.add(new Finalizer(referent, transaction, allocationContext));
-            return transaction;
-        }
-
         @Override
-        public void finalizeReferent() {
-            FINALIZERS.remove(this);
+        public void run() {
             if (transaction.abort()) {
                 LOG.info("Aborted orphan transaction {}", transaction, allocationContext);
             }
         }
     }
 
+    private static final Logger LOG = LoggerFactory.getLogger(ClientBackedTransaction.class);
+    private static final Cleaner CLEANER = Cleaner.create();
+
     private final T delegate;
+    private final Cleanable cleanable;
 
     ClientBackedTransaction(final T delegate, final Throwable allocationContext) {
         super(delegate.getIdentifier());
-        this.delegate = Finalizer.recordTransaction(this, delegate, allocationContext);
-    }
-
-    final T delegate() {
-        return delegate;
+        this.delegate = requireNonNull(delegate);
+        this.cleanable = CLEANER.register(this, new Cleanup(delegate, allocationContext));
     }
 
     @Override
     public void close() {
         delegate.abort();
+        // Run cleanup immediately so the reference is not stuck in the cleaner queue
+        cleanable.clean();
+    }
+
+    final T delegate() {
+        return delegate;
     }
 }
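The hunk above swaps Guava's FinalizablePhantomReference tracking for java.lang.ref.Cleaner. A minimal sketch of that Cleaner pattern follows, with an illustrative TrackedResource class that is not part of this change; note that the cleanup action must not capture the tracked object itself, or it would never become phantom-reachable.

import java.lang.ref.Cleaner;

final class TrackedResource implements AutoCloseable {
    private static final Cleaner CLEANER = Cleaner.create();

    // State needed for cleanup lives in a separate object so the Runnable
    // does not keep the TrackedResource reachable.
    private static final class Cleanup implements Runnable {
        private final StringBuilder log;

        Cleanup(final StringBuilder log) {
            this.log = log;
        }

        @Override
        public void run() {
            log.append("resource released\n");
        }
    }

    private final Cleaner.Cleanable cleanable;

    TrackedResource(final StringBuilder log) {
        cleanable = CLEANER.register(this, new Cleanup(log));
    }

    @Override
    public void close() {
        // Explicit close runs the action now; the Cleaner will not run it a second time.
        cleanable.clean();
    }
}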
index 6ae7e51c54baea21c7a4315b6c90d1289f957902..3893dbd21f89f2363cfe3f424e150682728669be 100644 (file)
@@ -7,12 +7,13 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.WeakHashMap;
-import javax.annotation.concurrent.GuardedBy;
+import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.databroker.actors.dds.AbstractClientHandle;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
@@ -40,7 +41,7 @@ final class ClientBackedTransactionChain implements DOMStoreTransactionChain {
     private final boolean debugAllocation;
 
     ClientBackedTransactionChain(final ClientLocalHistory history, final boolean debugAllocation) {
-        this.history = Preconditions.checkNotNull(history);
+        this.history = requireNonNull(history);
         this.debugAllocation = debugAllocation;
     }
 
index 737e70d6f02194ab2decc3833a97ad00b9c22bf3..2b5cf89ff9cc42c3103d9f97fb46b66ed1436bf1 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
@@ -21,17 +21,17 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  */
 class ClientBackedWriteTransaction extends ClientBackedTransaction<ClientTransaction>
         implements DOMStoreWriteTransaction {
-    ClientBackedWriteTransaction(final ClientTransaction delegate, @Nullable final Throwable allocationContext) {
+    ClientBackedWriteTransaction(final ClientTransaction delegate, final @Nullable Throwable allocationContext) {
         super(delegate, allocationContext);
     }
 
     @Override
-    public final void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    public final void write(final YangInstanceIdentifier path, final NormalizedNode data) {
         delegate().write(path, data);
     }
 
     @Override
-    public final void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    public final void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
         delegate().merge(path, data);
     }
 
@@ -42,6 +42,6 @@ class ClientBackedWriteTransaction extends ClientBackedTransaction<ClientTransac
 
     @Override
     public final DOMStoreThreePhaseCommitCohort ready() {
-        return new DOMStoreThreePhaseCommitCohortAdaptor(delegate().ready());
+        return delegate().ready();
     }
 }
@@ -5,9 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.md.sal.dom.broker.impl.jmx;
+package org.opendaylight.controller.cluster.databroker;
 
 import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.CommitStatsMXBean;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.yangtools.util.DurationStatisticsTracker;
 
@@ -16,8 +17,7 @@ import org.opendaylight.yangtools.util.DurationStatisticsTracker;
  *
  * @author Thomas Pantelis
  */
-public class CommitStatsMXBeanImpl extends AbstractMXBean implements CommitStatsMXBean {
-
+final class CommitStatsMXBeanImpl extends AbstractMXBean implements CommitStatsMXBean {
     private final DurationStatisticsTracker commitStatsTracker;
 
     /**
@@ -26,8 +26,8 @@ public class CommitStatsMXBeanImpl extends AbstractMXBean implements CommitStats
      * @param commitStatsTracker the DurationStatsTracker used to obtain the stats.
      * @param mbeantype mBeanType Used as the <code>type</code> property in the bean's ObjectName.
      */
-    public CommitStatsMXBeanImpl(@NonNull DurationStatisticsTracker commitStatsTracker,
-            @NonNull String mbeantype) {
+    CommitStatsMXBeanImpl(final @NonNull DurationStatisticsTracker commitStatsTracker,
+            final @NonNull String mbeantype) {
         super("CommitStats", mbeantype, null);
         this.commitStatsTracker = commitStatsTracker;
     }
index 1199fbf7bca4a4c5f60485690b4e82eb37ca062f..d6da8487e0f4af80278028d48bfb660f08d6cf0c 100644 (file)
@@ -7,22 +7,18 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER;
-import static org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER;
+import static org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER;
 
 import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.AbstractFuture;
 import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executor;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
@@ -31,11 +27,18 @@ import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
+import org.opendaylight.mdsal.dom.spi.AbstractDOMDataBroker;
+import org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -47,7 +50,8 @@ import org.slf4j.LoggerFactory;
  * @author Thomas Pantelis
  */
 @Beta
-public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
+@Component(service = DOMDataBroker.class, property = "type=default")
+public class ConcurrentDOMDataBroker extends AbstractDOMDataBroker {
     private static final Logger LOG = LoggerFactory.getLogger(ConcurrentDOMDataBroker.class);
     private static final String CAN_COMMIT = "CAN_COMMIT";
     private static final String PRE_COMMIT = "PRE_COMMIT";
@@ -68,138 +72,97 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
     public ConcurrentDOMDataBroker(final Map<LogicalDatastoreType, DOMStore> datastores,
             final Executor listenableFutureExecutor, final DurationStatisticsTracker commitStatsTracker) {
         super(datastores);
-        this.clientFutureCallbackExecutor = Preconditions.checkNotNull(listenableFutureExecutor);
-        this.commitStatsTracker = Preconditions.checkNotNull(commitStatsTracker);
+        clientFutureCallbackExecutor = requireNonNull(listenableFutureExecutor);
+        this.commitStatsTracker = requireNonNull(commitStatsTracker);
     }
 
-    public DurationStatisticsTracker getCommitStatsTracker() {
-        return commitStatsTracker;
+    @Activate
+    public ConcurrentDOMDataBroker(@Reference final DataBrokerCommitExecutor commitExecutor,
+            @Reference(target = "(type=distributed-config)") final DOMStore configDatastore,
+            @Reference(target = "(type=distributed-operational)") final DOMStore operDatastore) {
+        this(Map.of(
+            LogicalDatastoreType.CONFIGURATION, configDatastore, LogicalDatastoreType.OPERATIONAL, operDatastore),
+            commitExecutor.executor(), commitExecutor.commitStatsTracker());
+        LOG.info("DOM Data Broker started");
     }
 
     @Override
-    protected FluentFuture<? extends CommitInfo> commit(
-            final DOMDataTreeWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
-        Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
-        Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
-        LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
-
-        if (cohorts.isEmpty()) {
-            return CommitInfo.emptyFluentFuture();
-        }
+    @Deactivate
+    public void close() {
+        LOG.info("DOM Data Broker stopping");
+        super.close();
+        LOG.info("DOM Data Broker stopped");
+    }
 
-        final AsyncNotifyingSettableFuture clientSubmitFuture =
-                new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
+    @Override
+    protected FluentFuture<? extends CommitInfo> commit(final DOMDataTreeWriteTransaction transaction,
+            final DOMStoreThreePhaseCommitCohort cohort) {
 
-        doCanCommit(clientSubmitFuture, transaction, cohorts);
+        checkArgument(transaction != null, "Transaction must not be null.");
+        checkArgument(cohort != null, "Cohorts must not be null.");
+        LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
 
-        return FluentFuture.from(clientSubmitFuture).transform(ignored -> CommitInfo.empty(),
-                MoreExecutors.directExecutor());
+        final var clientSubmitFuture = new AsyncNotifyingSettableFuture(clientFutureCallbackExecutor);
+        doCanCommit(clientSubmitFuture, transaction, cohort);
+        return FluentFuture.from(clientSubmitFuture);
     }
 
     private void doCanCommit(final AsyncNotifyingSettableFuture clientSubmitFuture,
             final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
+            final DOMStoreThreePhaseCommitCohort cohort) {
         final long startTime = System.nanoTime();
 
-        final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
-        // Not using Futures.allAsList here to avoid its internal overhead.
-        FutureCallback<Boolean> futureCallback = new FutureCallback<Boolean>() {
+        Futures.addCallback(cohort.canCommit(), new FutureCallback<>() {
             @Override
             public void onSuccess(final Boolean result) {
                 if (result == null || !result) {
-                    handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER,
-                            new TransactionCommitFailedException("Can Commit failed, no detailed cause available."));
-                } else if (!cohortIterator.hasNext()) {
-                    // All cohorts completed successfully - we can move on to the preCommit phase
-                    doPreCommit(startTime, clientSubmitFuture, transaction, cohorts);
+                    onFailure(new TransactionCommitFailedException("Can Commit failed, no detailed cause available."));
                 } else {
-                    Futures.addCallback(cohortIterator.next().canCommit(), this, MoreExecutors.directExecutor());
+                    doPreCommit(startTime, clientSubmitFuture, transaction, cohort);
                 }
             }
 
             @Override
             public void onFailure(final Throwable failure) {
-                handleException(clientSubmitFuture, transaction, cohorts, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure);
+                handleException(clientSubmitFuture, transaction, cohort, CAN_COMMIT, CAN_COMMIT_ERROR_MAPPER, failure);
             }
-        };
-
-        ListenableFuture<Boolean> canCommitFuture = cohortIterator.next().canCommit();
-        Futures.addCallback(canCommitFuture, futureCallback, MoreExecutors.directExecutor());
+        }, MoreExecutors.directExecutor());
     }
 
     private void doPreCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
-            final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
-        final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
-        // Not using Futures.allAsList here to avoid its internal overhead.
-        FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+            final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) {
+        Futures.addCallback(cohort.preCommit(), new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void notUsed) {
-                if (!cohortIterator.hasNext()) {
-                    // All cohorts completed successfully - we can move on to the commit phase
-                    doCommit(startTime, clientSubmitFuture, transaction, cohorts);
-                } else {
-                    ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
-                    Futures.addCallback(preCommitFuture, this, MoreExecutors.directExecutor());
-                }
+            public void onSuccess(final Empty result) {
+                doCommit(startTime, clientSubmitFuture, transaction, cohort);
             }
 
             @Override
             public void onFailure(final Throwable failure) {
-                handleException(clientSubmitFuture, transaction, cohorts, PRE_COMMIT, PRE_COMMIT_MAPPER, failure);
+                handleException(clientSubmitFuture, transaction, cohort, PRE_COMMIT, PRE_COMMIT_MAPPER, failure);
             }
-        };
-
-        ListenableFuture<Void> preCommitFuture = cohortIterator.next().preCommit();
-        Futures.addCallback(preCommitFuture, futureCallback, MoreExecutors.directExecutor());
+        }, MoreExecutors.directExecutor());
     }
 
     private void doCommit(final long startTime, final AsyncNotifyingSettableFuture clientSubmitFuture,
-            final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-
-        final Iterator<DOMStoreThreePhaseCommitCohort> cohortIterator = cohorts.iterator();
-
-        // Not using Futures.allAsList here to avoid its internal overhead.
-        FutureCallback<Void> futureCallback = new FutureCallback<Void>() {
+            final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort) {
+        Futures.addCallback(cohort.commit(), new FutureCallback<CommitInfo>() {
             @Override
-            public void onSuccess(final Void notUsed) {
-                if (!cohortIterator.hasNext()) {
-                    // All cohorts completed successfully - we're done.
-                    commitStatsTracker.addDuration(System.nanoTime() - startTime);
-
-                    clientSubmitFuture.set();
-                } else {
-                    ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
-                    Futures.addCallback(commitFuture, this, MoreExecutors.directExecutor());
-                }
+            public void onSuccess(final CommitInfo result) {
+                commitStatsTracker.addDuration(System.nanoTime() - startTime);
+                clientSubmitFuture.set();
             }
 
             @Override
             public void onFailure(final Throwable throwable) {
-                handleException(clientSubmitFuture, transaction, cohorts, COMMIT, COMMIT_ERROR_MAPPER, throwable);
+                handleException(clientSubmitFuture, transaction, cohort, COMMIT, COMMIT_ERROR_MAPPER, throwable);
             }
-        };
-
-        ListenableFuture<Void> commitFuture = cohortIterator.next().commit();
-        Futures.addCallback(commitFuture, futureCallback, MoreExecutors.directExecutor());
+        }, MoreExecutors.directExecutor());
     }
 
-    @SuppressFBWarnings(value = "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE",
-            justification = "Pertains to the assignment of the 'clientException' var. FindBugs flags this as an "
-                + "uncomfirmed cast but the generic type in TransactionCommitFailedExceptionMapper is "
-                + "TransactionCommitFailedException and thus should be deemed as confirmed.")
     private static void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
-            final DOMDataTreeWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
-            final String phase, final TransactionCommitFailedExceptionMapper exMapper,
-            final Throwable throwable) {
-
+            final DOMDataTreeWriteTransaction transaction, final DOMStoreThreePhaseCommitCohort cohort,
+            final String phase, final TransactionCommitFailedExceptionMapper exMapper, final Throwable throwable) {
         if (clientSubmitFuture.isDone()) {
             // We must have had failures from multiple cohorts.
             return;
@@ -208,29 +171,21 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
         // Use debug instead of warn level here because this exception gets propagate back to the caller via the Future
         LOG.debug("Tx: {} Error during phase {}, starting Abort", transaction.getIdentifier(), phase, throwable);
 
-        // Transaction failed - tell all cohorts to abort.
-        @SuppressWarnings("unchecked")
-        ListenableFuture<Void>[] canCommitFutures = new ListenableFuture[cohorts.size()];
-        int index = 0;
-        for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-            canCommitFutures[index++] = cohort.abort();
-        }
-
         // Propagate the original exception
         final Exception e;
         if (throwable instanceof NoShardLeaderException || throwable instanceof ShardLeaderNotRespondingException) {
             e = new DataStoreUnavailableException(throwable.getMessage(), throwable);
-        } else if (throwable instanceof Exception) {
-            e = (Exception)throwable;
+        } else if (throwable instanceof Exception ex) {
+            e = ex;
         } else {
             e = new RuntimeException("Unexpected error occurred", throwable);
         }
         clientSubmitFuture.setException(exMapper.apply(e));
 
-        ListenableFuture<List<Void>> combinedFuture = Futures.allAsList(canCommitFutures);
-        Futures.addCallback(combinedFuture, new FutureCallback<List<Void>>() {
+        // abort
+        Futures.addCallback(cohort.abort(), new FutureCallback<Empty>() {
             @Override
-            public void onSuccess(final List<Void> notUsed) {
+            public void onSuccess(final Empty result) {
                 // Propagate the original exception to the client.
                 LOG.debug("Tx: {} aborted successfully", transaction.getIdentifier());
             }
@@ -252,8 +207,7 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
      * FIXME: This class should probably be moved to yangtools common utils for re-usability and
      * unified with AsyncNotifyingListenableFutureTask.
      */
-    private static class AsyncNotifyingSettableFuture extends AbstractFuture<Void> {
-
+    private static class AsyncNotifyingSettableFuture extends AbstractFuture<CommitInfo> {
         /**
          * ThreadLocal used to detect if the task completion thread is running the future listener Runnables.
          */
@@ -262,7 +216,7 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
         private final Executor listenerExecutor;
 
         AsyncNotifyingSettableFuture(final Executor listenerExecutor) {
-            this.listenerExecutor = Preconditions.checkNotNull(listenerExecutor);
+            this.listenerExecutor = requireNonNull(listenerExecutor);
         }
 
         @Override
@@ -282,7 +236,7 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
         boolean set() {
             ON_TASK_COMPLETION_THREAD_TL.set(Boolean.TRUE);
             try {
-                return super.set(null);
+                return super.set(CommitInfo.empty());
             } finally {
                 ON_TASK_COMPLETION_THREAD_TL.set(null);
             }
@@ -303,8 +257,8 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker {
             private final Executor executor;
 
             DelegatingRunnable(final Runnable delegate, final Executor executor) {
-                this.delegate = Preconditions.checkNotNull(delegate);
-                this.executor = Preconditions.checkNotNull(executor);
+                this.delegate = requireNonNull(delegate);
+                this.executor = requireNonNull(executor);
             }
 
             @Override
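The reworked ConcurrentDOMDataBroker above drives canCommit, preCommit and commit as a chain of Guava FutureCallback steps on a single cohort instead of iterating a cohort collection. A compressed sketch of that chaining style, assuming only Guava on the classpath and with the PhaseChain class purely illustrative, looks like this:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import java.util.function.Supplier;

final class PhaseChain {
    private PhaseChain() {
        // utility class
    }

    static ListenableFuture<String> run(final ListenableFuture<Boolean> canCommit,
            final Supplier<ListenableFuture<String>> commit) {
        final SettableFuture<String> result = SettableFuture.create();
        Futures.addCallback(canCommit, new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(final Boolean ok) {
                if (ok == null || !ok) {
                    result.setException(new IllegalStateException("canCommit vetoed"));
                    return;
                }
                // Only start the next phase once the previous one has succeeded.
                Futures.addCallback(commit.get(), new FutureCallback<String>() {
                    @Override
                    public void onSuccess(final String info) {
                        result.set(info);
                    }

                    @Override
                    public void onFailure(final Throwable cause) {
                        result.setException(cause);
                    }
                }, MoreExecutors.directExecutor());
            }

            @Override
            public void onFailure(final Throwable cause) {
                result.setException(cause);
            }
        }, MoreExecutors.directExecutor());
        return result;
    }
}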
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadOnlyTransaction.java
deleted file mode 100644 (file)
index afce5c0..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMBrokerReadOnlyTransaction
-        extends AbstractDOMBrokerTransaction<DOMStoreReadTransaction> implements DOMDataTreeReadTransaction {
-
-    /**
-     * Creates new composite Transactions.
-     *
-     * @param identifier Identifier of transaction.
-     */
-    protected DOMBrokerReadOnlyTransaction(Object identifier,
-            Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories) {
-        super(identifier, storeTxFactories);
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return getSubtransaction(store).read(path);
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return getSubtransaction(store).exists(path);
-    }
-
-    @Override
-    public void close() {
-        closeSubtransactions();
-    }
-
-    @Override
-    protected DOMStoreReadTransaction createTransaction(LogicalDatastoreType key) {
-        return getTxFactory(key).newReadOnlyTransaction();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerReadWriteTransaction.java
deleted file mode 100644 (file)
index b160d5b..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMBrokerReadWriteTransaction extends AbstractDOMBrokerWriteTransaction<DOMStoreReadWriteTransaction>
-        implements DOMDataTreeReadWriteTransaction {
-
-    /**
-     * Constructs an instance.
-     *
-     * @param identifier identifier of transaction.
-     * @param storeTxFactories the backing transaction store factories
-     */
-    protected DOMBrokerReadWriteTransaction(Object identifier,
-            Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory>  storeTxFactories,
-            final AbstractDOMTransactionFactory<?> commitImpl) {
-        super(identifier, storeTxFactories, commitImpl);
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode<?,?>>> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return getSubtransaction(store).read(path);
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return getSubtransaction(store).exists(path);
-    }
-
-    @Override
-    protected DOMStoreReadWriteTransaction createTransaction(LogicalDatastoreType key) {
-        return getTxFactory(key).newReadWriteTransaction();
-    }
-
-    @Override
-    public void close() {
-        cancel();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerTransactionChain.java
deleted file mode 100644 (file)
index 1f1ea7f..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class DOMBrokerTransactionChain extends AbstractDOMTransactionFactory<DOMStoreTransactionChain>
-        implements DOMTransactionChain {
-    private enum State {
-        RUNNING,
-        CLOSING,
-        CLOSED,
-        FAILED,
-    }
-
-    private static final AtomicIntegerFieldUpdater<DOMBrokerTransactionChain> COUNTER_UPDATER =
-            AtomicIntegerFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, "counter");
-    private static final AtomicReferenceFieldUpdater<DOMBrokerTransactionChain, State> STATE_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(DOMBrokerTransactionChain.class, State.class, "state");
-    private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerTransactionChain.class);
-    private final AtomicLong txNum = new AtomicLong();
-    private final AbstractDOMBroker broker;
-    private final DOMTransactionChainListener listener;
-    private final long chainId;
-
-    private volatile State state = State.RUNNING;
-    private volatile int counter = 0;
-
-    /**
-     * Constructs an instance.
-     *
-     * @param chainId
-     *            ID of transaction chain
-     * @param chains
-     *            Backing {@link DOMStoreTransactionChain}s.
-     * @param listener
-     *            Listener, which listens on transaction chain events.
-     * @throws NullPointerException
-     *             If any of arguments is null.
-     */
-    DOMBrokerTransactionChain(final long chainId, final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
-            final AbstractDOMBroker broker, final DOMTransactionChainListener listener) {
-        super(chains);
-        this.chainId = chainId;
-        this.broker = Preconditions.checkNotNull(broker);
-        this.listener = Preconditions.checkNotNull(listener);
-    }
-
-    private void checkNotFailed() {
-        Preconditions.checkState(state != State.FAILED, "Transaction chain has failed");
-    }
-
-    @Override
-    protected Object newTransactionIdentifier() {
-        return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit(
-            final DOMDataTreeWriteTransaction transaction, final Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-        checkNotFailed();
-        checkNotClosed();
-
-        final FluentFuture<? extends CommitInfo> ret = broker.commit(transaction, cohorts);
-
-        COUNTER_UPDATER.incrementAndGet(this);
-        ret.addCallback(new FutureCallback<CommitInfo>() {
-            @Override
-            public void onSuccess(final CommitInfo result) {
-                transactionCompleted();
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                transactionFailed(transaction, failure);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return ret;
-    }
-
-    @Override
-    public void close() {
-        final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
-        if (!success) {
-            LOG.debug("Chain {} is no longer running", this);
-            return;
-        }
-
-        super.close();
-        for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
-            subChain.close();
-        }
-
-        if (counter == 0) {
-            finishClose();
-        }
-    }
-
-    private void finishClose() {
-        state = State.CLOSED;
-        listener.onTransactionChainSuccessful(this);
-    }
-
-    private void transactionCompleted() {
-        if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
-            finishClose();
-        }
-    }
-
-    private void transactionFailed(final DOMDataTreeWriteTransaction tx, final Throwable cause) {
-        state = State.FAILED;
-        LOG.debug("Transaction chain {} failed.", this, cause);
-        listener.onTransactionChainFailed(this, tx, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMBrokerWriteOnlyTransaction.java
deleted file mode 100644 (file)
index 2f0915d..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015 Huawei Technologies Co. Ltd. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.databroker;
-
-import java.util.Map;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionFactory;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-public class DOMBrokerWriteOnlyTransaction extends AbstractDOMBrokerWriteTransaction<DOMStoreWriteTransaction> {
-
-    /**
-     * Constructs an instance.
-     *
-     * @param identifier identifier of transaction.
-     * @param storeTxFactories the backing transaction store factories
-     */
-    public DOMBrokerWriteOnlyTransaction(Object identifier,
-            Map<LogicalDatastoreType, ? extends DOMStoreTransactionFactory> storeTxFactories,
-            AbstractDOMTransactionFactory<?> commitImpl) {
-        super(identifier, storeTxFactories, commitImpl);
-    }
-
-    @Override
-    protected DOMStoreWriteTransaction createTransaction(LogicalDatastoreType key) {
-        return getTxFactory(key).newWriteOnlyTransaction();
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMStoreThreePhaseCommitCohortAdaptor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DOMStoreThreePhaseCommitCohortAdaptor.java
deleted file mode 100644 (file)
index 5111a32..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-
-/**
- * Utility class from bridging {@link DOMStoreThreePhaseCommitCohort} and
- * {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}.
- *
- * @author Robert Varga
- */
-final class DOMStoreThreePhaseCommitCohortAdaptor extends ForwardingObject implements DOMStoreThreePhaseCommitCohort {
-    private final org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort delegate;
-
-    DOMStoreThreePhaseCommitCohortAdaptor(
-        final org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort delegate) {
-        this.delegate = Preconditions.checkNotNull(delegate);
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return delegate.canCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return delegate.preCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegate.abort();
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return delegate.commit();
-    }
-
-    @Override
-    protected Object delegate() {
-        return delegate;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/DataBrokerCommitExecutor.java
new file mode 100644 (file)
index 0000000..bdea393
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2024 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
+import org.opendaylight.yangtools.util.DurationStatisticsTracker;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.Designate;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Component(
+    service = DataBrokerCommitExecutor.class,
+    configurationPid = "org.opendaylight.controller.cluster.datastore.broker")
+@Designate(ocd = DataBrokerCommitExecutor.Config.class)
+public final class DataBrokerCommitExecutor {
+    @ObjectClassDefinition
+    public @interface Config {
+        @AttributeDefinition(name = "max-data-broker-future-callback-queue-size")
+        int callbackQueueSize() default 1000;
+        @AttributeDefinition(name = "max-data-broker-future-callback-pool-size")
+        int callbackPoolSize() default 20;
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(DataBrokerCommitExecutor.class);
+
+    private final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
+    private final ThreadExecutorStatsMXBeanImpl threadStats;
+    private final CommitStatsMXBeanImpl commitStats;
+    private final ExecutorService executorService;
+
+    @Activate
+    public DataBrokerCommitExecutor(final Config config) {
+        executorService = SpecialExecutors.newBlockingBoundedCachedThreadPool(config.callbackPoolSize(),
+            config.callbackQueueSize(), "CommitFutures", ConcurrentDOMDataBroker.class);
+        threadStats = ThreadExecutorStatsMXBeanImpl.create(executorService, "CommitFutureExecutorStats",
+            "DOMDataBroker");
+        commitStats = new CommitStatsMXBeanImpl(commitStatsTracker, "DOMDataBroker");
+        commitStats.register();
+        LOG.info("DOM Data Broker commit exector started");
+    }
+
+    @Deactivate
+    void deactivate() {
+        LOG.info("DOM Data Broker commit exector stopping");
+        commitStats.unregister();
+        threadStats.unregister();
+        executorService.shutdown();
+        try {
+            executorService.awaitTermination(1, TimeUnit.MINUTES);
+        } catch (InterruptedException e) {
+            LOG.warn("Future executor failed to finish in time, giving up", e);
+        }
+        LOG.info("DOM Data Broker commit exector stopped");
+    }
+
+    Executor executor() {
+        return executorService;
+    }
+
+    DurationStatisticsTracker commitStatsTracker() {
+        return commitStatsTracker;
+    }
+}
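The @Deactivate method above follows the standard JDK executor wind-down: shutdown() to stop accepting new work, then a bounded awaitTermination(). A minimal, self-contained sketch of that lifecycle (plain JDK, hypothetical class name; not the component's actual code):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

final class ExecutorShutdownSketch {
    public static void main(final String[] args) throws InterruptedException {
        final ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("commit callback running"));

        // Mirrors the @Deactivate sequence: reject new work, then wait up to a minute for in-flight tasks.
        pool.shutdown();
        if (!pool.awaitTermination(1, TimeUnit.MINUTES)) {
            // awaitTermination() also reports a timeout via its return value.
            System.err.println("Executor failed to finish in time, giving up");
        }
    }
}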
index 4b1d613636c89f5c125e66f5cf9e21b7c2d5b1f8..d10627dcf93e065df03c5f5130da50303d9ecc79 100644 (file)
@@ -7,15 +7,17 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import java.util.Collection;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import java.util.function.Function;
-import javax.annotation.Nullable;
+import java.util.stream.Stream;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -42,18 +44,19 @@ public abstract class AbstractClientHandle<T extends AbstractProxyTransaction> e
     private static final AtomicReferenceFieldUpdater<AbstractClientHandle, State> STATE_UPDATER =
             AtomicReferenceFieldUpdater.newUpdater(AbstractClientHandle.class, State.class, "state");
 
-    private final TransactionIdentifier transactionId;
-    private final AbstractClientHistory parent;
+    private final @NonNull TransactionIdentifier transactionId;
+    private final @NonNull AbstractClientHistory parent;
 
     private volatile State<T> state = new State<>();
 
     // Hidden to prevent outside instantiation
     AbstractClientHandle(final AbstractClientHistory parent, final TransactionIdentifier transactionId) {
-        this.transactionId = Preconditions.checkNotNull(transactionId);
-        this.parent = Preconditions.checkNotNull(parent);
+        this.transactionId = requireNonNull(transactionId);
+        this.parent = requireNonNull(parent);
     }
 
     @Override
+    // Non-final for mocking
     public TransactionIdentifier getIdentifier() {
         return transactionId;
     }
@@ -63,6 +66,7 @@ public abstract class AbstractClientHandle<T extends AbstractProxyTransaction> e
      *
      * @return True if this transaction became closed during this call
      */
+    // Non-final for mocking
     public boolean abort() {
         if (commonAbort()) {
             parent.onTransactionAbort(this);
@@ -73,12 +77,13 @@ public abstract class AbstractClientHandle<T extends AbstractProxyTransaction> e
     }
 
     private boolean commonAbort() {
-        final Collection<T> toClose = ensureClosed();
+        final Map<Long, T> toClose = ensureClosed();
         if (toClose == null) {
             return false;
         }
 
-        toClose.forEach(AbstractProxyTransaction::abort);
+        toClose.values().forEach(AbstractProxyTransaction::abort);
+        parent.onTransactionShardsBound(transactionId, toClose.keySet());
         return true;
     }
 
@@ -92,29 +97,38 @@ public abstract class AbstractClientHandle<T extends AbstractProxyTransaction> e
      * Make sure this snapshot is closed. If it became closed as the effect of this call, return a collection of
      * {@link AbstractProxyTransaction} handles which need to be closed, too.
      *
-     * @return null if this snapshot has already been closed, otherwise a collection of proxies, which need to be
+     * @return null if this snapshot has already been closed, otherwise a State of proxies which need to be
      *         closed, too.
      */
-    @Nullable final Collection<T> ensureClosed() {
-        @SuppressWarnings("unchecked")
-        final State<T> local = STATE_UPDATER.getAndSet(this, null);
-        return local == null ? null : local.values();
+    final @Nullable Map<Long, T> ensureClosed() {
+        // volatile read and a conditional CAS. This ends up being better in the typical case when we are invoked more
+        // than once (see ClientBackedTransaction) than performing a STATE_UPDATER.getAndSet().
+        final State<T> local = state;
+        return local != null && STATE_UPDATER.compareAndSet(this, local, null) ? local : null;
+    }
+
+    final T ensureProxy(final YangInstanceIdentifier path) {
+        return ensureProxy(getState(), parent.resolveShardForPath(path));
     }
 
-    final T ensureProxy(final YangInstanceIdentifier path, final Function<Long, T> createProxy) {
-        final Map<Long, T> local = getState();
-        final Long shard = parent.resolveShardForPath(path);
+    private T ensureProxy(final State<T> localState, final Long shard) {
+        return localState.computeIfAbsent(shard, this::createProxy);
+    }
 
-        return local.computeIfAbsent(shard, createProxy);
+    final Stream<T> ensureAllProxies() {
+        final var local = getState();
+        return parent.resolveAllShards().map(shard -> ensureProxy(local, shard));
     }
 
     final AbstractClientHistory parent() {
         return parent;
     }
 
+    abstract @NonNull T createProxy(@NonNull Long shard);
+
     private State<T> getState() {
         final State<T> local = state;
-        Preconditions.checkState(local != null, "Transaction %s is closed", transactionId);
+        checkState(local != null, "Transaction %s is closed", transactionId);
         return local;
     }
 
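The comment in ensureClosed() above prefers a volatile read plus a conditional compareAndSet over an unconditional getAndSet(), since the common case is being invoked on an already-closed handle. A minimal sketch of that close-once idiom (plain JDK; CloseOnceSketch and its Object-typed state are hypothetical stand-ins for the project's State class):

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

final class CloseOnceSketch {
    private static final AtomicReferenceFieldUpdater<CloseOnceSketch, Object> STATE_UPDATER =
        AtomicReferenceFieldUpdater.newUpdater(CloseOnceSketch.class, Object.class, "state");

    private volatile Object state = new Object();

    // Exactly one caller observes the non-null state; later callers pay only the cheap volatile read.
    Object ensureClosed() {
        final Object local = state;
        return local != null && STATE_UPDATER.compareAndSet(this, local, null) ? local : null;
    }

    public static void main(final String[] args) {
        final CloseOnceSketch handle = new CloseOnceSketch();
        System.out.println(handle.ensureClosed() != null); // true: first close wins
        System.out.println(handle.ensureClosed() != null); // false: already closed, no CAS issued
    }
}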
index 542cc2dbafb48d9e8db27fe632618af54209644e..796c23614e2fa220660bb09c6e7db0f84162b8df 100644 (file)
@@ -7,16 +7,23 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLongFieldUpdater;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import java.util.concurrent.locks.StampedLock;
-import javax.annotation.concurrent.GuardedBy;
+import java.util.stream.Stream;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
 import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
@@ -25,6 +32,7 @@ import org.opendaylight.controller.cluster.access.commands.CreateLocalHistoryReq
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
 import org.opendaylight.yangtools.concepts.Identifiable;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -59,8 +67,8 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
     private final Map<Long, ProxyHistory> histories = new ConcurrentHashMap<>();
     private final StampedLock lock = new StampedLock();
 
-    private final AbstractDataStoreClientBehavior client;
-    private final LocalHistoryIdentifier identifier;
+    private final @NonNull AbstractDataStoreClientBehavior client;
+    private final @NonNull LocalHistoryIdentifier identifier;
 
     // Used via NEXT_TX_UPDATER
     @SuppressWarnings("unused")
@@ -69,9 +77,9 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
     private volatile State state = State.IDLE;
 
     AbstractClientHistory(final AbstractDataStoreClientBehavior client, final LocalHistoryIdentifier identifier) {
-        this.client = Preconditions.checkNotNull(client);
-        this.identifier = Preconditions.checkNotNull(identifier);
-        Preconditions.checkArgument(identifier.getCookie() == 0);
+        this.client = requireNonNull(client);
+        this.identifier = requireNonNull(identifier);
+        checkArgument(identifier.getCookie() == 0);
     }
 
     final State state() {
@@ -80,14 +88,14 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
 
     final void updateState(final State expected, final State next) {
         final boolean success = STATE_UPDATER.compareAndSet(this, expected, next);
-        Preconditions.checkState(success, "Race condition detected, state changed from %s to %s", expected, state);
+        checkState(success, "Race condition detected, state changed from %s to %s", expected, state);
         LOG.debug("Client history {} changed state from {} to {}", this, expected, next);
     }
 
     final synchronized void doClose() {
         final State local = state;
         if (local != State.CLOSED) {
-            Preconditions.checkState(local == State.IDLE, "Local history %s has an open transaction", this);
+            checkState(local == State.IDLE, "Local history %s has an open transaction", this);
             histories.values().forEach(ProxyHistory::close);
             updateState(local, State.CLOSED);
         }
@@ -99,7 +107,7 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
     }
 
     @Override
-    public LocalHistoryIdentifier getIdentifier() {
+    public final LocalHistoryIdentifier getIdentifier() {
         return identifier;
     }
 
@@ -111,6 +119,14 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
         return client.resolveShardForPath(path);
     }
 
+    final Stream<Long> resolveAllShards() {
+        return client.resolveAllShards();
+    }
+
+    final ActorUtils actorUtils() {
+        return client.actorUtils();
+    }
+
     @Override
     final void localAbort(final Throwable cause) {
         final State oldState = STATE_UPDATER.getAndSet(this, State.CLOSED);
@@ -130,9 +146,10 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
     /**
      * Create a new history proxy for a given shard.
      *
+     * @param shard Shard cookie
      * @throws InversibleLockException if the shard is being reconnected
      */
-    @GuardedBy("lock")
+    @Holding("lock")
     private ProxyHistory createHistoryProxy(final Long shard) {
         final AbstractClientConnection<ShardBackendInfo> connection = client.getConnection(shard);
         final LocalHistoryIdentifier proxyId = new LocalHistoryIdentifier(identifier.getClientId(),
@@ -156,7 +173,7 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
         LOG.debug("Create history response {}", response);
     }
 
-    private ProxyHistory ensureHistoryProxy(final TransactionIdentifier transactionId, final Long shard) {
+    private @NonNull ProxyHistory ensureHistoryProxy(final TransactionIdentifier transactionId, final Long shard) {
         while (true) {
             try {
                 // Short-lived lock to ensure exclusion of createHistoryProxy and the lookup phase in startReconnect,
@@ -175,11 +192,13 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
         }
     }
 
-    final AbstractProxyTransaction createSnapshotProxy(final TransactionIdentifier transactionId, final Long shard) {
+    final @NonNull AbstractProxyTransaction createSnapshotProxy(final TransactionIdentifier transactionId,
+            final Long shard) {
         return ensureHistoryProxy(transactionId, shard).createTransactionProxy(transactionId, true);
     }
 
-    final AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier transactionId, final Long shard) {
+    final @NonNull AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier transactionId,
+            final Long shard) {
         return ensureHistoryProxy(transactionId, shard).createTransactionProxy(transactionId, false);
     }
 
@@ -196,7 +215,8 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
      * @throws DOMTransactionChainClosedException if this history is closed
      * @throws IllegalStateException if a previous dependent transaction has not been closed
      */
-    public ClientTransaction createTransaction() {
+    // Non-final for mocking
+    public @NonNull ClientTransaction createTransaction() {
         checkNotClosed();
 
         synchronized (this) {
@@ -213,6 +233,7 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
      * @throws DOMTransactionChainClosedException if this history is closed
      * @throws IllegalStateException if a previous dependent transaction has not been closed
      */
+    // Non-final for mocking
     public ClientSnapshot takeSnapshot() {
         checkNotClosed();
 
@@ -223,16 +244,38 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
         }
     }
 
-    @GuardedBy("this")
+    @Holding("this")
     abstract ClientSnapshot doCreateSnapshot();
 
-    @GuardedBy("this")
+    @Holding("this")
     abstract ClientTransaction doCreateTransaction();
 
     /**
-     * Callback invoked from {@link ClientTransaction} when a child transaction readied for submission.
+     * Callback invoked from {@link AbstractClientHandle}'s lifecycle to inform that a particular transaction is
+     * completing with a set of participating shards.
      *
      * @param txId Transaction identifier
+     * @param participatingShards Participating shard cookies
+     */
+    final void onTransactionShardsBound(final TransactionIdentifier txId, final Set<Long> participatingShards) {
+        // Guard against startReconnect() kicking in. It is okay to connect new participants concurrently, as those
+        // will not see the holes caused by this.
+        final long stamp = lock.readLock();
+        try {
+            for (var entry : histories.entrySet()) {
+                if (!participatingShards.contains(entry.getKey())) {
+                    entry.getValue().skipTransaction(txId);
+                }
+            }
+        } finally {
+            lock.unlockRead(stamp);
+        }
+    }
+
+    /**
+     * Callback invoked from {@link ClientTransaction} when a child transaction has been readied for submission.
+     *
+     * @param tx Client transaction
      * @param cohort Transaction commit cohort
      */
     synchronized AbstractTransactionCommitCohort onTransactionReady(final ClientTransaction tx,
@@ -243,8 +286,7 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
         }
 
         final AbstractTransactionCommitCohort previous = readyTransactions.putIfAbsent(txId, cohort);
-        Preconditions.checkState(previous == null, "Duplicate cohort %s for transaction %s, already have %s",
-                cohort, txId, previous);
+        checkState(previous == null, "Duplicate cohort %s for transaction %s, already have %s", cohort, txId, previous);
 
         LOG.debug("Local history {} readied transaction {}", this, txId);
         return cohort;
@@ -268,13 +310,14 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
      *
      * @param txId transaction identifier
      */
+    // Non-final for mocking
     synchronized void onTransactionComplete(final TransactionIdentifier txId) {
         if (readyTransactions.remove(txId) == null) {
             LOG.warn("Could not find completed transaction {}", txId);
         }
     }
 
-    HistoryReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConn) {
+    final HistoryReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConn) {
         /*
          * This looks ugly and unusual and there is a reason for that, as the locking involved is in multiple places.
          *
@@ -299,7 +342,7 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
             return null;
         }
 
-        final ProxyReconnectCohort proxy = Verify.verifyNotNull(oldProxy.startReconnect(newConn));
+        final ProxyReconnectCohort proxy = verifyNotNull(oldProxy.startReconnect(newConn));
         return new HistoryReconnectCohort() {
             @Override
             ProxyReconnectCohort getProxy() {
@@ -322,5 +365,4 @@ public abstract class AbstractClientHistory extends LocalAbortable implements Id
             }
         };
     }
-
 }
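onTransactionShardsBound() above takes the StampedLock read lock only to exclude startReconnect(), which acquires the write lock, while the histories map is walked. A minimal sketch of that shared-read guard (plain JDK; the map contents are placeholders):

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.StampedLock;

final class ReadLockGuardSketch {
    private final Map<Long, String> histories = new ConcurrentHashMap<>(Map.of(0L, "default-shard", 1L, "topology"));
    private final StampedLock lock = new StampedLock();

    void skipNonParticipants(final Set<Long> participating) {
        // Readers share the lock; a writer (the reconnect path) blocks until all readers unlock.
        final long stamp = lock.readLock();
        try {
            for (var entry : histories.entrySet()) {
                if (!participating.contains(entry.getKey())) {
                    System.out.println("skipping shard cookie " + entry.getKey());
                }
            }
        } finally {
            lock.unlockRead(stamp);
        }
    }

    public static void main(final String[] args) {
        new ReadLockGuardSketch().skipNonParticipants(Set.of(0L));
    }
}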
index 12858b9cb9fc1470349de74b925f02cced49f73a..507f065d49cfe370d589326bdb92bbc37a477844 100644 (file)
@@ -12,8 +12,9 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.util.Timeout;
+import com.google.common.base.Throwables;
 import java.util.concurrent.TimeUnit;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientActor;
 import org.opendaylight.controller.cluster.access.client.ClientActorConfig;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
@@ -48,15 +49,14 @@ public abstract class AbstractDataStoreClientActor extends AbstractClientActor {
     abstract AbstractDataStoreClientBehavior initialBehavior(ClientActorContext context, ActorUtils actorUtils);
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public static DataStoreClient getDistributedDataStoreClient(@Nonnull final ActorRef actor,
+    public static DataStoreClient getDistributedDataStoreClient(final @NonNull ActorRef actor,
             final long timeout, final TimeUnit unit) {
         try {
             return (DataStoreClient) Await.result(ExplicitAsk.ask(actor, GET_CLIENT_FACTORY,
                 Timeout.apply(timeout, unit)), Duration.Inf());
-        } catch (RuntimeException e) {
-            throw e;
         } catch (Exception e) {
-            throw new RuntimeException(e);
+            Throwables.throwIfUnchecked(e);
+            throw new IllegalStateException(e);
         }
     }
 }
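The catch block above replaces the catch-RuntimeException-and-rethrow pattern with Guava's Throwables.throwIfUnchecked(), which rethrows unchecked causes unchanged and lets checked ones be wrapped exactly once. A minimal sketch of that idiom, assuming only Guava on the classpath:

import com.google.common.base.Throwables;
import java.util.concurrent.Callable;

final class ThrowIfUncheckedSketch {
    static <T> T call(final Callable<T> task) {
        try {
            return task.call();
        } catch (Exception e) {
            // RuntimeExceptions propagate as-is ...
            Throwables.throwIfUnchecked(e);
            // ... checked exceptions are wrapped in a single unchecked layer.
            throw new IllegalStateException(e);
        }
    }

    public static void main(final String[] args) {
        System.out.println(call(() -> "resolved"));
    }
}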
index 4f91cb27fae151a26ba5c49dbd8ba12498dc8238..82567c40d930c2300407eb710cbe1d40b89fa022 100644 (file)
@@ -17,13 +17,14 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.StampedLock;
-import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
+import java.util.stream.Stream;
 import org.opendaylight.controller.cluster.access.client.ClientActorBehavior;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
 import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
 import org.opendaylight.controller.cluster.access.client.ReconnectForwarder;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,7 +72,7 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
     private volatile Throwable aborted;
 
     AbstractDataStoreClientBehavior(final ClientActorContext context,
-            final BackendInfoResolver<ShardBackendInfo> resolver) {
+            final AbstractShardBackendResolver resolver) {
         super(context, resolver);
         singleHistory = new SingleClientHistory(this, new LocalHistoryIdentifier(getIdentifier(), 0));
     }
@@ -194,7 +195,7 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
         try {
             if (aborted != null) {
                 Throwables.throwIfUnchecked(aborted);
-                throw new RuntimeException(aborted);
+                throw new IllegalStateException(aborted);
             }
 
             final ClientLocalHistory history = new ClientLocalHistory(this, historyId);
@@ -224,4 +225,10 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior<Shard
     }
 
     abstract Long resolveShardForPath(YangInstanceIdentifier path);
+
+    abstract Stream<Long> resolveAllShards();
+
+    final ActorUtils actorUtils() {
+        return ((AbstractShardBackendResolver) resolver()).actorUtils();
+    }
 }
index 546738fb670ec9592f7086ac4ea84c3b8f8310c1..14ad54699161a60719ea846d353c46e73fb4adea 100644 (file)
@@ -7,10 +7,13 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import com.google.common.collect.Iterables;
 import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -19,14 +22,14 @@ import java.util.ArrayDeque;
 import java.util.Deque;
 import java.util.Iterator;
 import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import java.util.function.Consumer;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.GuardedBy;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
 import org.opendaylight.controller.cluster.access.commands.AbstractLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ClosedTransactionException;
@@ -46,6 +49,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
@@ -61,15 +65,13 @@ import org.slf4j.LoggerFactory;
  * <p>
  * This class interacts with the queueing mechanism in ClientActorBehavior, hence once we arrive at a decision
  * to use either a local or remote implementation, we are stuck with it. We can re-evaluate on the next transaction.
- *
- * @author Robert Varga
  */
-abstract class AbstractProxyTransaction implements Identifiable<TransactionIdentifier> {
+abstract sealed class AbstractProxyTransaction implements Identifiable<TransactionIdentifier>
+        permits LocalProxyTransaction, RemoteProxyTransaction {
     /**
      * Marker object used instead of read-type of requests, which are satisfied only once. This has a lower footprint
-     * and allows compressing multiple requests into a single entry.
+     * and allows compressing multiple requests into a single entry. This class is not thread-safe.
      */
-    @NotThreadSafe
     private static final class IncrementSequence {
         private final long sequence;
         private long delta = 0;
@@ -99,7 +101,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         private final String string;
 
         State(final String string) {
-            this.string = Preconditions.checkNotNull(string);
+            this.string = requireNonNull(string);
         }
 
         @Override
@@ -137,7 +139,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
                 latch.await();
             } catch (InterruptedException e) {
                 LOG.warn("Interrupted while waiting for latch of {}", successor);
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
             return successor;
         }
@@ -147,26 +149,26 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         }
 
         State getPrevState() {
-            return Verify.verifyNotNull(prevState, "Attempted to access previous state, which was not set");
+            return verifyNotNull(prevState, "Attempted to access previous state, which was not set");
         }
 
         void setPrevState(final State prevState) {
-            Verify.verify(this.prevState == null, "Attempted to set previous state to %s when we already have %s",
-                    prevState, this.prevState);
-            this.prevState = Preconditions.checkNotNull(prevState);
+            verify(this.prevState == null, "Attempted to set previous state to %s when we already have %s", prevState,
+                    this.prevState);
+            this.prevState = requireNonNull(prevState);
             // We cannot have duplicate successor states, so this check is sufficient
-            this.done = DONE.equals(prevState);
+            done = DONE.equals(prevState);
         }
 
         // To be called from safe contexts, where successor is known to be completed
         AbstractProxyTransaction getSuccessor() {
-            return Verify.verifyNotNull(successor);
+            return verifyNotNull(successor);
         }
 
         void setSuccessor(final AbstractProxyTransaction successor) {
-            Verify.verify(this.successor == null, "Attempted to set successor to %s when we already have %s",
-                    successor, this.successor);
-            this.successor = Preconditions.checkNotNull(successor);
+            verify(this.successor == null, "Attempted to set successor to %s when we already have %s", successor,
+                    this.successor);
+            this.successor = requireNonNull(successor);
         }
 
         boolean isDone() {
@@ -254,7 +256,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
     private volatile State state;
 
     AbstractProxyTransaction(final ProxyHistory parent, final boolean isDone) {
-        this.parent = Preconditions.checkNotNull(parent);
+        this.parent = requireNonNull(parent);
         if (isDone) {
             state = DONE;
             // DONE implies previous seal operation completed
@@ -292,13 +294,13 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         doDelete(path);
     }
 
-    final void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    final void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
         checkReadWrite();
         checkNotSealed();
         doMerge(path, data);
     }
 
-    final void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    final void write(final YangInstanceIdentifier path, final NormalizedNode data) {
         checkReadWrite();
         checkNotSealed();
         doWrite(path, data);
@@ -309,7 +311,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         return doExists(path);
     }
 
-    final FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
+    final FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
         checkNotSealed();
         return doRead(path);
     }
@@ -332,9 +334,9 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
     final void seal() {
         // Transition user-visible state first
         final boolean success = markSealed();
-        Preconditions.checkState(success, "Proxy %s was already sealed", getIdentifier());
+        checkState(success, "Proxy %s was already sealed", getIdentifier());
 
-        if (!sealAndSend(Optional.empty())) {
+        if (!sealAndSend(OptionalLong.empty())) {
             sealSuccessor();
         }
     }
@@ -352,13 +354,13 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         // Propagate state and seal the successor.
         final Optional<ModifyTransactionRequest> optState = flushState();
         if (optState.isPresent()) {
-            forwardToSuccessor(successor, optState.get(), null);
+            forwardToSuccessor(successor, optState.orElseThrow(), null);
         }
         successor.predecessorSealed();
     }
 
     private void predecessorSealed() {
-        if (markSealed() && !sealAndSend(Optional.empty())) {
+        if (markSealed() && !sealAndSend(OptionalLong.empty())) {
             sealSuccessor();
         }
     }
@@ -380,7 +382,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
      * @param enqueuedTicks Enqueue ticks when this is invoked from replay path.
      * @return True if seal operation was successful, false if this proxy has a successor.
      */
-    boolean sealAndSend(final Optional<Long> enqueuedTicks) {
+    boolean sealAndSend(final OptionalLong enqueuedTicks) {
         return sealState();
     }
 
@@ -400,16 +402,16 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
     }
 
     private void checkNotSealed() {
-        Preconditions.checkState(sealed == 0, "Transaction %s has already been sealed", getIdentifier());
+        checkState(sealed == 0, "Transaction %s has already been sealed", getIdentifier());
     }
 
     private void checkSealed() {
-        Preconditions.checkState(sealed != 0, "Transaction %s has not been sealed yet", getIdentifier());
+        checkState(sealed != 0, "Transaction %s has not been sealed yet", getIdentifier());
     }
 
     private SuccessorState getSuccessorState() {
         final State local = state;
-        Verify.verify(local instanceof SuccessorState, "State %s has unexpected class", local);
+        verify(local instanceof SuccessorState, "State %s has unexpected class", local);
         return (SuccessorState) local;
     }
 
@@ -419,8 +421,8 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         }
     }
 
-    final void recordSuccessfulRequest(@Nonnull final TransactionRequest<?> req) {
-        successfulRequests.add(Verify.verifyNotNull(req));
+    final void recordSuccessfulRequest(final @NonNull TransactionRequest<?> req) {
+        successfulRequests.add(verifyNotNull(req));
     }
 
     final void recordFinishedRequest(final Response<?, ?> response) {
@@ -446,7 +448,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         });
     }
 
-    final void abort(final VotingFuture<Void> ret) {
+    final void abort(final VotingFuture<Empty> ret) {
         checkSealed();
 
         sendDoAbort(t -> {
@@ -500,7 +502,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         synchronized (this) {
             if (STATE_UPDATER.compareAndSet(this, SEALED, FLUSHED)) {
                 final SettableFuture<Boolean> ret = SettableFuture.create();
-                sendRequest(Verify.verifyNotNull(commitRequest(false)), t -> {
+                sendRequest(verifyNotNull(commitRequest(false)), t -> {
                     if (t instanceof TransactionCommitSuccess) {
                         ret.set(Boolean.TRUE);
                     } else if (t instanceof RequestFailure) {
@@ -536,7 +538,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         // Precludes startReconnect() from interfering with the fast path
         synchronized (this) {
             if (STATE_UPDATER.compareAndSet(this, SEALED, FLUSHED)) {
-                final TransactionRequest<?> req = Verify.verifyNotNull(commitRequest(true));
+                final TransactionRequest<?> req = verifyNotNull(commitRequest(true));
 
                 sendRequest(req, t -> {
                     if (t instanceof TransactionCanCommitSuccess) {
@@ -662,7 +664,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
         final State prevState = STATE_UPDATER.getAndSet(this, nextState);
 
         LOG.debug("Start reconnect of proxy {} previous state {}", this, prevState);
-        Verify.verify(!(prevState instanceof SuccessorState), "Proxy %s duplicate reconnect attempt after %s", this,
+        verify(!(prevState instanceof SuccessorState), "Proxy %s duplicate reconnect attempt after %s", this,
             prevState);
 
         // We have asserted a slow-path state, seal(), canCommit(), directCommit() are forced to slow paths, which will
@@ -695,7 +697,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
                     LOG.debug("Forwarding successful request {} to successor {}", obj, successor);
                     successor.doReplayRequest((TransactionRequest<?>) obj, resp -> { /*NOOP*/ }, now);
                 } else {
-                    Verify.verify(obj instanceof IncrementSequence);
+                    verify(obj instanceof IncrementSequence);
                     final IncrementSequence increment = (IncrementSequence) obj;
                     successor.doReplayRequest(new IncrementTransactionSequenceRequest(getIdentifier(),
                         increment.getSequence(), localActor(), isSnapshotOnly(),
@@ -714,7 +716,7 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
             final Request<?, ?> req = e.getRequest();
 
             if (getIdentifier().equals(req.getTarget())) {
-                Verify.verify(req instanceof TransactionRequest, "Unhandled request %s", req);
+                verify(req instanceof TransactionRequest, "Unhandled request %s", req);
                 LOG.debug("Replaying queued request {} to successor {}", req, successor);
                 successor.doReplayRequest((TransactionRequest<?>) req, e.getCallback(), e.getEnqueuedTicks());
                 it.remove();
@@ -731,10 +733,10 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
             final long enqueuedTicks = parent.currentTime();
             final Optional<ModifyTransactionRequest> optState = flushState();
             if (optState.isPresent()) {
-                successor.handleReplayedRemoteRequest(optState.get(), null, enqueuedTicks);
+                successor.handleReplayedRemoteRequest(optState.orElseThrow(), null, enqueuedTicks);
             }
             if (successor.markSealed()) {
-                successor.sealAndSend(Optional.of(enqueuedTicks));
+                successor.sealAndSend(OptionalLong.of(enqueuedTicks));
             }
         }
     }
@@ -799,13 +801,13 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
 
     abstract void doDelete(YangInstanceIdentifier path);
 
-    abstract void doMerge(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
+    abstract void doMerge(YangInstanceIdentifier path, NormalizedNode data);
 
-    abstract void doWrite(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
+    abstract void doWrite(YangInstanceIdentifier path, NormalizedNode data);
 
     abstract FluentFuture<Boolean> doExists(YangInstanceIdentifier path);
 
-    abstract FluentFuture<Optional<NormalizedNode<?, ?>>> doRead(YangInstanceIdentifier path);
+    abstract FluentFuture<Optional<NormalizedNode>> doRead(YangInstanceIdentifier path);
 
     @GuardedBy("this")
     abstract Optional<ModifyTransactionRequest> flushState();
@@ -852,7 +854,11 @@ abstract class AbstractProxyTransaction implements Identifiable<TransactionIdent
     abstract void handleReplayedRemoteRequest(TransactionRequest<?> request,
             @Nullable Consumer<Response<?, ?>> callback, long enqueuedTicks);
 
-    private static IllegalStateException unhandledResponseException(Response<?, ?> resp) {
+    static final @NonNull IllegalArgumentException unhandledRequest(final TransactionRequest<?> request) {
+        return new IllegalArgumentException("Unhandled request " + request);
+    }
+
+    private static @NonNull IllegalStateException unhandledResponseException(final Response<?, ?> resp) {
         return new IllegalStateException("Unhandled response " + resp.getClass());
     }
 
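The class above is now declared sealed with an explicit permits list, so only LocalProxyTransaction and RemoteProxyTransaction may extend it. A minimal sketch of the sealed-hierarchy feature this relies on (Java 17+, hypothetical names unrelated to the project):

abstract sealed class ProxySketch permits LocalSketch, RemoteSketch {
    abstract String flavour();
}

final class LocalSketch extends ProxySketch {
    @Override
    String flavour() {
        return "local";
    }
}

final class RemoteSketch extends ProxySketch {
    @Override
    String flavour() {
        return "remote";
    }
}

final class SealedHierarchySketch {
    public static void main(final String[] args) {
        // The compiler knows the full set of subclasses, enabling exhaustive instanceof/switch handling.
        final ProxySketch proxy = new LocalSketch();
        System.out.println(proxy.flavour());
    }
}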
index eddbba6acdd92992bf4e145c8fb3f20596fb3000..5a436a53d3b2978ac3c1d4585220d3a2fbf44a58 100644 (file)
@@ -21,10 +21,9 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Consumer;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.GuardedBy;
-import javax.annotation.concurrent.ThreadSafe;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
@@ -49,9 +48,11 @@ import scala.compat.java8.FutureConverters;
  * shard is assigned a single cookie and this mapping is stored in a bidirectional map. Information about corresponding
  * shard leader is resolved via {@link ActorUtils}. The product of resolution is {@link ShardBackendInfo}.
  *
+ * <p>
+ * This class is thread-safe.
+ *
  * @author Robert Varga
  */
-@ThreadSafe
 abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBackendInfo> {
     static final class ShardState {
         private final CompletionStage<ShardBackendInfo> stage;
@@ -63,17 +64,17 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
             stage.whenComplete(this::onStageResolved);
         }
 
-        @Nonnull CompletionStage<ShardBackendInfo> getStage() {
+        @NonNull CompletionStage<ShardBackendInfo> getStage() {
             return stage;
         }
 
-        @Nullable synchronized ShardBackendInfo getResult() {
+        synchronized @Nullable ShardBackendInfo getResult() {
             return result;
         }
 
         private synchronized void onStageResolved(final ShardBackendInfo info, final Throwable failure) {
             if (failure == null) {
-                this.result = requireNonNull(info);
+                result = requireNonNull(info);
             } else {
                 LOG.warn("Failed to resolve shard", failure);
             }
@@ -96,7 +97,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
     // FIXME: we really need just ActorContext.findPrimaryShardAsync()
     AbstractShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
         this.actorUtils = requireNonNull(actorUtils);
-        this.connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.BORON,
+        connectFunction = ExplicitAsk.toScala(t -> new ConnectClientRequest(clientId, t, ABIVersion.POTASSIUM,
             ABIVersion.current()));
     }
 
@@ -106,7 +107,7 @@ abstract class AbstractShardBackendResolver extends BackendInfoResolver<ShardBac
         return () -> staleBackendInfoCallbacks.remove(callback);
     }
 
-    protected void notifyStaleBackendInfoCallbacks(Long cookie) {
+    protected void notifyStaleBackendInfoCallbacks(final Long cookie) {
         staleBackendInfoCallbacks.forEach(callback -> callback.accept(cookie));
     }
 
index 789309cf8a3d6d1b328e6a0f6bfc50b2ac07ddd9..77de1e45d82bd0e8f797b4152830cf8aa9cda949 100644 (file)
@@ -7,12 +7,14 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 /**
  * Base class for internal {@link DOMStoreThreePhaseCommitCohort} implementation. It contains utility constants for
@@ -22,14 +24,14 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
  */
 abstract class AbstractTransactionCommitCohort implements DOMStoreThreePhaseCommitCohort {
     static final ListenableFuture<Boolean> TRUE_FUTURE = Futures.immediateFuture(Boolean.TRUE);
-    static final ListenableFuture<Void> VOID_FUTURE = Futures.immediateFuture(null);
+    static final ListenableFuture<Empty> EMPTY_FUTURE = Futures.immediateFuture(Empty.value());
 
     private final AbstractClientHistory parent;
     private final TransactionIdentifier txId;
 
     AbstractTransactionCommitCohort(final AbstractClientHistory parent, final TransactionIdentifier txId) {
-        this.parent = Preconditions.checkNotNull(parent);
-        this.txId = Preconditions.checkNotNull(txId);
+        this.parent = requireNonNull(parent);
+        this.txId = requireNonNull(txId);
     }
 
     final void complete() {
index 26e346e77cb4e0a0a19b6c66b5f5903d141a4803..52bf1d930fd71b9cca62a3a4669c4c7760e155eb 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.Collections2;
 import com.google.common.collect.Maps;
 import java.util.Collection;
@@ -46,7 +47,7 @@ final class BouncingReconnectForwarder extends ReconnectForwarder {
     private BouncingReconnectForwarder(final ConnectedClientConnection<?> successor,
             final Map<LocalHistoryIdentifier, ProxyReconnectCohort> cohorts) {
         super(successor);
-        this.cohorts = Preconditions.checkNotNull(cohorts);
+        this.cohorts = requireNonNull(cohorts);
     }
 
     static ReconnectForwarder forCohorts(final ConnectedClientConnection<?> successor,
index 74c4ae48b0cdf70fcd00ef7e8f2179961f508e05..5611a1044f1446452ab8eeedbf292ca9c8384dbd 100644 (file)
@@ -11,6 +11,7 @@ import com.google.common.annotations.Beta;
 import com.google.common.util.concurrent.FluentFuture;
 import java.util.Optional;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
@@ -27,19 +28,21 @@ public class ClientSnapshot extends AbstractClientHandle<AbstractProxyTransactio
         super(parent, transactionId);
     }
 
-    private AbstractProxyTransaction createProxy(final Long shard) {
-        return parent().createSnapshotProxy(getIdentifier(), shard);
+    public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
+        return ensureProxy(path).exists(path);
     }
 
-    private AbstractProxyTransaction ensureSnapshotProxy(final YangInstanceIdentifier path) {
-        return ensureProxy(path, this::createProxy);
+    public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
+        return path.isEmpty() ? readRoot() : ensureProxy(path).read(path);
     }
 
-    public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
-        return ensureSnapshotProxy(path).exists(path);
+    private FluentFuture<Optional<NormalizedNode>> readRoot() {
+        return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies()
+            .map(proxy -> proxy.read(YangInstanceIdentifier.of())));
     }
 
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
-        return ensureSnapshotProxy(path).read(path);
+    @Override
+    final AbstractProxyTransaction createProxy(final Long shard) {
+        return parent().createSnapshotProxy(getIdentifier(), shard);
     }
 }
index 9c0983879a799576cde9ec37ea154e9d54392bc6..b2ff5d5184d58753fa76bf90df457abfe6e685f0 100644 (file)
@@ -7,18 +7,20 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static com.google.common.base.Preconditions.checkState;
+
 import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterables;
 import com.google.common.util.concurrent.FluentFuture;
 import java.util.Collection;
+import java.util.Map;
 import java.util.Optional;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
+import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 /**
@@ -53,72 +55,84 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  */
 @Beta
 public class ClientTransaction extends AbstractClientHandle<AbstractProxyTransaction> {
-
-    private ClientTransactionCursor cursor;
-
     ClientTransaction(final AbstractClientHistory parent, final TransactionIdentifier transactionId) {
         super(parent, transactionId);
     }
 
-    private AbstractProxyTransaction createProxy(final Long shard) {
-        return parent().createTransactionProxy(getIdentifier(), shard);
+    public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
+        return ensureProxy(path).exists(path);
     }
 
-    private AbstractProxyTransaction ensureTransactionProxy(final YangInstanceIdentifier path) {
-        return ensureProxy(path, this::createProxy);
+    public FluentFuture<Optional<NormalizedNode>> read(final YangInstanceIdentifier path) {
+        return path.isEmpty() ? readRoot() : ensureProxy(path).read(path);
     }
 
-    public DOMDataTreeWriteCursor openCursor() {
-        Preconditions.checkState(cursor == null, "Transaction %s has open cursor", getIdentifier());
-        cursor = new ClientTransactionCursor(this);
-        return cursor;
+    private FluentFuture<Optional<NormalizedNode>> readRoot() {
+        return RootScatterGather.gather(parent().actorUtils(), ensureAllProxies()
+            .map(proxy -> proxy.read(YangInstanceIdentifier.of())));
     }
 
-    public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
-        return ensureTransactionProxy(path).exists(path);
+    public void delete(final YangInstanceIdentifier path) {
+        if (path.isEmpty()) {
+            ensureAllProxies().forEach(proxy -> proxy.delete(YangInstanceIdentifier.of()));
+        } else {
+            ensureProxy(path).delete(path);
+        }
+    }
+
+    public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
+        if (path.isEmpty()) {
+            mergeRoot(RootScatterGather.castRootNode(data));
+        } else {
+            ensureProxy(path).merge(path, data);
+        }
     }
 
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
-        return ensureTransactionProxy(path).read(path);
+    private void mergeRoot(final @NonNull ContainerNode rootData) {
+        if (!rootData.isEmpty()) {
+            RootScatterGather.scatterTouched(rootData, this::ensureProxy).forEach(
+                scattered -> scattered.shard().merge(YangInstanceIdentifier.of(), scattered.container()));
+        }
     }
 
-    public void delete(final YangInstanceIdentifier path) {
-        ensureTransactionProxy(path).delete(path);
+    public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
+        if (path.isEmpty()) {
+            writeRoot(RootScatterGather.castRootNode(data));
+        } else {
+            ensureProxy(path).write(path, data);
+        }
     }
 
-    public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        ensureTransactionProxy(path).merge(path, data);
+    private void writeRoot(final @NonNull ContainerNode rootData) {
+        RootScatterGather.scatterAll(rootData, this::ensureProxy, ensureAllProxies()).forEach(
+            scattered -> scattered.shard().write(YangInstanceIdentifier.of(), scattered.container()));
     }
 
-    public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        ensureTransactionProxy(path).write(path, data);
+    private AbstractProxyTransaction ensureProxy(final PathArgument childId) {
+        return ensureProxy(YangInstanceIdentifier.of(childId));
     }
 
     public DOMStoreThreePhaseCommitCohort ready() {
-        final Collection<AbstractProxyTransaction> toReady = ensureClosed();
-        Preconditions.checkState(toReady != null, "Attempted to submit a closed transaction %s", this);
+        final Map<Long, AbstractProxyTransaction> participants = ensureClosed();
+        checkState(participants != null, "Attempted to submit a closed transaction %s", this);
 
+        final Collection<AbstractProxyTransaction> toReady = participants.values();
         toReady.forEach(AbstractProxyTransaction::seal);
-        final AbstractTransactionCommitCohort cohort;
-        switch (toReady.size()) {
-            case 0:
-                cohort = new EmptyTransactionCommitCohort(parent(), getIdentifier());
-                break;
-            case 1:
-                cohort = new DirectTransactionCommitCohort(parent(), getIdentifier(),
-                    Iterables.getOnlyElement(toReady));
-                break;
-            default:
-                cohort = new ClientTransactionCommitCohort(parent(), getIdentifier(), toReady);
-                break;
-        }
 
-        return parent().onTransactionReady(this, cohort);
+        final TransactionIdentifier txId = getIdentifier();
+        final AbstractClientHistory parent = parent();
+        parent.onTransactionShardsBound(txId, participants.keySet());
+
+        final AbstractTransactionCommitCohort cohort = switch (toReady.size()) {
+            case 0 -> new EmptyTransactionCommitCohort(parent, txId);
+            case 1 -> new DirectTransactionCommitCohort(parent, txId, toReady.iterator().next());
+            default -> new ClientTransactionCommitCohort(parent, txId, toReady);
+        };
+        return parent.onTransactionReady(this, cohort);
     }
 
-    void closeCursor(@Nonnull final DOMDataTreeCursor cursorToClose) {
-        if (cursorToClose.equals(this.cursor)) {
-            this.cursor = null;
-        }
+    @Override
+    final AbstractProxyTransaction createProxy(final Long shard) {
+        return parent().createTransactionProxy(getIdentifier(), shard);
     }
 }
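ready() above replaces a switch statement over the participant count with a switch expression that yields the cohort directly. A minimal sketch of that selection idiom (Java 14+ switch expressions; cohorts are modelled as plain strings):

import java.util.List;

final class CohortSelectionSketch {
    static String cohortFor(final List<String> participants) {
        return switch (participants.size()) {
            case 0 -> "empty cohort";
            case 1 -> "direct cohort for " + participants.get(0);
            default -> "voting cohort over " + participants.size() + " participants";
        };
    }

    public static void main(final String[] args) {
        System.out.println(cohortFor(List.of()));
        System.out.println(cohortFor(List.of("shard-0")));
        System.out.println(cohortFor(List.of("shard-0", "shard-1")));
    }
}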
index a4eb5e074f421ffc3e8bb718f02e747540f3839a..7887577a939bc3abf2c5d84d24e1dfe320f75299 100644 (file)
@@ -12,6 +12,8 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.Collection;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohort {
     private final Collection<AbstractProxyTransaction> proxies;
@@ -35,14 +37,14 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor
         return ret;
     }
 
-    private ListenableFuture<Void> addComplete(final ListenableFuture<Void> future) {
+    private <T> ListenableFuture<T> addComplete(final ListenableFuture<T> future) {
         future.addListener(this::complete, MoreExecutors.directExecutor());
         return future;
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+    public ListenableFuture<Empty> preCommit() {
+        final var ret = new VotingFuture<>(Empty.value(), proxies.size());
         for (AbstractProxyTransaction proxy : proxies) {
             proxy.preCommit(ret);
         }
@@ -51,8 +53,8 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
-        final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+    public ListenableFuture<CommitInfo> commit() {
+        final var ret = new VotingFuture<>(CommitInfo.empty(), proxies.size());
         for (AbstractProxyTransaction proxy : proxies) {
             proxy.doCommit(ret);
         }
@@ -61,8 +63,8 @@ final class ClientTransactionCommitCohort extends AbstractTransactionCommitCohor
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
-        final VotingFuture<Void> ret = new VotingFuture<>(null, proxies.size());
+    public ListenableFuture<Empty> abort() {
+        final var ret = new VotingFuture<>(Empty.value(), proxies.size());
         for (AbstractProxyTransaction proxy : proxies) {
             proxy.abort(ret);
         }
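The cohort above moves its three-phase futures from ListenableFuture&lt;Void&gt; to ListenableFuture&lt;Empty&gt; and ListenableFuture&lt;CommitInfo&gt;, so a successful completion always carries a non-null value. A minimal Guava-only sketch of the difference (the enum is a hypothetical stand-in for yangtools' Empty):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.ExecutionException;

final class NonNullCompletionSketch {
    // Stand-in for org.opendaylight.yangtools.yang.common.Empty: a single, non-null marker value.
    enum Marker { VALUE }

    public static void main(final String[] args) throws InterruptedException, ExecutionException {
        final ListenableFuture<Void> asVoid = Futures.immediateFuture(null);
        final ListenableFuture<Marker> asMarker = Futures.immediateFuture(Marker.VALUE);

        System.out.println(asVoid.get());   // prints null: success of a Void future carries no value
        System.out.println(asMarker.get()); // prints VALUE: success is represented by a real object
    }
}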
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCursor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCursor.java
deleted file mode 100644 (file)
index 41d2cb8..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker.actors.dds;
-
-import com.google.common.base.Preconditions;
-import java.util.Arrays;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * A {@link DOMDataTreeWriteCursor} tied to a {@link ClientTransaction}.
- *
- * @author Robert Varga
- */
-final class ClientTransactionCursor implements DOMDataTreeWriteCursor {
-    private YangInstanceIdentifier current = YangInstanceIdentifier.EMPTY;
-    private final ClientTransaction parent;
-
-    ClientTransactionCursor(final ClientTransaction parent) {
-        this.parent = Preconditions.checkNotNull(parent);
-    }
-
-    @Override
-    public void enter(final PathArgument child) {
-        current = current.node(child);
-    }
-
-    @Override
-    public void enter(final PathArgument... path) {
-        enter(Arrays.asList(path));
-    }
-
-    @Override
-    public void enter(final Iterable<PathArgument> path) {
-        path.forEach(this::enter);
-    }
-
-    @Override
-    public void exit() {
-        final YangInstanceIdentifier currentParent = current.getParent();
-        Preconditions.checkState(currentParent != null);
-        current = currentParent;
-    }
-
-    @Override
-    public void exit(final int depth) {
-        for (int i = 0; i < depth; ++i) {
-            exit();
-        }
-    }
-
-    @Override
-    public void close() {
-        parent.closeCursor(this);
-    }
-
-    @Override
-    public void delete(final PathArgument child) {
-        parent.delete(current.node(child));
-    }
-
-    @Override
-    public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
-        parent.merge(current.node(child), data);
-    }
-
-    @Override
-    public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
-        parent.write(current.node(child), data);
-    }
-}
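
The deleted cursor tracked its position by extending the current YangInstanceIdentifier on enter() and trimming it on exit(), delegating each write/merge/delete to the parent transaction at that position. A framework-free sketch of that position-tracking idea, using plain JDK types and hypothetical names instead of the yangtools API:

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Illustrative only: models the enter()/exit()/write() position tracking of a write cursor.
    final class PathCursor {
        private final Deque<String> current = new ArrayDeque<>();

        void enter(final String child) {
            current.addLast(child);    // descend one level, like current.node(child)
        }

        void exit() {
            if (current.isEmpty()) {
                throw new IllegalStateException("Cannot exit the root");
            }
            current.removeLast();      // move back to the parent path
        }

        String write(final String child) {
            // A real cursor would delegate to its parent transaction at current.node(child);
            // here we simply render the absolute path being written.
            return String.join("/", current) + "/" + child;
        }
    }
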
index 2032b5498401f3969c956319ca96100d995c9bcc..e3dc91cbaea2b5228ad11e91aa0ab04594dc438d 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import com.google.common.annotations.Beta;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
 
@@ -22,10 +22,8 @@ import org.opendaylight.yangtools.concepts.Identifiable;
  * @author Robert Varga
  */
 @Beta
+@NonNullByDefault
 public interface DataStoreClient extends Identifiable<ClientIdentifier>, AutoCloseable {
-    @Override
-    @Nonnull ClientIdentifier getIdentifier();
-
     @Override
     void close();
 
@@ -34,19 +32,19 @@ public interface DataStoreClient extends Identifiable<ClientIdentifier>, AutoClo
      *
      * @return Client history handle
      */
-    @Nonnull ClientLocalHistory createLocalHistory();
+    ClientLocalHistory createLocalHistory();
 
     /**
      * Create a new free-standing snapshot.
      *
      * @return Client snapshot handle
      */
-    @Nonnull ClientSnapshot createSnapshot();
+    ClientSnapshot createSnapshot();
 
     /**
      * Create a new free-standing transaction.
      *
      * @return Client transaction handle
      */
-    @Nonnull ClientTransaction createTransaction();
+    ClientTransaction createTransaction();
 }
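
With @NonNullByDefault on the type, every parameter and return value is treated as non-null unless declared otherwise, which is presumably why the per-method @Nonnull annotations and the getIdentifier() re-declaration could be dropped. A small hypothetical interface showing the same effect:

    import org.eclipse.jdt.annotation.NonNullByDefault;
    import org.eclipse.jdt.annotation.Nullable;

    // Illustrative only: the type-level default makes individual @NonNull annotations redundant.
    @NonNullByDefault
    interface ExampleClient extends AutoCloseable {
        // Implicitly non-null return value and parameter.
        String createHandle(String name);

        // Exceptions to the default still have to be spelled out.
        @Nullable String lookupHandle(String name);

        @Override
        void close();
    }
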
index 1d5d4a70dedac1c10122ad991ac3c3ae3f9f1507..5b5ff5864a3f8a50bffa650fce2db2a04e7f4fdd 100644 (file)
@@ -7,9 +7,12 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 /**
  * An {@link AbstractTransactionCommitCohort} implementation for transactions which contain a single proxy. Since there
@@ -23,7 +26,7 @@ final class DirectTransactionCommitCohort extends AbstractTransactionCommitCohor
     DirectTransactionCommitCohort(final AbstractClientHistory parent, final TransactionIdentifier txId,
         final AbstractProxyTransaction proxy) {
         super(parent, txId);
-        this.proxy = Preconditions.checkNotNull(proxy);
+        this.proxy = requireNonNull(proxy);
     }
 
     @Override
@@ -32,19 +35,19 @@ final class DirectTransactionCommitCohort extends AbstractTransactionCommitCohor
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        return VOID_FUTURE;
+    public ListenableFuture<Empty> preCommit() {
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
+    public ListenableFuture<Empty> abort() {
         complete();
-        return VOID_FUTURE;
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
+    public ListenableFuture<CommitInfo> commit() {
         complete();
-        return VOID_FUTURE;
+        return CommitInfo.emptyFluentFuture();
     }
 }
index 719a3b13b29f798d54593b57c79bbee5acd009e4..1701d4c894b5f7fddf04ec9511e343992d14d28b 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import akka.actor.Props;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientActor;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
@@ -31,8 +31,8 @@ public final class DistributedDataStoreClientActor extends AbstractDataStoreClie
         return new DistributedDataStoreClientBehavior(context, actorUtils);
     }
 
-    public static Props props(@Nonnull final MemberName memberName, @Nonnull final String storeName,
-            final ActorUtils ctx) {
+    public static Props props(final @NonNull MemberName memberName, final @NonNull String storeName,
+            final @NonNull ActorUtils ctx) {
         final String name = "datastore-" + storeName;
         final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, FrontendType.forName(name));
         return Props.create(DistributedDataStoreClientActor.class,
index e40da21d138c2c5bde8876bae053bdf3f96db03b..f8927c28c859f0eab65c00efccd7a8a304baf109 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import java.util.function.Function;
+import java.util.stream.Stream;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -18,12 +18,12 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  * @author Robert Varga
  */
 final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBehavior {
-    private final Function<YangInstanceIdentifier, Long> pathToShard;
+    private final ModuleShardBackendResolver resolver;
 
     private DistributedDataStoreClientBehavior(final ClientActorContext context,
             final ModuleShardBackendResolver resolver) {
         super(context, resolver);
-        pathToShard = resolver::resolveShardForPath;
+        this.resolver = resolver;
     }
 
     DistributedDataStoreClientBehavior(final ClientActorContext context, final ActorUtils actorUtils) {
@@ -32,7 +32,12 @@ final class DistributedDataStoreClientBehavior extends AbstractDataStoreClientBe
 
     @Override
     Long resolveShardForPath(final YangInstanceIdentifier path) {
-        return pathToShard.apply(path);
+        return resolver.resolveShardForPath(path);
+    }
+
+    @Override
+    Stream<Long> resolveAllShards() {
+        return resolver.resolveAllShards();
     }
 
     @Override
index 7193dd053f762cb37c4cf701afe1efd89fb33919..5b11d8679e31ba9801a10638228f0bb58504ee15 100644 (file)
@@ -9,6 +9,8 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 /**
  * An {@link AbstractTransactionCommitCohort} for use with empty transactions. This relies on the fact that no backends
@@ -30,19 +32,19 @@ final class EmptyTransactionCommitCohort extends AbstractTransactionCommitCohort
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        return VOID_FUTURE;
+    public ListenableFuture<Empty> preCommit() {
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
+    public ListenableFuture<Empty> abort() {
         complete();
-        return VOID_FUTURE;
+        return EMPTY_FUTURE;
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
+    public ListenableFuture<CommitInfo> commit() {
         complete();
-        return VOID_FUTURE;
+        return CommitInfo.emptyFluentFuture();
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModification.java
new file mode 100644 (file)
index 0000000..63dbc92
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker.actors.dds;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+
+/**
+ * A {@link CursorAwareDataTreeModification} which does not really do anything and throws a
+ * {@link FailedDataTreeModificationException} for most of its operations. Used when
+ * {@link DataTreeSnapshot#newModification()} fails; see {@link LocalReadWriteProxyTransaction} for details. Surrounding
+ * code should guard against invocation of most of these methods.
+ */
+record FailedDataTreeModification(
+        @NonNull EffectiveModelContext modelContext,
+        @NonNull Exception cause) implements CursorAwareDataTreeModification {
+
+    FailedDataTreeModification {
+        requireNonNull(modelContext);
+        requireNonNull(cause);
+    }
+
+    @Override
+    public void delete(final YangInstanceIdentifier path) {
+        throw ex();
+    }
+
+    @Override
+    public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
+        throw ex();
+    }
+
+    @Override
+    public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
+        throw ex();
+    }
+
+    @Override
+    public void ready() {
+        // No-op
+    }
+
+    @Override
+    public void applyToCursor(final DataTreeModificationCursor cursor) {
+        throw ex();
+    }
+
+    @Override
+    public Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
+        throw ex();
+    }
+
+    @Override
+    public CursorAwareDataTreeModification newModification() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Optional<? extends DataTreeModificationCursor> openCursor(final YangInstanceIdentifier path) {
+        throw ex();
+    }
+
+    private @NonNull FailedDataTreeModificationException ex() {
+        return new FailedDataTreeModificationException(cause);
+    }
+}
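
The record above acts as a "poison" modification: it remembers the original cause and rethrows it from the data-access methods, while ready() remains a harmless no-op. A compact standalone sketch of that pattern, using hypothetical Modification/FailedModification names rather than the yangtools interfaces:

    import static java.util.Objects.requireNonNull;

    // Illustrative only: a failed placeholder that replays its recorded cause on access.
    interface Modification {
        String read(String path);

        void ready();
    }

    record FailedModification(Exception cause) implements Modification {
        FailedModification {
            requireNonNull(cause);
        }

        @Override
        public String read(final String path) {
            // Surface the original failure to whoever actually touches the data.
            throw new IllegalStateException("Modification failed earlier", cause);
        }

        @Override
        public void ready() {
            // Intentional no-op: sealing a failed modification is harmless.
        }
    }
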
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/FailedDataTreeModificationException.java
new file mode 100644 (file)
index 0000000..5f860a0
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker.actors.dds;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * A wrapper {@link RuntimeException} thrown by {@link FailedDataTreeModification} from its user-facing methods.
+ */
+final class FailedDataTreeModificationException extends RuntimeException {
+    private static final long serialVersionUID = 1L;
+
+    FailedDataTreeModificationException(final Exception cause) {
+        super(null, requireNonNull(cause), false, false);
+    }
+}
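
The super(null, requireNonNull(cause), false, false) call above uses the four-argument RuntimeException constructor; the two booleans disable suppression and stack-trace capture, which keeps this wrapper cheap to throw on the read path. A self-contained sketch of the same idiom with a hypothetical exception name:

    import static java.util.Objects.requireNonNull;

    // Illustrative only: a lightweight wrapper exception that skips stack-trace capture.
    final class CheapWrapperException extends RuntimeException {
        private static final long serialVersionUID = 1L;

        CheapWrapperException(final Exception cause) {
            // message = null, enableSuppression = false, writableStackTrace = false:
            // the exception carries only its cause and costs almost nothing to create.
            super(null, requireNonNull(cause), false, false);
        }
    }
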
index db9142474720fea4b37d8cc55a85e686afc5a14f..7d9d74d8ea5ede282c50d7d149072b12e59cd99a 100644 (file)
@@ -7,8 +7,9 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
 
 /**
  * Request the ClientIdentifier from a particular actor. Response is an instance of {@link DataStoreClient}.
@@ -19,7 +20,7 @@ final class GetClientRequest {
     private final ActorRef replyTo;
 
     GetClientRequest(final ActorRef replyTo) {
-        this.replyTo = Preconditions.checkNotNull(replyTo);
+        this.replyTo = requireNonNull(replyTo);
     }
 
     ActorRef getReplyTo() {
index 2a81e1d0aa37714cf208beea5d9012cf893ff96c..6c4006e93f0a2197937ce7ff90b3b7c5a3e8b00a 100644 (file)
@@ -7,13 +7,13 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.FluentFuture;
 import java.util.Optional;
 import java.util.function.Consumer;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.AbstractLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
@@ -26,20 +26,22 @@ import org.opendaylight.controller.cluster.access.commands.ReadTransactionSucces
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.concepts.Response;
+import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
+import org.opendaylight.mdsal.common.api.ReadFailedException;
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AbstractProxyTransaction} for dispatching a transaction towards a shard leader which is co-located with
- * the client instance.
+ * the client instance. This class is NOT thread-safe.
  *
  * <p>
  * It requires a {@link DataTreeSnapshot}, which is used to instantiate a new {@link DataTreeModification}. Operations
@@ -49,18 +51,16 @@ import org.slf4j.LoggerFactory;
  * <p>
  * This class is not thread-safe as usual with transactions. Since it does not interact with the backend until the
  * transaction is submitted, at which point this class gets out of the picture, this is not a cause for concern.
- *
- * @author Robert Varga
  */
-@NotThreadSafe
-abstract class LocalProxyTransaction extends AbstractProxyTransaction {
+abstract sealed class LocalProxyTransaction extends AbstractProxyTransaction
+        permits LocalReadOnlyProxyTransaction, LocalReadWriteProxyTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(LocalProxyTransaction.class);
 
-    private final TransactionIdentifier identifier;
+    private final @NonNull TransactionIdentifier identifier;
 
     LocalProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier, final boolean isDone) {
         super(parent, isDone);
-        this.identifier = Preconditions.checkNotNull(identifier);
+        this.identifier = requireNonNull(identifier);
     }
 
     @Override
@@ -68,8 +68,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         return identifier;
     }
 
-    @Nonnull
-    abstract DataTreeSnapshot readOnlyView();
+    abstract @NonNull DataTreeSnapshot readOnlyView();
 
     abstract void applyForwardedModifyTransactionRequest(ModifyTransactionRequest request,
             @Nullable Consumer<Response<?, ?>> callback);
@@ -78,13 +77,25 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
             @Nullable Consumer<Response<?, ?>> callback, long enqueuedTicks);
 
     @Override
-    final FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
-        return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path).isPresent());
+    FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
+        final boolean result;
+        try {
+            result = readOnlyView().readNode(path).isPresent();
+        } catch (FailedDataTreeModificationException e) {
+            return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e));
+        }
+        return FluentFutures.immediateBooleanFluentFuture(result);
     }
 
     @Override
-    final FluentFuture<Optional<NormalizedNode<?, ?>>> doRead(final YangInstanceIdentifier path) {
-        return FluentFutures.immediateFluentFuture(readOnlyView().readNode(path));
+    FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+        final Optional<NormalizedNode> result;
+        try {
+            result = readOnlyView().readNode(path);
+        } catch (FailedDataTreeModificationException e) {
+            return FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(e));
+        }
+        return FluentFutures.immediateFluentFuture(result);
     }
 
     @Override
@@ -98,42 +109,13 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         if (request instanceof AbortLocalTransactionRequest) {
             enqueueAbort(request, callback, enqueuedTicks);
         } else {
-            throw new IllegalArgumentException("Unhandled request" + request);
-        }
-    }
-
-    private boolean handleReadRequest(final TransactionRequest<?> request,
-            @Nullable final Consumer<Response<?, ?>> callback) {
-        // Note we delay completion of read requests to limit the scope at which the client can run, as they have
-        // listeners, which we do not want to execute while we are reconnecting.
-        if (request instanceof ReadTransactionRequest) {
-            final YangInstanceIdentifier path = ((ReadTransactionRequest) request).getPath();
-            final Optional<NormalizedNode<?, ?>> result = readOnlyView().readNode(path);
-            if (callback != null) {
-                // XXX: FB does not see that callback is final, on stack and has be check for non-null.
-                final Consumer<Response<?, ?>> fbIsStupid = Preconditions.checkNotNull(callback);
-                executeInActor(() -> fbIsStupid.accept(new ReadTransactionSuccess(request.getTarget(),
-                    request.getSequence(), result)));
-            }
-            return true;
-        } else if (request instanceof ExistsTransactionRequest) {
-            final YangInstanceIdentifier path = ((ExistsTransactionRequest) request).getPath();
-            final boolean result = readOnlyView().readNode(path).isPresent();
-            if (callback != null) {
-                // XXX: FB does not see that callback is final, on stack and has be check for non-null.
-                final Consumer<Response<?, ?>> fbIsStupid = Preconditions.checkNotNull(callback);
-                executeInActor(() -> fbIsStupid.accept(new ExistsTransactionSuccess(request.getTarget(),
-                    request.getSequence(), result)));
-            }
-            return true;
-        } else {
-            return false;
+            throw unhandledRequest(request);
         }
     }
 
     @Override
-    void handleReplayedRemoteRequest(final TransactionRequest<?> request,
-            @Nullable final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
+    void handleReplayedRemoteRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback,
+            final long enqueuedTicks) {
         if (request instanceof ModifyTransactionRequest) {
             replayModifyTransactionRequest((ModifyTransactionRequest) request, callback, enqueuedTicks);
         } else if (handleReadRequest(request, callback)) {
@@ -145,7 +127,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
             // hence we can skip sequence increments.
             LOG.debug("Not replaying {}", request);
         } else {
-            throw new IllegalArgumentException("Unhandled request " + request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -165,26 +147,67 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof TransactionPurgeRequest) {
             enqueuePurge(callback);
         } else {
-            throw new IllegalArgumentException("Unhandled request " + request);
+            throw unhandledRequest(request);
+        }
+    }
+
+    @NonNull Response<?, ?> handleExistsRequest(final @NonNull DataTreeSnapshot snapshot,
+            final @NonNull ExistsTransactionRequest request) {
+        try {
+            return new ExistsTransactionSuccess(request.getTarget(), request.getSequence(),
+                snapshot.readNode(request.getPath()).isPresent());
+        } catch (FailedDataTreeModificationException e) {
+            return request.toRequestFailure(new RuntimeRequestException("Failed to access data",
+                ReadFailedException.MAPPER.apply(e)));
+        }
+    }
+
+    @NonNull Response<?, ?> handleReadRequest(final @NonNull DataTreeSnapshot snapshot,
+            final @NonNull ReadTransactionRequest request) {
+        try {
+            return new ReadTransactionSuccess(request.getTarget(), request.getSequence(),
+                snapshot.readNode(request.getPath()));
+        } catch (FailedDataTreeModificationException e) {
+            return request.toRequestFailure(new RuntimeRequestException("Failed to access data",
+                ReadFailedException.MAPPER.apply(e)));
+        }
+    }
+
+    private boolean handleReadRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
+        // Note we delay completion of read requests to limit the scope at which the client can run, as they have
+        // listeners, which we do not want to execute while we are reconnecting.
+        if (request instanceof ReadTransactionRequest) {
+            if (callback != null) {
+                final var response = handleReadRequest(readOnlyView(), (ReadTransactionRequest) request);
+                executeInActor(() -> callback.accept(response));
+            }
+            return true;
+        } else if (request instanceof ExistsTransactionRequest) {
+            if (callback != null) {
+                final var response = handleExistsRequest(readOnlyView(), (ExistsTransactionRequest) request);
+                executeInActor(() -> callback.accept(response));
+            }
+            return true;
+        } else {
+            return false;
         }
     }
 
     @Override
     final void forwardToRemote(final RemoteProxyTransaction successor, final TransactionRequest<?> request,
                          final Consumer<Response<?, ?>> callback) {
-        if (request instanceof CommitLocalTransactionRequest) {
-            final CommitLocalTransactionRequest req = (CommitLocalTransactionRequest) request;
+        if (request instanceof final CommitLocalTransactionRequest req) {
             final DataTreeModification mod = req.getModification();
 
             LOG.debug("Applying modification {} to successor {}", mod, successor);
             mod.applyToCursor(new AbstractDataTreeModificationCursor() {
                 @Override
-                public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+                public void write(final PathArgument child, final NormalizedNode data) {
                     successor.write(current().node(child), data);
                 }
 
                 @Override
-                public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+                public void merge(final PathArgument child, final NormalizedNode data) {
                     successor.merge(current().node(child), data);
                 }
 
@@ -206,7 +229,7 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof ModifyTransactionRequest) {
             successor.handleForwardedRequest(request, callback);
         } else {
-            throwUnhandledRequest(request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -218,16 +241,12 @@ abstract class LocalProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof TransactionPurgeRequest) {
             successor.enqueuePurge(callback);
         } else {
-            throwUnhandledRequest(request);
+            throw unhandledRequest(request);
         }
 
         LOG.debug("Forwarded request {} to successor {}", request, successor);
     }
 
-    private static void throwUnhandledRequest(final TransactionRequest<?> request) {
-        throw new IllegalArgumentException("Unhandled request" + request);
-    }
-
     void sendAbort(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
         sendRequest(request, callback);
     }
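
LocalProxyTransaction is now a sealed class with an explicit permits list, so only the two known specializations may extend it. A minimal sealed-hierarchy sketch with hypothetical names, showing the closed subclassing and instanceof pattern handling this enables:

    // Illustrative only: a sealed base with exactly two permitted subclasses.
    abstract sealed class Txn permits ReadOnlyTxn, ReadWriteTxn {
        abstract String describe();
    }

    final class ReadOnlyTxn extends Txn {
        @Override
        String describe() {
            return "read-only";
        }
    }

    final class ReadWriteTxn extends Txn {
        @Override
        String describe() {
            return "read-write";
        }
    }

    final class TxnDispatcher {
        static String dispatch(final Txn txn) {
            // The permits clause guarantees these are the only possible shapes.
            if (txn instanceof ReadWriteTxn rw) {
                return "rw:" + rw.describe();
            }
            return "ro:" + txn.describe();
        }
    }
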
index ee5889da767bcdb08f05fc0ab5a7c64bcbb95cab..6d019af1a19383b460ff95524a02b501cb12aa30 100644 (file)
@@ -7,11 +7,12 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Optional;
 import java.util.function.Consumer;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol;
@@ -19,28 +20,26 @@ import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 /**
- * A read-only specialization of {@link LocalProxyTransaction}.
+ * A read-only specialization of {@link LocalProxyTransaction}. This class is NOT thread-safe.
  *
  * @author Robert Varga
  */
-@NotThreadSafe
 final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
-
     private final DataTreeSnapshot snapshot;
 
     LocalReadOnlyProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier,
         final DataTreeSnapshot snapshot) {
         super(parent, identifier, false);
-        this.snapshot = Preconditions.checkNotNull(snapshot);
+        this.snapshot = requireNonNull(snapshot);
     }
 
     LocalReadOnlyProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) {
         super(parent, identifier, true);
         // It is an error to touch snapshot once we are DONE
-        this.snapshot = null;
+        snapshot = null;
     }
 
     @Override
@@ -50,7 +49,7 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
 
     @Override
     DataTreeSnapshot readOnlyView() {
-        return Preconditions.checkNotNull(snapshot, "Transaction %s is DONE", getIdentifier());
+        return verifyNotNull(snapshot, "Transaction %s is DONE", getIdentifier());
     }
 
     @Override
@@ -59,12 +58,12 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
     }
 
     @Override
-    void doMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    void doMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
         throw new UnsupportedOperationException("doMerge");
     }
 
     @Override
-    void doWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    void doWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
         throw new UnsupportedOperationException("doWrite");
     }
 
@@ -94,9 +93,9 @@ final class LocalReadOnlyProxyTransaction extends LocalProxyTransaction {
     }
 
     private static void commonModifyTransactionRequest(final ModifyTransactionRequest request) {
-        Verify.verify(request.getModifications().isEmpty());
+        verify(request.getModifications().isEmpty());
 
-        final PersistenceProtocol protocol = request.getPersistenceProtocol().get();
-        Verify.verify(protocol == PersistenceProtocol.ABORT);
+        final PersistenceProtocol protocol = request.getPersistenceProtocol().orElseThrow();
+        verify(protocol == PersistenceProtocol.ABORT);
     }
 }
index 8b19e30b4b641db2eea3abca902fbded1f086c0d..47ae6a2bc7a27ae60fc1a8b094411c65c03d4a21 100644 (file)
@@ -7,21 +7,27 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import com.google.common.util.concurrent.FluentFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Supplier;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.AbstractLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
+import org.opendaylight.controller.cluster.access.commands.ExistsTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder;
 import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol;
+import org.opendaylight.controller.cluster.access.commands.ReadTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionAbortRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionDelete;
 import org.opendaylight.controller.cluster.access.commands.TransactionDoCommitRequest;
@@ -31,22 +37,25 @@ import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitR
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
 import org.opendaylight.controller.cluster.access.concepts.Response;
+import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
+import org.opendaylight.mdsal.common.api.ReadFailedException;
+import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AbstractProxyTransaction} for dispatching a transaction towards a shard leader which is co-located with
- * the client instance.
+ * the client instance. This class is NOT thread-safe.
  *
  * <p>
  * It requires a {@link DataTreeSnapshot}, which is used to instantiate a new {@link DataTreeModification}. Operations
@@ -59,7 +68,6 @@ import org.slf4j.LoggerFactory;
  *
  * @author Robert Varga
  */
-@NotThreadSafe
 final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(LocalReadWriteProxyTransaction.class);
 
@@ -84,16 +92,31 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
      */
     private Exception recordedFailure;
 
+    @SuppressWarnings("checkstyle:IllegalCatch")
     LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier,
-        final DataTreeSnapshot snapshot) {
+            final DataTreeSnapshot snapshot) {
         super(parent, identifier, false);
-        this.modification = (CursorAwareDataTreeModification) snapshot.newModification();
+
+        if (snapshot instanceof FailedDataTreeModification failed) {
+            recordedFailure = failed.cause();
+            modification = failed;
+        } else {
+            CursorAwareDataTreeModification mod;
+            try {
+                mod = (CursorAwareDataTreeModification) snapshot.newModification();
+            } catch (Exception e) {
+                LOG.debug("Failed to instantiate modification for {}", identifier, e);
+                recordedFailure = e;
+                mod = new FailedDataTreeModification(snapshot.modelContext(), e);
+            }
+            modification = mod;
+        }
     }
 
     LocalReadWriteProxyTransaction(final ProxyHistory parent, final TransactionIdentifier identifier) {
         super(parent, identifier, true);
         // This is DONE transaction, this should never be touched
-        this.modification = null;
+        modification = null;
     }
 
     @Override
@@ -106,6 +129,20 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         return getModification();
     }
 
+    @Override
+    FluentFuture<Boolean> doExists(final YangInstanceIdentifier path) {
+        final var ex = recordedFailure;
+        return ex == null ? super.doExists(path)
+            : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex));
+    }
+
+    @Override
+    FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+        final var ex = recordedFailure;
+        return ex == null ? super.doRead(path)
+            : FluentFutures.immediateFailedFluentFuture(ReadFailedException.MAPPER.apply(ex));
+    }
+
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
     void doDelete(final YangInstanceIdentifier path) {
@@ -126,7 +163,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
 
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    void doMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    void doMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
         final CursorAwareDataTreeModification mod = getModification();
         if (recordedFailure != null) {
             LOG.debug("Transaction {} recorded failure, ignoring merge to {}", getIdentifier(), path);
@@ -144,7 +181,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
 
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    void doWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    void doWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
         final CursorAwareDataTreeModification mod = getModification();
         if (recordedFailure != null) {
             LOG.debug("Transaction {} recorded failure, ignoring write to {}", getIdentifier(), path);
@@ -178,7 +215,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
     }
 
     private void sealModification() {
-        Preconditions.checkState(sealedModification == null, "Transaction %s is already sealed", this);
+        checkState(sealedModification == null, "Transaction %s is already sealed", this);
         final CursorAwareDataTreeModification mod = getModification();
         mod.ready();
         sealedModification = mod;
@@ -191,7 +228,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
     }
 
     @Override
-    boolean sealAndSend(final Optional<Long> enqueuedTicks) {
+    boolean sealAndSend(final OptionalLong enqueuedTicks) {
         sealModification();
         return super.sealAndSend(enqueuedTicks);
     }
@@ -203,12 +240,12 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
 
         sealedModification.applyToCursor(new AbstractDataTreeModificationCursor() {
             @Override
-            public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+            public void write(final PathArgument child, final NormalizedNode data) {
                 b.addModification(new TransactionWrite(current().node(child), data));
             }
 
             @Override
-            public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+            public void merge(final PathArgument child, final NormalizedNode data) {
                 b.addModification(new TransactionMerge(current().node(child), data));
             }
 
@@ -221,25 +258,25 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         return Optional.of(b.build());
     }
 
-    DataTreeSnapshot getSnapshot() {
-        Preconditions.checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier());
+    CursorAwareDataTreeSnapshot getSnapshot() {
+        checkState(sealedModification != null, "Proxy %s is not sealed yet", getIdentifier());
         return sealedModification;
     }
 
     @Override
     void applyForwardedModifyTransactionRequest(final ModifyTransactionRequest request,
-            @Nullable final Consumer<Response<?, ?>> callback) {
+            final Consumer<Response<?, ?>> callback) {
         commonModifyTransactionRequest(request, callback, this::sendRequest);
     }
 
     @Override
     void replayModifyTransactionRequest(final ModifyTransactionRequest request,
-            @Nullable final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
+            final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
         commonModifyTransactionRequest(request, callback, (req, cb) -> enqueueRequest(req, cb, enqueuedTicks));
     }
 
     private void commonModifyTransactionRequest(final ModifyTransactionRequest request,
-            @Nullable final Consumer<Response<?, ?>> callback,
+            final @Nullable Consumer<Response<?, ?>> callback,
             final BiConsumer<TransactionRequest<?>, Consumer<Response<?, ?>>> sendMethod) {
         for (final TransactionModification mod : request.getModifications()) {
             if (mod instanceof TransactionWrite) {
@@ -255,26 +292,26 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
 
         final Optional<PersistenceProtocol> maybeProtocol = request.getPersistenceProtocol();
         if (maybeProtocol.isPresent()) {
-            Verify.verify(callback != null, "Request %s has null callback", request);
+            final var cb = verifyNotNull(callback, "Request %s has null callback", request);
             if (markSealed()) {
                 sealOnly();
             }
 
-            switch (maybeProtocol.get()) {
+            switch (maybeProtocol.orElseThrow()) {
                 case ABORT:
-                    sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), callback);
+                    sendMethod.accept(new AbortLocalTransactionRequest(getIdentifier(), localActor()), cb);
                     break;
                 case READY:
                     // No-op, as we have already issued a sealOnly() and we are not transmitting anything
                     break;
                 case SIMPLE:
-                    sendMethod.accept(commitRequest(false), callback);
+                    sendMethod.accept(commitRequest(false), cb);
                     break;
                 case THREE_PHASE:
-                    sendMethod.accept(commitRequest(true), callback);
+                    sendMethod.accept(commitRequest(true), cb);
                     break;
                 default:
-                    throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.get());
+                    throw new IllegalArgumentException("Unhandled protocol " + maybeProtocol.orElseThrow());
             }
         }
     }
@@ -291,7 +328,7 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
 
     @Override
     void handleReplayedRemoteRequest(final TransactionRequest<?> request,
-            @Nullable final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
+            final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
         LOG.debug("Applying replayed request {}", request);
 
         if (request instanceof TransactionPreCommitRequest) {
@@ -322,16 +359,39 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         }
     }
 
+    @Override
+    Response<?, ?> handleExistsRequest(final DataTreeSnapshot snapshot, final ExistsTransactionRequest request) {
+        final var ex = recordedFailure;
+        return ex == null ? super.handleExistsRequest(snapshot, request)
+            : request.toRequestFailure(
+                new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex)));
+    }
+
+    @Override
+    Response<?, ?> handleReadRequest(final DataTreeSnapshot snapshot, final ReadTransactionRequest request) {
+        final var ex = recordedFailure;
+        return ex == null ? super.handleReadRequest(snapshot, request)
+            : request.toRequestFailure(
+                new RuntimeRequestException("Previous modification failed", ReadFailedException.MAPPER.apply(ex)));
+    }
+
     @Override
     void forwardToLocal(final LocalProxyTransaction successor, final TransactionRequest<?> request,
             final Consumer<Response<?, ?>> callback) {
         if (request instanceof CommitLocalTransactionRequest) {
-            Verify.verify(successor instanceof LocalReadWriteProxyTransaction);
-            ((LocalReadWriteProxyTransaction) successor).sendRebased((CommitLocalTransactionRequest)request, callback);
-            LOG.debug("Forwarded request {} to successor {}", request, successor);
+            verifyLocalReadWrite(successor).sendRebased((CommitLocalTransactionRequest)request, callback);
+        } else if (request instanceof ModifyTransactionRequest) {
+            verifyLocalReadWrite(successor).handleForwardedRemoteRequest(request, callback);
         } else {
             super.forwardToLocal(successor, request, callback);
+            return;
         }
+        LOG.debug("Forwarded request {} to successor {}", request, successor);
+    }
+
+    private static LocalReadWriteProxyTransaction verifyLocalReadWrite(final LocalProxyTransaction successor) {
+        verify(successor instanceof LocalReadWriteProxyTransaction, "Unexpected successor %s", successor);
+        return (LocalReadWriteProxyTransaction) successor;
     }
 
     @Override
@@ -347,13 +407,12 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         closedException = this::abortedException;
     }
 
-    @Nonnull
-    private CursorAwareDataTreeModification getModification() {
+    @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of recorded failure")
+    private @NonNull CursorAwareDataTreeModification getModification() {
         if (closedException != null) {
             throw closedException.get();
         }
-
-        return Preconditions.checkNotNull(modification, "Transaction %s is DONE", getIdentifier());
+        return verifyNotNull(modification, "Transaction %s is DONE", getIdentifier());
     }
 
     private void sendRebased(final CommitLocalTransactionRequest request, final Consumer<Response<?, ?>> callback) {
@@ -364,8 +423,18 @@ final class LocalReadWriteProxyTransaction extends LocalProxyTransaction {
         // Rebase old modification on new data tree.
         final CursorAwareDataTreeModification mod = getModification();
 
-        try (DataTreeModificationCursor cursor = mod.createCursor(YangInstanceIdentifier.EMPTY)) {
-            request.getModification().applyToCursor(cursor);
+        if (!(mod instanceof FailedDataTreeModification)) {
+            request.getDelayedFailure().ifPresentOrElse(failure -> {
+                if (recordedFailure == null) {
+                    recordedFailure = failure;
+                } else {
+                    recordedFailure.addSuppressed(failure);
+                }
+            }, () -> {
+                try (DataTreeModificationCursor cursor = mod.openCursor()) {
+                    request.getModification().applyToCursor(cursor);
+                }
+            });
         }
 
         if (markSealed()) {
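
The constructor above now guards DataTreeSnapshot#newModification(): on failure it records the cause and substitutes the failed placeholder, and doExists()/doRead() fail fast with that cause instead of touching the modification. A standalone sketch of this record-once, fail-fast shape, using plain Guava futures and hypothetical names:

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;

    // Illustrative only: capture an instantiation failure once and replay it on later reads.
    final class GuardedReader {
        interface SnapshotSource {
            String takeSnapshot() throws Exception;
        }

        private final String snapshot;
        private final Exception recordedFailure;

        GuardedReader(final SnapshotSource source) {
            String snap = null;
            Exception failure = null;
            try {
                snap = source.takeSnapshot();
            } catch (Exception e) {
                failure = e;           // remember the cause instead of propagating it
            }
            snapshot = snap;
            recordedFailure = failure;
        }

        ListenableFuture<String> read() {
            return recordedFailure != null
                ? Futures.immediateFailedFuture(recordedFailure)  // fail fast with the original cause
                : Futures.immediateFuture(snapshot);
        }
    }
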
index d360508abcfd00105e386da765383a8f79319f77..6ab566e2fafc186fb6257ebd98606e3f36c9470c 100644 (file)
@@ -7,21 +7,20 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import static akka.pattern.Patterns.ask;
 import static com.google.common.base.Verify.verifyNotNull;
 
 import akka.dispatch.ExecutionContexts;
 import akka.dispatch.OnComplete;
+import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.collect.BiMap;
 import com.google.common.collect.ImmutableBiMap;
-import com.google.common.collect.ImmutableBiMap.Builder;
 import java.util.concurrent.CompletionStage;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
-import javax.annotation.concurrent.GuardedBy;
-import javax.annotation.concurrent.ThreadSafe;
+import java.util.stream.Stream;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.datastore.shardmanager.RegisterForShardAvailabilityChanges;
@@ -38,9 +37,11 @@ import scala.concurrent.Future;
  * shard is assigned a single cookie and this mapping is stored in a bidirectional map. Information about the corresponding
  * shard leader is resolved via {@link ActorUtils}. The product of resolution is {@link ShardBackendInfo}.
  *
+ * <p>
+ * This class is thread-safe.
+ *
  * @author Robert Varga
  */
-@ThreadSafe
 final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     private static final Logger LOG = LoggerFactory.getLogger(ModuleShardBackendResolver.class);
 
@@ -51,19 +52,20 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     @GuardedBy("this")
     private long nextShard = 1;
 
-    private volatile BiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
+    private volatile ImmutableBiMap<String, Long> shards = ImmutableBiMap.of(DefaultShardStrategy.DEFAULT_SHARD, 0L);
 
     // FIXME: we really need just ActorContext.findPrimaryShardAsync()
     ModuleShardBackendResolver(final ClientIdentifier clientId, final ActorUtils actorUtils) {
         super(clientId, actorUtils);
 
-        shardAvailabilityChangesRegFuture = ask(actorUtils.getShardManager(), new RegisterForShardAvailabilityChanges(
-            this::onShardAvailabilityChange), Timeout.apply(60, TimeUnit.MINUTES))
+        shardAvailabilityChangesRegFuture = Patterns.ask(actorUtils.getShardManager(),
+            new RegisterForShardAvailabilityChanges(this::onShardAvailabilityChange),
+            Timeout.apply(60, TimeUnit.MINUTES))
                 .map(reply -> (Registration)reply, ExecutionContexts.global());
 
         shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
             @Override
-            public void onComplete(Throwable failure, Registration reply) {
+            public void onComplete(final Throwable failure, final Registration reply) {
                 if (failure != null) {
                     LOG.error("RegisterForShardAvailabilityChanges failed", failure);
                 }
@@ -71,7 +73,7 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
         }, ExecutionContexts.global());
     }
 
-    private void onShardAvailabilityChange(String shardName) {
+    private void onShardAvailabilityChange(final String shardName) {
         LOG.debug("onShardAvailabilityChange for {}", shardName);
 
         Long cookie = shards.get(shardName);
@@ -84,22 +86,26 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     }
 
     Long resolveShardForPath(final YangInstanceIdentifier path) {
-        final String shardName = actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
+        return resolveCookie(actorUtils().getShardStrategyFactory().getStrategy(path).findShard(path));
+    }
+
+    Stream<Long> resolveAllShards() {
+        return actorUtils().getConfiguration().getAllShardNames().stream()
+            .sorted()
+            .map(this::resolveCookie);
+    }
+
+    private @NonNull Long resolveCookie(final String shardName) {
+        final Long cookie = shards.get(shardName);
+        return cookie != null ? cookie : populateShard(shardName);
+    }
+
+    private synchronized @NonNull Long populateShard(final String shardName) {
         Long cookie = shards.get(shardName);
         if (cookie == null) {
-            synchronized (this) {
-                cookie = shards.get(shardName);
-                if (cookie == null) {
-                    cookie = nextShard++;
-
-                    Builder<String, Long> builder = ImmutableBiMap.builder();
-                    builder.putAll(shards);
-                    builder.put(shardName, cookie);
-                    shards = builder.build();
-                }
-            }
+            cookie = nextShard++;
+            shards = ImmutableBiMap.<String, Long>builder().putAll(shards).put(shardName, cookie).build();
         }
-
         return cookie;
     }
 
@@ -173,14 +179,14 @@ final class ModuleShardBackendResolver extends AbstractShardBackendResolver {
     public void close() {
         shardAvailabilityChangesRegFuture.onComplete(new OnComplete<Registration>() {
             @Override
-            public void onComplete(Throwable failure, Registration reply) {
+            public void onComplete(final Throwable failure, final Registration reply) {
                 reply.close();
             }
         }, ExecutionContexts.global());
     }
 
     @Override
-    public String resolveCookieName(Long cookie) {
+    public String resolveCookieName(final Long cookie) {
         return verifyNotNull(shards.inverse().get(cookie), "Unexpected null cookie: %s", cookie);
     }
 }
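
The shard-to-cookie refactor above is a copy-on-write cache: lookups read a volatile ImmutableBiMap without locking, and only the slow path that allocates a new cookie synchronizes, re-checks, and republishes the map. A standalone sketch of the same idiom with hypothetical names:

    import com.google.common.collect.ImmutableBiMap;

    // Illustrative only: lock-free lookups over an immutable map, synchronized copy-on-write inserts.
    final class CookieCache {
        private volatile ImmutableBiMap<String, Long> cookies = ImmutableBiMap.of("default", 0L);
        private long nextCookie = 1;               // guarded by 'this'

        Long resolve(final String name) {
            final Long existing = cookies.get(name);   // fast path: no locking
            return existing != null ? existing : populate(name);
        }

        private synchronized Long populate(final String name) {
            Long cookie = cookies.get(name);           // re-check under the lock
            if (cookie == null) {
                cookie = nextCookie++;
                cookies = ImmutableBiMap.<String, Long>builder().putAll(cookies).put(name, cookie).build();
            }
            return cookie;
        }

        String nameFor(final Long cookie) {
            return cookies.inverse().get(cookie);      // the BiMap provides the reverse lookup
        }
    }
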
index ad105c31f2f194ff610bac75ff8c3d432324b853..437effe9ae0df3264d65db37b6b2a4bf60377cf0 100644 (file)
@@ -7,20 +7,29 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import com.google.common.collect.ImmutableList;
+import com.google.common.primitives.UnsignedLong;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Consumer;
-import javax.annotation.concurrent.GuardedBy;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
@@ -29,6 +38,7 @@ import org.opendaylight.controller.cluster.access.commands.CreateLocalHistoryReq
 import org.opendaylight.controller.cluster.access.commands.DestroyLocalHistoryRequest;
 import org.opendaylight.controller.cluster.access.commands.LocalHistoryRequest;
 import org.opendaylight.controller.cluster.access.commands.PurgeLocalHistoryRequest;
+import org.opendaylight.controller.cluster.access.commands.SkipTransactionsRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.Request;
@@ -36,8 +46,8 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,12 +58,12 @@ import org.slf4j.LoggerFactory;
  */
 abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
     private abstract static class AbstractLocal extends ProxyHistory {
-        private final DataTree dataTree;
+        private final ReadOnlyDataTree dataTree;
 
         AbstractLocal(final AbstractClientHistory parent, final AbstractClientConnection<ShardBackendInfo> connection,
-            final LocalHistoryIdentifier identifier, final DataTree dataTree) {
+            final LocalHistoryIdentifier identifier, final ReadOnlyDataTree dataTree) {
             super(parent, connection, identifier);
-            this.dataTree = Preconditions.checkNotNull(dataTree);
+            this.dataTree = requireNonNull(dataTree);
         }
 
         final DataTreeSnapshot takeSnapshot() {
@@ -79,14 +89,14 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
         private volatile LocalReadWriteProxyTransaction lastSealed;
 
         Local(final AbstractClientHistory parent, final AbstractClientConnection<ShardBackendInfo> connection,
-            final LocalHistoryIdentifier identifier, final DataTree dataTree) {
+            final LocalHistoryIdentifier identifier, final ReadOnlyDataTree dataTree) {
             super(parent, connection, identifier, dataTree);
         }
 
         @Override
         AbstractProxyTransaction doCreateTransactionProxy(final AbstractClientConnection<ShardBackendInfo> connection,
                 final TransactionIdentifier txId, final boolean snapshotOnly, final boolean isDone) {
-            Preconditions.checkState(lastOpen == null, "Proxy %s has %s currently open", this, lastOpen);
+            checkState(lastOpen == null, "Proxy %s has %s currently open", this, lastOpen);
 
             if (isDone) {
                 // Done transactions do not register on our radar and should not have any state associated.
@@ -126,7 +136,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
 
         @Override
         void onTransactionCompleted(final AbstractProxyTransaction tx) {
-            Verify.verify(tx instanceof LocalProxyTransaction);
+            verify(tx instanceof LocalProxyTransaction, "Unexpected transaction %s", tx);
             if (tx instanceof LocalReadWriteProxyTransaction
                     && LAST_SEALED_UPDATER.compareAndSet(this, (LocalReadWriteProxyTransaction) tx, null)) {
                 LOG.debug("Completed last sealed transaction {}", tx);
@@ -135,7 +145,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
 
         @Override
         void onTransactionSealed(final AbstractProxyTransaction tx) {
-            Preconditions.checkState(tx.equals(lastOpen));
+            checkState(tx.equals(lastOpen));
             lastSealed = lastOpen;
             lastOpen = null;
         }
@@ -143,7 +153,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
 
     private static final class LocalSingle extends AbstractLocal {
         LocalSingle(final AbstractClientHistory parent, final AbstractClientConnection<ShardBackendInfo> connection,
-            final LocalHistoryIdentifier identifier, final DataTree dataTree) {
+            final LocalHistoryIdentifier identifier, final ReadOnlyDataTree dataTree) {
             super(parent, connection, identifier, dataTree);
         }
 
@@ -216,7 +226,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
             return identifier;
         }
 
-        @GuardedBy("lock")
+        @Holding("lock")
         @Override
         void replayRequests(final Collection<ConnectionEntry> previousEntries) {
             // First look for our Create message
@@ -225,7 +235,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
                 final ConnectionEntry e = it.next();
                 final Request<?, ?> req = e.getRequest();
                 if (identifier.equals(req.getTarget())) {
-                    Verify.verify(req instanceof LocalHistoryRequest);
+                    verify(req instanceof LocalHistoryRequest, "Unexpected request %s", req);
                     if (req instanceof CreateLocalHistoryRequest) {
                         successor.connection.enqueueRequest(req, e.getCallback(), e.getEnqueuedTicks());
                         it.remove();
@@ -239,13 +249,21 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
                 t.replayMessages(successor, previousEntries);
             }
 
+            // Forward any skipped transactions
+            final var local = skippedTransactions;
+            if (local != null) {
+                LOG.debug("{} forwarding skipped transactions towards successor {}", identifier, successor);
+                successor.skipTransactions(local);
+                skippedTransactions = null;
+            }
+
             // Now look for any finalizing messages
             it = previousEntries.iterator();
             while (it.hasNext()) {
                 final ConnectionEntry e  = it.next();
                 final Request<?, ?> req = e.getRequest();
                 if (identifier.equals(req.getTarget())) {
-                    Verify.verify(req instanceof LocalHistoryRequest);
+                    verify(req instanceof LocalHistoryRequest, "Unexpected request %s", req);
                     if (req instanceof DestroyLocalHistoryRequest) {
                         successor.connection.enqueueRequest(req, e.getCallback(), e.getEnqueuedTicks());
                         it.remove();
@@ -255,10 +273,10 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
             }
         }
 
-        @GuardedBy("lock")
+        @Holding("lock")
         @Override
         ProxyHistory finishReconnect() {
-            final ProxyHistory ret = Verify.verifyNotNull(successor);
+            final ProxyHistory ret = verifyNotNull(successor);
 
             for (AbstractProxyTransaction t : proxies.values()) {
                 t.finishReconnect();
@@ -316,38 +334,59 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
     private static final Logger LOG = LoggerFactory.getLogger(ProxyHistory.class);
 
     private final Lock lock = new ReentrantLock();
-    private final LocalHistoryIdentifier identifier;
-    private final AbstractClientConnection<ShardBackendInfo> connection;
-    private final AbstractClientHistory parent;
+    private final @NonNull LocalHistoryIdentifier identifier;
+    private final @NonNull AbstractClientConnection<ShardBackendInfo> connection;
+    private final @NonNull AbstractClientHistory parent;
 
     @GuardedBy("lock")
     private final Map<TransactionIdentifier, AbstractProxyTransaction> proxies = new LinkedHashMap<>();
     @GuardedBy("lock")
     private ProxyHistory successor;
 
+    // List of transaction identifiers which were allocated by our parent history, but did not touch our shard. Each of
+    // these represents a hole in otherwise-contiguous allocation of transactionIds. These holes are problematic, as
+    // each of them prevents LeaderFrontendState.purgedHistories from coalescing, leading to a gradual heap exhaustion.
+    //
+    // <p>
+    // We keep these in an ArrayList for fast insertion, as that happens when we are otherwise idle. We translate these
+    // into purge requests when:
+    // - we are about to allocate a new transaction
+    // - we get a successor proxy
+    // - the list grows unreasonably long
+    //
+    // TODO: we are tracking entire TransactionIdentifiers, but really only need to track the longs. Do that once we
+    //       have a {@code List<long>}.
+    // FIXME: this is not tuneable, but perhaps should be
+    // FIXME: default value deserves some explanation -- this affects depth of an RB Tree on the receiving end.
+    private static final int PURGE_SKIPPED_TXID_THRESHOLD = 256;
+
+    @GuardedBy("lock")
+    private volatile List<TransactionIdentifier> skippedTransactions;
+
     private ProxyHistory(final AbstractClientHistory parent,
             final AbstractClientConnection<ShardBackendInfo> connection, final LocalHistoryIdentifier identifier) {
-        this.parent = Preconditions.checkNotNull(parent);
-        this.connection = Preconditions.checkNotNull(connection);
-        this.identifier = Preconditions.checkNotNull(identifier);
+        this.parent = requireNonNull(parent);
+        this.connection = requireNonNull(connection);
+        this.identifier = requireNonNull(identifier);
     }
 
     static ProxyHistory createClient(final AbstractClientHistory parent,
             final AbstractClientConnection<ShardBackendInfo> connection, final LocalHistoryIdentifier identifier) {
-        final Optional<DataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
-        return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.get())
+        final Optional<ReadOnlyDataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
+        return dataTree.isPresent() ? new Local(parent, connection, identifier, dataTree.orElseThrow())
              : new Remote(parent, connection, identifier);
     }
 
     static ProxyHistory createSingle(final AbstractClientHistory parent,
             final AbstractClientConnection<ShardBackendInfo> connection,
             final LocalHistoryIdentifier identifier) {
-        final Optional<DataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
-        return dataTree.isPresent() ? new LocalSingle(parent, connection, identifier, dataTree.get())
+        final Optional<ReadOnlyDataTree> dataTree = connection.getBackendInfo().flatMap(ShardBackendInfo::getDataTree);
+        return dataTree.isPresent() ? new LocalSingle(parent, connection, identifier, dataTree.orElseThrow())
              : new RemoteSingle(parent, connection, identifier);
     }
 
     @Override
+    // Non-final for mocking
     public LocalHistoryIdentifier getIdentifier() {
         return identifier;
     }
@@ -373,6 +412,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
         return createTransactionProxy(txId, snapshotOnly, false);
     }
 
+    // Non-final for mocking
     AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier txId, final boolean snapshotOnly,
             final boolean isDone) {
         lock.lock();
@@ -391,6 +431,86 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
         }
     }
 
+    final void skipTransaction(final TransactionIdentifier txId) {
+        lock.lock();
+        try {
+            if (successor != null) {
+                successor.skipTransaction(txId);
+                return;
+            }
+
+            var local = skippedTransactions;
+            if (local == null) {
+                skippedTransactions = local = new ArrayList<>();
+            }
+            local.add(txId);
+            LOG.debug("Recorded skipped transaction {}", txId);
+            skipIfNeeded(local);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    @Holding("lock")
+    private void skipIfNeeded(final List<TransactionIdentifier> current) {
+        if (current.size() >= PURGE_SKIPPED_TXID_THRESHOLD) {
+            skippedTransactions = null;
+            doSkipTransactions(current);
+        }
+    }
+
+    private void skipTransactions(final List<TransactionIdentifier> toSkip) {
+        lock.lock();
+        try {
+            if (successor != null) {
+                successor.skipTransactions(toSkip);
+                return;
+            }
+
+            var local = skippedTransactions;
+            if (local != null) {
+                local.addAll(toSkip);
+            } else {
+                skippedTransactions = local = toSkip;
+            }
+            skipIfNeeded(local);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    private void skipTransactions() {
+        var local = skippedTransactions;
+        if (local != null) {
+            lock.lock();
+            try {
+                local = skippedTransactions;
+                if (local != null && successor == null) {
+                    skippedTransactions = null;
+                    doSkipTransactions(local);
+                }
+            } finally {
+                lock.unlock();
+            }
+        }
+    }
+
+    @Holding("lock")
+    private void doSkipTransactions(final List<TransactionIdentifier> toSkip) {
+        final var txIds = toSkip.stream()
+            .mapToLong(TransactionIdentifier::getTransactionId)
+            .distinct()
+            .sorted()
+            .mapToObj(UnsignedLong::fromLongBits)
+            .collect(ImmutableList.toImmutableList());
+
+        LOG.debug("Proxy {} skipping transactions {}", this, txIds);
+        connection.enqueueRequest(new SkipTransactionsRequest(new TransactionIdentifier(identifier,
+            txIds.get(0).longValue()), 0, localActor(), txIds.subList(1, txIds.size())), resp -> {
+                LOG.debug("Proxy {} confirmed transaction skip", this);
+            }, connection.currentTime());
+    }
+
     final void abortTransaction(final AbstractProxyTransaction tx) {
         lock.lock();
         try {
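
doSkipTransactions() above collapses the buffered identifiers into a distinct, ascending list of UnsignedLong values, sending the first as the request target and the rest as the additional ids. A sketch of just that stream transformation, assuming the raw transaction ids are already available as longs:

    import com.google.common.collect.ImmutableList;
    import com.google.common.primitives.UnsignedLong;
    import java.util.List;

    final class SkipIds {
        // Collapse raw transaction ids into the distinct, ascending UnsignedLong form used on the wire.
        static ImmutableList<UnsignedLong> condense(final List<Long> rawIds) {
            return rawIds.stream()
                .mapToLong(Long::longValue)
                .distinct()
                .sorted()
                .mapToObj(UnsignedLong::fromLongBits)
                .collect(ImmutableList.toImmutableList());
        }
    }

For example, condense(List.of(3L, 1L, 3L, 2L)) yields [1, 2, 3].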
@@ -413,7 +533,7 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
         }
     }
 
-    void purgeTransaction(final AbstractProxyTransaction tx) {
+    final void purgeTransaction(final AbstractProxyTransaction tx) {
         lock.lock();
         try {
             proxies.remove(tx.getIdentifier());
@@ -441,23 +561,26 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
 
     final void enqueueRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback,
             final long enqueuedTicks) {
+        skipTransactions();
         connection.enqueueRequest(request, callback, enqueuedTicks);
     }
 
     final void sendRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
+        skipTransactions();
         connection.sendRequest(request, callback);
     }
 
-    @GuardedBy("lock")
+    @Holding("lock")
     @SuppressWarnings("checkstyle:hiddenField")
     abstract AbstractProxyTransaction doCreateTransactionProxy(AbstractClientConnection<ShardBackendInfo> connection,
             TransactionIdentifier txId, boolean snapshotOnly, boolean isDone);
 
+    @Holding("lock")
     @SuppressWarnings("checkstyle:hiddenField")
     abstract ProxyHistory createSuccessor(AbstractClientConnection<ShardBackendInfo> connection);
 
     @SuppressFBWarnings(value = "UL_UNRELEASED_LOCK", justification = "Lock is released asynchronously via the cohort")
-    ProxyReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConnection) {
+    final ProxyReconnectCohort startReconnect(final ConnectedClientConnection<ShardBackendInfo> newConnection) {
         lock.lock();
         if (successor != null) {
             lock.unlock();
@@ -491,16 +614,17 @@ abstract class ProxyHistory implements Identifiable<LocalHistoryIdentifier> {
         LOG.debug("Proxy {} purge completed with {}", this, response);
     }
 
-    @GuardedBy("lock")
+    @Holding("lock")
     void onTransactionAborted(final AbstractProxyTransaction tx) {
         // No-op for most implementations
     }
 
-    @GuardedBy("lock")
+    @Holding("lock")
     void onTransactionCompleted(final AbstractProxyTransaction tx) {
         // No-op for most implementations
     }
 
+    @Holding("lock")
     void onTransactionSealed(final AbstractProxyTransaction tx) {
         // No-op on most implementations
     }
index d36218c19e13a5eae47d10ef9bbf22ed9061ab5e..946e3341fd8f7778fdbf940299deb72112a61284 100644 (file)
@@ -13,8 +13,8 @@ import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.function.Consumer;
-import javax.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.AbstractLocalTransactionRequest;
@@ -49,7 +49,7 @@ import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,18 +64,14 @@ import org.slf4j.LoggerFactory;
  * <p>
  * This class is not safe to access from multiple application threads, as is usual for transactions. Its internal state
  * transitions based on backend responses are thread-safe.
- *
- * @author Robert Varga
  */
 final class RemoteProxyTransaction extends AbstractProxyTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(RemoteProxyTransaction.class);
 
-    // FIXME: make this tuneable
-    private static final int REQUEST_MAX_MODIFICATIONS = 1000;
-
     private final ModifyTransactionRequestBuilder builder;
     private final boolean sendReadyOnSeal;
     private final boolean snapshotOnly;
+    private final int maxModifications;
 
     private boolean builderBusy;
 
@@ -87,6 +83,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
         this.snapshotOnly = snapshotOnly;
         this.sendReadyOnSeal = sendReadyOnSeal;
         builder = new ModifyTransactionRequestBuilder(identifier, localActor());
+        maxModifications = parent.parent().actorUtils().getDatastoreContext().getShardBatchedModificationCount();
     }
 
     @Override
@@ -101,17 +98,17 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
 
     @Override
     void doDelete(final YangInstanceIdentifier path) {
-        appendModification(new TransactionDelete(path), Optional.empty());
+        appendModification(new TransactionDelete(path), OptionalLong.empty());
     }
 
     @Override
-    void doMerge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        appendModification(new TransactionMerge(path, data), Optional.empty());
+    void doMerge(final YangInstanceIdentifier path, final NormalizedNode data) {
+        appendModification(new TransactionMerge(path, data), OptionalLong.empty());
     }
 
     @Override
-    void doWrite(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        appendModification(new TransactionWrite(path, data), Optional.empty());
+    void doWrite(final YangInstanceIdentifier path, final NormalizedNode data) {
+        appendModification(new TransactionWrite(path, data), OptionalLong.empty());
     }
 
     private <T> FluentFuture<T> sendReadRequest(final AbstractReadTransactionRequest<?> request,
@@ -137,8 +134,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     }
 
     @Override
-    FluentFuture<Optional<NormalizedNode<?, ?>>> doRead(final YangInstanceIdentifier path) {
-        final SettableFuture<Optional<NormalizedNode<?, ?>>> future = SettableFuture.create();
+    FluentFuture<Optional<NormalizedNode>> doRead(final YangInstanceIdentifier path) {
+        final SettableFuture<Optional<NormalizedNode>> future = SettableFuture.create();
         return sendReadRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(), path,
             isSnapshotOnly()), t -> completeRead(path, future, t), future);
     }
@@ -151,40 +148,40 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     }
 
     private void ensureFlushedBuider() {
-        ensureFlushedBuider(Optional.empty());
+        ensureFlushedBuider(OptionalLong.empty());
     }
 
-    private void ensureFlushedBuider(final Optional<Long> enqueuedTicks) {
+    private void ensureFlushedBuider(final OptionalLong enqueuedTicks) {
         if (builderBusy) {
             flushBuilder(enqueuedTicks);
         }
     }
 
-    private void flushBuilder(final Optional<Long> enqueuedTicks) {
+    private void flushBuilder(final OptionalLong enqueuedTicks) {
         final ModifyTransactionRequest request = builder.build();
         builderBusy = false;
 
         sendModification(request, enqueuedTicks);
     }
 
-    private void sendModification(final TransactionRequest<?> request, final Optional<Long> enqueuedTicks) {
+    private void sendModification(final TransactionRequest<?> request, final OptionalLong enqueuedTicks) {
         if (enqueuedTicks.isPresent()) {
-            enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.get().longValue());
+            enqueueRequest(request, response -> completeModify(request, response), enqueuedTicks.orElseThrow());
         } else {
             sendRequest(request, response -> completeModify(request, response));
         }
     }
 
     private void appendModification(final TransactionModification modification) {
-        appendModification(modification, Optional.empty());
+        appendModification(modification, OptionalLong.empty());
     }
 
-    private void appendModification(final TransactionModification modification, final Optional<Long> enqueuedTicks) {
+    private void appendModification(final TransactionModification modification, final OptionalLong enqueuedTicks) {
         if (operationFailure == null) {
             ensureInitializedBuilder();
 
             builder.addModification(modification);
-            if (builder.size() >= REQUEST_MAX_MODIFICATIONS) {
+            if (builder.size() >= maxModifications) {
                 flushBuilder(enqueuedTicks);
             }
         } else {
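
The hunks above migrate the optional enqueue-tick parameter from Optional<Long> to OptionalLong, removing boxing and replacing get() with orElseThrow(). A minimal sketch of the resulting dispatch pattern; the method and strings are invented for illustration:

    import java.util.OptionalLong;

    final class TickDispatch {
        // Replayed requests carry their original enqueue time; fresh requests are sent immediately.
        static String dispatch(final OptionalLong enqueuedTicks) {
            return enqueuedTicks.isPresent()
                ? "enqueue at tick " + enqueuedTicks.orElseThrow()
                : "send immediately";
        }

        public static void main(final String[] args) {
            System.out.println(dispatch(OptionalLong.empty()));   // send immediately
            System.out.println(dispatch(OptionalLong.of(42L)));   // enqueue at tick 42
        }
    }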
@@ -205,8 +202,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
 
     private Exception recordFailedResponse(final Response<?, ?> response) {
         final Exception failure;
-        if (response instanceof RequestFailure) {
-            final RequestException cause = ((RequestFailure<?, ?>) response).getCause();
+        if (response instanceof RequestFailure<?, ?> requestFailure) {
+            final RequestException cause = requestFailure.getCause();
             failure = cause instanceof RequestTimeoutException
                     ? new DataStoreUnavailableException(cause.getMessage(), cause) : cause;
         } else {
@@ -230,8 +227,8 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             final Response<?, ?> response) {
         LOG.debug("Exists request for {} completed with {}", path, response);
 
-        if (response instanceof ExistsTransactionSuccess) {
-            future.set(((ExistsTransactionSuccess) response).getExists());
+        if (response instanceof ExistsTransactionSuccess success) {
+            future.set(success.getExists());
         } else {
             failReadFuture(future, "Error executing exists request for path " + path, response);
         }
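
Several hunks in this file replace instanceof-plus-cast with Java 16 pattern matching for instanceof, binding the narrowed value directly in the condition. A self-contained sketch of the idiom:

    final class PatternMatch {
        static String describe(final Object response) {
            // Before: if (response instanceof Number) { final Number number = (Number) response; ... }
            if (response instanceof Number number) {
                return "number with long value " + number.longValue();
            }
            return "other: " + response;
        }

        public static void main(final String[] args) {
            System.out.println(describe(42));        // number with long value 42
            System.out.println(describe("failure")); // other: failure
        }
    }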
@@ -239,12 +236,12 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
         recordFinishedRequest(response);
     }
 
-    private void completeRead(final YangInstanceIdentifier path,
-            final SettableFuture<Optional<NormalizedNode<?, ?>>> future, final Response<?, ?> response) {
+    private void completeRead(final YangInstanceIdentifier path, final SettableFuture<Optional<NormalizedNode>> future,
+            final Response<?, ?> response) {
         LOG.debug("Read request for {} completed with {}", path, response);
 
-        if (response instanceof ReadTransactionSuccess) {
-            future.set(((ReadTransactionSuccess) response).getData());
+        if (response instanceof ReadTransactionSuccess success) {
+            future.set(success.getData());
         } else {
             failReadFuture(future, "Error reading data for path " + path, response);
         }
@@ -276,7 +273,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     }
 
     @Override
-    boolean sealAndSend(final Optional<Long> enqueuedTicks) {
+    boolean sealAndSend(final OptionalLong enqueuedTicks) {
         if (sendReadyOnSeal) {
             ensureInitializedBuilder();
             builder.setReady();
@@ -303,19 +300,19 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     }
 
     void handleForwardedRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback) {
-        if (request instanceof ModifyTransactionRequest) {
-            handleForwardedModifyTransactionRequest(callback, (ModifyTransactionRequest) request);
-        } else if (request instanceof ReadTransactionRequest) {
+        if (request instanceof ModifyTransactionRequest modifyRequest) {
+            handleForwardedModifyTransactionRequest(callback, modifyRequest);
+        } else if (request instanceof ReadTransactionRequest readRequest) {
             ensureFlushedBuider();
             sendRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                readRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     callback.accept(resp);
                 });
-        } else if (request instanceof ExistsTransactionRequest) {
+        } else if (request instanceof ExistsTransactionRequest existsRequest) {
             ensureFlushedBuider();
             sendRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                existsRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     callback.accept(resp);
                 });
@@ -336,7 +333,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
         } else if (request instanceof TransactionPurgeRequest) {
             enqueuePurge(callback);
         } else {
-            throw new IllegalArgumentException("Unhandled request {}" + request);
+            throw unhandledRequest(request);
         }
     }
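
The replaced line built its message as "Unhandled request {}" + request, concatenating an SLF4J-style placeholder into the exception text; both dispatch methods now call an unhandledRequest() helper defined outside this hunk. A hedged sketch of what such a helper could look like; the actual signature in the parent class is not shown in this diff:

    final class Requests {
        // Hypothetical stand-in for the unhandledRequest() helper referenced above:
        // build a consistent exception message without the stray "{}" placeholder.
        static IllegalArgumentException unhandledRequest(final Object request) {
            return new IllegalArgumentException("Unhandled request " + request);
        }
    }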
 
@@ -355,7 +352,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             }
 
             final TransactionRequest<?> tmp;
-            switch (maybeProto.get()) {
+            switch (maybeProto.orElseThrow()) {
                 case ABORT:
                     tmp = abortRequest();
                     sendRequest(tmp, resp -> {
@@ -385,7 +382,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
                     });
                     break;
                 default:
-                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get());
+                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow());
             }
         }
     }
@@ -399,28 +396,28 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     @Override
     void handleReplayedLocalRequest(final AbstractLocalTransactionRequest<?> request,
             final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
-        if (request instanceof CommitLocalTransactionRequest) {
-            replayLocalCommitRequest((CommitLocalTransactionRequest) request, callback, enqueuedTicks);
+        if (request instanceof CommitLocalTransactionRequest commitRequest) {
+            replayLocalCommitRequest(commitRequest, callback, enqueuedTicks);
         } else if (request instanceof AbortLocalTransactionRequest) {
             enqueueRequest(abortRequest(), callback, enqueuedTicks);
         } else {
-            throw new IllegalStateException("Unhandled request " + request);
+            throw unhandledRequest(request);
         }
     }
 
     private void replayLocalCommitRequest(final CommitLocalTransactionRequest request,
             final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
         final DataTreeModification mod = request.getModification();
-        final Optional<Long> optTicks = Optional.of(Long.valueOf(enqueuedTicks));
+        final OptionalLong optTicks = OptionalLong.of(enqueuedTicks);
 
         mod.applyToCursor(new AbstractDataTreeModificationCursor() {
             @Override
-            public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+            public void write(final PathArgument child, final NormalizedNode data) {
                 appendModification(new TransactionWrite(current().node(child), data), optTicks);
             }
 
             @Override
-            public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+            public void merge(final PathArgument child, final NormalizedNode data) {
                 appendModification(new TransactionMerge(current().node(child), data), optTicks);
             }
 
@@ -434,24 +431,24 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
     }
 
     @Override
-    void handleReplayedRemoteRequest(final TransactionRequest<?> request,
-            @Nullable final Consumer<Response<?, ?>> callback, final long enqueuedTicks) {
+    void handleReplayedRemoteRequest(final TransactionRequest<?> request, final Consumer<Response<?, ?>> callback,
+            final long enqueuedTicks) {
         final Consumer<Response<?, ?>> cb = callback != null ? callback : resp -> { /* NOOP */ };
-        final Optional<Long> optTicks = Optional.of(Long.valueOf(enqueuedTicks));
+        final OptionalLong optTicks = OptionalLong.of(enqueuedTicks);
 
-        if (request instanceof ModifyTransactionRequest) {
-            handleReplayedModifyTransactionRequest(enqueuedTicks, cb, (ModifyTransactionRequest) request);
-        } else if (request instanceof ReadTransactionRequest) {
+        if (request instanceof ModifyTransactionRequest modifyRequest) {
+            handleReplayedModifyTransactionRequest(enqueuedTicks, cb, modifyRequest);
+        } else if (request instanceof ReadTransactionRequest readRequest) {
             ensureFlushedBuider(optTicks);
             enqueueRequest(new ReadTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ReadTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                readRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     cb.accept(resp);
                 }, enqueuedTicks);
-        } else if (request instanceof ExistsTransactionRequest) {
+        } else if (request instanceof ExistsTransactionRequest existsRequest) {
             ensureFlushedBuider(optTicks);
             enqueueRequest(new ExistsTransactionRequest(getIdentifier(), nextSequence(), localActor(),
-                ((ExistsTransactionRequest) request).getPath(), isSnapshotOnly()), resp -> {
+                existsRequest.getPath(), isSnapshotOnly()), resp -> {
                     recordFinishedRequest(resp);
                     cb.accept(resp);
                 }, enqueuedTicks);
@@ -472,14 +469,13 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             enqueueDoAbort(callback, enqueuedTicks);
         } else if (request instanceof TransactionPurgeRequest) {
             enqueuePurge(callback, enqueuedTicks);
-        } else if (request instanceof IncrementTransactionSequenceRequest) {
-            final IncrementTransactionSequenceRequest req = (IncrementTransactionSequenceRequest) request;
+        } else if (request instanceof IncrementTransactionSequenceRequest req) {
             ensureFlushedBuider(optTicks);
             enqueueRequest(new IncrementTransactionSequenceRequest(getIdentifier(), nextSequence(), localActor(),
                 snapshotOnly, req.getIncrement()), callback, enqueuedTicks);
             incrementSequence(req.getIncrement());
         } else {
-            throw new IllegalArgumentException("Unhandled request {}" + request);
+            throw unhandledRequest(request);
         }
     }
 
@@ -496,7 +492,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
             }
 
             final TransactionRequest<?> tmp;
-            switch (maybeProto.get()) {
+            switch (maybeProto.orElseThrow()) {
                 case ABORT:
                     tmp = abortRequest();
                     enqueueRequest(tmp, resp -> {
@@ -526,7 +522,7 @@ final class RemoteProxyTransaction extends AbstractProxyTransaction {
                     }, enqueuedTicks);
                     break;
                 default:
-                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.get());
+                    throw new IllegalArgumentException("Unhandled protocol " + maybeProto.orElseThrow());
             }
         }
     }
index fda7fc6122fd5e828266db32b0d3ed81d0c76a50..95ce87ca9516036d4d4eca4c88aa76c8af540c0f 100644 (file)
@@ -7,45 +7,45 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.base.Preconditions;
 import com.google.common.primitives.UnsignedLong;
 import java.util.Optional;
-import javax.annotation.concurrent.ThreadSafe;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.BackendInfo;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Combined backend tracking. Aside from the usual {@link BackendInfo}, this object also tracks the cookie assigned
- * to the shard. This assignment remains constant for as long as the client is not restarted.
+ * to the shard. This assignment remains constant for as long as the client is not restarted. This class is thread-safe.
  *
  * @author Robert Varga
  */
-@ThreadSafe
 final class ShardBackendInfo extends BackendInfo {
-    private final Optional<DataTree> dataTree;
+    private final Optional<ReadOnlyDataTree> dataTree;
     private final UnsignedLong cookie;
 
     ShardBackendInfo(final ActorRef actor, final long sessionId, final ABIVersion version, final String shardName,
-        final UnsignedLong cookie, final Optional<DataTree> dataTree, final int maxMessages) {
+        final UnsignedLong cookie, final Optional<ReadOnlyDataTree> dataTree, final int maxMessages) {
         super(actor, shardName, sessionId, version, maxMessages);
-        this.cookie = Preconditions.checkNotNull(cookie);
-        this.dataTree = Preconditions.checkNotNull(dataTree);
+        this.cookie = requireNonNull(cookie);
+        this.dataTree = requireNonNull(dataTree);
     }
 
     UnsignedLong getCookie() {
         return cookie;
     }
 
-    Optional<DataTree> getDataTree() {
+    Optional<ReadOnlyDataTree> getDataTree() {
         return dataTree;
     }
 
     LocalHistoryIdentifier brandHistory(final LocalHistoryIdentifier id) {
-        Preconditions.checkArgument(id.getCookie() == 0, "History %s is already branded", id);
+        checkArgument(id.getCookie() == 0, "History %s is already branded", id);
         return new LocalHistoryIdentifier(id.getClientId(), id.getHistoryId(), cookie.longValue());
     }
 
index de67400f7a71bae8c41102ceee33e80302a20e7c..b19de5a4ef9cd678394e900bc6e59b408bb715a1 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 import static java.util.Objects.requireNonNull;
 
 import akka.actor.Props;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.client.AbstractClientActor;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
@@ -37,7 +37,7 @@ public final class SimpleDataStoreClientActor extends AbstractDataStoreClientAct
         return new SimpleDataStoreClientBehavior(context, actorUtils, shardName);
     }
 
-    public static Props props(@Nonnull final MemberName memberName, @Nonnull final String storeName,
+    public static Props props(final @NonNull MemberName memberName, final @NonNull String storeName,
             final ActorUtils actorUtils, final String shardName) {
         final String name = "datastore-" + storeName;
         final FrontendIdentifier frontendId = FrontendIdentifier.create(memberName, FrontendType.forName(name));
index aaaa88e8b6b2494e535ecab198f0f82c590f5ebd..984a4e4f0c81158fe90b404600e6ab8f9efec8ef 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import java.util.stream.Stream;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -18,7 +19,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  */
 final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavior {
     // Pre-boxed instance
-    private static final Long ZERO = Long.valueOf(0);
+    private static final Long ZERO = 0L;
 
     private SimpleDataStoreClientBehavior(final ClientActorContext context,
             final SimpleShardBackendResolver resolver) {
@@ -34,4 +35,9 @@ final class SimpleDataStoreClientBehavior extends AbstractDataStoreClientBehavio
     Long resolveShardForPath(final YangInstanceIdentifier path) {
         return ZERO;
     }
+
+    @Override
+    Stream<Long> resolveAllShards() {
+        return Stream.of(ZERO);
+    }
 }
index e086f08630240d949375eac31a07bb7d2a0a762d..012e068a81bcbb23fba18f8163057d744b0f0d6c 100644 (file)
@@ -11,7 +11,6 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static java.util.Objects.requireNonNull;
 
 import java.util.concurrent.CompletionStage;
-import javax.annotation.concurrent.ThreadSafe;
 import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
@@ -21,11 +20,11 @@ import org.slf4j.LoggerFactory;
 /**
  * {@link BackendInfoResolver} implementation for static shard configuration based on ShardManager. Unlike the full
  * {@link ModuleShardBackendResolver}, this resolver is used in situations where the client corresponds exactly to one
- * backend shard, e.g. there is only one fixed cookie assigned and the operation path is not consulted at all.
+ * backend shard, e.g. there is only one fixed cookie assigned and the operation path is not consulted at all. This
+ * class is thread-safe.
  *
  * @author Robert Varga
  */
-@ThreadSafe
 final class SimpleShardBackendResolver extends AbstractShardBackendResolver {
     private static final Logger LOG = LoggerFactory.getLogger(SimpleShardBackendResolver.class);
 
index 01298dfabfcef1c37e0eaeb0e8ded68a6e975b68..6433b6b5878be65d11507dbfd14d25d73fbe7eb5 100644 (file)
@@ -7,14 +7,17 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.AbstractFuture;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import javax.annotation.concurrent.GuardedBy;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
 
 /**
  * An {@link AbstractFuture} implementation which requires a certain number of votes before it completes. If all votes
@@ -43,11 +46,10 @@ class VotingFuture<T> extends AbstractFuture<T> {
     private volatile int neededVotes;
 
     VotingFuture(final T result, final int requiredVotes) {
-        Preconditions.checkArgument(requiredVotes > 0);
+        this.result = requireNonNull(result);
+        checkArgument(requiredVotes > 0);
         this.neededVotes = requiredVotes;
 
-        // null is okay to allow Void type
-        this.result = result;
     }
 
     void voteYes() {
@@ -69,11 +71,11 @@ class VotingFuture<T> extends AbstractFuture<T> {
 
     private boolean castVote() {
         final int votes = VOTES_UPDATER.decrementAndGet(this);
-        Verify.verify(votes >= 0);
+        verify(votes >= 0);
         return votes == 0;
     }
 
-    @GuardedBy("failures")
+    @Holding("failures")
     private void resolveResult() {
         final Iterator<Throwable> it = failures.iterator();
         if (!it.hasNext()) {
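
VotingFuture completes only after the required number of votes has been cast, tracking negative votes as failures. A simplified, standalone sketch of the same counting idea on top of Guava's AbstractFuture; unlike the real class it fails immediately on the first negative vote instead of aggregating failures:

    import com.google.common.util.concurrent.AbstractFuture;
    import java.util.concurrent.atomic.AtomicInteger;

    final class CountingFuture<T> extends AbstractFuture<T> {
        private final AtomicInteger remainingVotes;
        private final T result;

        CountingFuture(final T result, final int requiredVotes) {
            this.result = result;
            remainingVotes = new AtomicInteger(requiredVotes);
        }

        // Completes the future with the pre-set result once the last vote arrives.
        void voteYes() {
            if (remainingVotes.decrementAndGet() == 0) {
                set(result);
            }
        }

        // Fails the future; a later set() becomes a no-op because the future is already complete.
        void voteNo(final Throwable cause) {
            remainingVotes.decrementAndGet();
            setException(cause);
        }
    }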
index 55108e0ef9cd720895b04d560145022b3cdae703..05edad2cf1f72867110f5a3798a6570365408682 100644 (file)
@@ -13,64 +13,62 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
+import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.concurrent.CountDownLatch;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
 import org.opendaylight.controller.cluster.databroker.actors.dds.DistributedDataStoreClientActor;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardManagerCreator;
 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerCreator;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import scala.concurrent.duration.Duration;
 
 /**
  * Base implementation of a distributed DOMStore.
  */
-public abstract class AbstractDataStore implements DistributedDataStoreInterface, SchemaContextListener,
-        DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher,
-        DOMDataTreeCommitCohortRegistry, AutoCloseable {
-
+public abstract class AbstractDataStore implements DistributedDataStoreInterface,
+        DatastoreContextPropertiesUpdater.Listener, DOMStoreTreeChangePublisher, CommitCohortExtension,
+        AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(AbstractDataStore.class);
 
-    private static final long READY_WAIT_FACTOR = 3;
-
+    private final SettableFuture<Empty> readinessFuture = SettableFuture.create();
+    private final ClientIdentifier identifier;
+    private final DataStoreClient client;
     private final ActorUtils actorUtils;
-    private final long waitTillReadyTimeInMillis;
 
     private AutoCloseable closeable;
-
     private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
-
     private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
 
-    private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
-
-    private final ClientIdentifier identifier;
-    private final DataStoreClient client;
-
     @SuppressWarnings("checkstyle:IllegalCatch")
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Testing overrides")
     protected AbstractDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
             final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
             final DatastoreSnapshot restoreFromSnapshot) {
@@ -89,9 +87,9 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
 
         PrimaryShardInfoFutureCache primaryShardInfoCache = new PrimaryShardInfoFutureCache();
 
-        ShardManagerCreator creator = new ShardManagerCreator().cluster(cluster).configuration(configuration)
+        AbstractShardManagerCreator<?> creator = getShardManagerCreator().cluster(cluster).configuration(configuration)
                 .datastoreContextFactory(datastoreContextFactory)
-                .waitTillReadyCountDownLatch(waitTillReadyCountDownLatch)
+                .readinessFuture(readinessFuture)
                 .primaryShardInfoCache(primaryShardInfoCache)
                 .restoreFromSnapshot(restoreFromSnapshot)
                 .distributedDataStore(this);
@@ -109,15 +107,12 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
             LOG.error("Failed to get actor for {}", clientProps, e);
             clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
             Throwables.throwIfUnchecked(e);
-            throw new RuntimeException(e);
+            throw new IllegalStateException(e);
         }
 
         identifier = client.getIdentifier();
         LOG.debug("Distributed data store client {} started", identifier);
 
-        this.waitTillReadyTimeInMillis = actorUtils.getDatastoreContext().getShardLeaderElectionTimeout()
-                .duration().toMillis() * READY_WAIT_FACTOR;
-
         datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(
                 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
         datastoreConfigMXBean.setContext(datastoreContextFactory.getBaseDatastoreContext());
@@ -129,76 +124,89 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     }
 
     @VisibleForTesting
-    protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
+    protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
+                                final DataStoreClient clientActor) {
         this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
-        this.client = null;
+        client = clientActor;
         this.identifier = requireNonNull(identifier);
-        this.waitTillReadyTimeInMillis = actorUtils.getDatastoreContext().getShardLeaderElectionTimeout()
-                .duration().toMillis() * READY_WAIT_FACTOR;
     }
 
     @VisibleForTesting
-    protected AbstractDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
-                                final DataStoreClient clientActor) {
-        this.actorUtils = requireNonNull(actorUtils, "actorContext should not be null");
-        this.client = clientActor;
-        this.identifier = requireNonNull(identifier);
-        this.waitTillReadyTimeInMillis = actorUtils.getDatastoreContext().getShardLeaderElectionTimeout()
-                .duration().toMillis() * READY_WAIT_FACTOR;
+    protected AbstractShardManagerCreator<?> getShardManagerCreator() {
+        return new ShardManagerCreator();
     }
 
     protected final DataStoreClient getClient() {
         return client;
     }
 
-    final ClientIdentifier getIdentifier() {
-        return identifier;
-    }
-
     public void setCloseable(final AutoCloseable closeable) {
         this.closeable = closeable;
     }
 
     @Override
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
-            final YangInstanceIdentifier treeId, final L listener) {
+    public final Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
+        return registerTreeChangeListener(treeId, listener, true);
+    }
+
+    private @NonNull Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener, final boolean clustered) {
         requireNonNull(treeId, "treeId should not be null");
         requireNonNull(listener, "listener should not be null");
 
-        final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
-        LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
+        /*
+         * We need to potentially deal with multi-shard composition for registration targeting the root of the data
+         * store. If that is the case, we delegate to a more complicated setup involving all shards.
+         */
+        if (treeId.isEmpty()) {
+            // User is targeting root of the datastore. If there is more than one shard, we have to register with them
+            // all and perform data composition.
+            final var shardNames = actorUtils.getConfiguration().getAllShardNames();
+            if (shardNames.size() > 1) {
+                if (!clustered) {
+                    throw new IllegalArgumentException(
+                        "Cannot listen on root without non-clustered listener " + listener);
+                }
+                return new RootDataTreeChangeListenerProxy<>(actorUtils, listener, shardNames);
+            }
+        }
 
-        final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
-                new DataTreeChangeListenerProxy<>(actorUtils, listener, treeId);
-        listenerRegistrationProxy.init(shardName);
+        final var shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
+        LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
 
-        return listenerRegistrationProxy;
+        return DataTreeChangeListenerProxy.of(actorUtils, listener, treeId, clustered, shardName);
     }
 
+    @Override
+    @Deprecated(since = "9.0.0", forRemoval = true)
+    public final Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
+        return registerTreeChangeListener(treeId, listener, false);
+    }
 
     @Override
-    public <C extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<C> registerCommitCohort(
-            final DOMDataTreeIdentifier subtree, final C cohort) {
-        YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").getRootIdentifier();
+    // Non-final for testing
+    public Registration registerCommitCohort(final DOMDataTreeIdentifier subtree,
+            final DOMDataTreeCommitCohort cohort) {
+        YangInstanceIdentifier treeId = requireNonNull(subtree, "subtree should not be null").path();
         requireNonNull(cohort, "listener should not be null");
 
 
         final String shardName = actorUtils.getShardStrategyFactory().getStrategy(treeId).findShard(treeId);
         LOG.debug("Registering cohort: {} for tree: {} shard: {}", cohort, treeId, shardName);
 
-        DataTreeCohortRegistrationProxy<C> cohortProxy =
-                new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
+        final var cohortProxy = new DataTreeCohortRegistrationProxy<>(actorUtils, subtree, cohort);
         cohortProxy.init(shardName);
         return cohortProxy;
     }
 
-    @Override
-    public void onGlobalContextUpdated(final SchemaContext schemaContext) {
-        actorUtils.setSchemaContext(schemaContext);
+    public void onModelContextUpdated(final EffectiveModelContext newModelContext) {
+        actorUtils.setSchemaContext(newModelContext);
     }
 
     @Override
-    public void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
+    public final void onDatastoreContextUpdated(final DatastoreContextFactory contextFactory) {
         LOG.info("DatastoreContext updated for data store {}", actorUtils.getDataStoreName());
 
         actorUtils.setDatastoreContext(contextFactory);
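
registerTreeChangeListener() above special-cases the empty (root) path: when more than one shard exists it must register with every shard and compose the results, which in turn requires a clustered listener. A hedged sketch of just that dispatch decision; the return values stand in for RootDataTreeChangeListenerProxy and DataTreeChangeListenerProxy.of() respectively:

    import java.util.Set;

    final class ListenerDispatch {
        // Mirror of the branch in registerTreeChangeListener(): composite registration for the root
        // of a multi-shard store, plain single-shard registration otherwise.
        static String register(final boolean rootPath, final boolean clustered, final Set<String> shardNames) {
            if (rootPath && shardNames.size() > 1) {
                if (!clustered) {
                    throw new IllegalArgumentException("Root listeners must be clustered when multiple shards exist");
                }
                return "composite registration across " + shardNames;
            }
            return "single-shard registration";
        }
    }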
@@ -207,7 +215,7 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
 
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public void close() {
+    public final void close() {
         LOG.info("Closing data store {}", identifier);
 
         if (datastoreConfigMXBean != null) {
@@ -233,28 +241,69 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
     }
 
     @Override
-    public ActorUtils getActorUtils() {
+    public final ActorUtils getActorUtils() {
         return actorUtils;
     }
 
-    public void waitTillReady() {
+    // TODO: consider removing this in favor of awaitReadiness()
+    @Deprecated
+    public final void waitTillReady() {
         LOG.info("Beginning to wait for data store to become ready : {}", identifier);
 
+        final Duration toWait = initialSettleTime();
         try {
-            if (waitTillReadyCountDownLatch.await(waitTillReadyTimeInMillis, TimeUnit.MILLISECONDS)) {
-                LOG.debug("Data store {} is now ready", identifier);
-            } else {
-                LOG.error("Shard leaders failed to settle in {} seconds, giving up",
-                        TimeUnit.MILLISECONDS.toSeconds(waitTillReadyTimeInMillis));
+            if (!awaitReadiness(toWait)) {
+                LOG.error("Shard leaders failed to settle in {}, giving up", toWait);
+                return;
             }
         } catch (InterruptedException e) {
             LOG.error("Interrupted while waiting for shards to settle", e);
+            return;
+        }
+
+        LOG.debug("Data store {} is now ready", identifier);
+    }
+
+    @Beta
+    @Deprecated
+    public final boolean awaitReadiness() throws InterruptedException {
+        return awaitReadiness(initialSettleTime());
+    }
+
+    @Beta
+    @Deprecated
+    public final boolean awaitReadiness(final Duration toWait) throws InterruptedException {
+        try {
+            if (toWait.isFinite()) {
+                try {
+                    readinessFuture.get(toWait.toNanos(), TimeUnit.NANOSECONDS);
+                } catch (TimeoutException e) {
+                    LOG.debug("Timed out waiting for shards to settle", e);
+                    return false;
+                }
+            } else {
+                readinessFuture.get();
+            }
+        } catch (ExecutionException e) {
+            LOG.warn("Unexpected readiness failure, assuming convergence", e);
+        }
+
+        return true;
+    }
+
+    @Beta
+    @Deprecated
+    public final void awaitReadiness(final long timeout, final TimeUnit unit)
+            throws InterruptedException, TimeoutException {
+        if (!awaitReadiness(Duration.create(timeout, unit))) {
+            throw new TimeoutException("Shard leaders failed to settle");
         }
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private static ActorRef createShardManager(final ActorSystem actorSystem, final ShardManagerCreator creator,
-            final String shardDispatcher, final String shardManagerId) {
+    private static ActorRef createShardManager(final ActorSystem actorSystem,
+            final AbstractShardManagerCreator<?> creator, final String shardDispatcher,
+            final String shardManagerId) {
         Exception lastException = null;
 
         for (int i = 0; i < 100; i++) {
@@ -271,48 +320,47 @@ public abstract class AbstractDataStore implements DistributedDataStoreInterface
         throw new IllegalStateException("Failed to create Shard Manager", lastException);
     }
 
-    @VisibleForTesting
-    public CountDownLatch getWaitTillReadyCountDownLatch() {
-        return waitTillReadyCountDownLatch;
+    /**
+     * Future which completes when all shards settle for the first time.
+     *
+     * @return a {@link ListenableFuture} which completes on the initial settle.
+     */
+    public final ListenableFuture<?> initialSettleFuture() {
+        return readinessFuture;
     }
 
-    @SuppressWarnings("unchecked")
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerProxyListener(
-            final YangInstanceIdentifier shardLookup,
-            final YangInstanceIdentifier insideShard,
-            final org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener delegate) {
+    @VisibleForTesting
+    public final SettableFuture<Empty> readinessFuture() {
+        return readinessFuture;
+    }
 
+    @Override
+    public final Registration registerProxyListener(final YangInstanceIdentifier shardLookup,
+            final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) {
         requireNonNull(shardLookup, "shardLookup should not be null");
         requireNonNull(insideShard, "insideShard should not be null");
         requireNonNull(delegate, "delegate should not be null");
 
-        final String shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
-        LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}",
-                delegate,shardLookup, shardName, insideShard);
+        final var shardName = actorUtils.getShardStrategyFactory().getStrategy(shardLookup).findShard(shardLookup);
+        LOG.debug("Registering tree listener: {} for tree: {} shard: {}, path inside shard: {}", delegate, shardLookup,
+            shardName, insideShard);
 
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> listenerRegistrationProxy =
-                new DataTreeChangeListenerProxy<>(actorUtils,
-                        // wrap this in the ClusteredDOMDataTreeChangeLister interface
-                        // since we always want clustered registration
-                        (ClusteredDOMDataTreeChangeListener) delegate::onDataTreeChanged, insideShard);
-        listenerRegistrationProxy.init(shardName);
+        return DataTreeChangeListenerProxy.of(actorUtils, new DOMDataTreeChangeListener() {
+            @Override
+            public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
+                delegate.onDataTreeChanged(changes);
+            }
 
-        return (ListenerRegistration<L>) listenerRegistrationProxy;
+            @Override
+            public void onInitialData() {
+                delegate.onInitialData();
+            }
+        }, insideShard, true, shardName);
     }
 
-    @SuppressWarnings("unchecked")
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerShardConfigListener(
-            final YangInstanceIdentifier internalPath,
-            final DOMDataTreeChangeListener delegate) {
-        requireNonNull(delegate, "delegate should not be null");
-
-        LOG.debug("Registering a listener for the configuration shard: {}", internalPath);
-
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
-                new DataTreeChangeListenerProxy<>(actorUtils, delegate, internalPath);
-        proxy.init(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
-
-        return (ListenerRegistration<L>) proxy;
+    private Duration initialSettleTime() {
+        final DatastoreContext context = actorUtils.getDatastoreContext();
+        final int multiplier = context.getInitialSettleTimeoutMultiplier();
+        return multiplier == 0 ? Duration.Inf() : context.getShardLeaderElectionTimeout().duration().$times(multiplier);
     }
-
 }
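
For illustration, a minimal blocking consumer of the readiness API introduced above might look as follows. The DataStoreStartup holder and the 90 second timeout are made up for this sketch and are not part of the change; callers that prefer not to block can hook onto initialSettleFuture() instead.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.cluster.datastore.AbstractDataStore;

final class DataStoreStartup {
    private DataStoreStartup() {
        // Utility holder for the sketch only.
    }

    static void waitForShards(final AbstractDataStore dataStore) throws InterruptedException {
        try {
            // awaitReadiness(long, TimeUnit) throws TimeoutException if the shard
            // leaders do not settle within the given bound.
            dataStore.awaitReadiness(90, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            throw new IllegalStateException("Shard leaders failed to settle", e);
        }
    }
}
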
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractDatastoreContextIntrospectorFactory.java
new file mode 100644 (file)
index 0000000..b44bf38
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.util.Map;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+
+abstract class AbstractDatastoreContextIntrospectorFactory implements DatastoreContextIntrospectorFactory {
+    @Override
+    public DatastoreContextIntrospector newInstance(final LogicalDatastoreType datastoreType,
+            final Map<String, Object> properties) {
+        final DatastoreContextIntrospector inst = newInstance(datastoreType);
+        inst.update(properties);
+        return inst;
+    }
+
+    @VisibleForTesting
+    final DatastoreContextIntrospector newInstance(final LogicalDatastoreType datastoreType) {
+        return newInstance(DatastoreContext.newBuilder()
+                .logicalStoreType(datastoreType)
+                .tempFileDirectory("./data")
+                .build());
+    }
+
+    @VisibleForTesting
+    final @NonNull DatastoreContextIntrospector newInstance(final DatastoreContext context) {
+        return new DatastoreContextIntrospector(context, (DataStorePropertiesContainer) serializer()
+            .fromNormalizedNode(YangInstanceIdentifier.of(DataStorePropertiesContainer.QNAME),
+                ImmutableNodes.newContainerBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(DataStorePropertiesContainer.QNAME))
+                .build())
+            .getValue());
+    }
+
+    abstract BindingNormalizedNodeSerializer serializer();
+}
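
A concrete factory only needs to supply the serializer. A hypothetical subclass is sketched below: the class name is made up, it has to live in the same package because the base class is package-private, and a real deployment would obtain the BindingNormalizedNodeSerializer from the binding codec runtime rather than through a plain constructor argument.

package org.opendaylight.controller.cluster.datastore;

import static java.util.Objects.requireNonNull;

import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;

final class FixedSerializerIntrospectorFactory extends AbstractDatastoreContextIntrospectorFactory {
    private final BindingNormalizedNodeSerializer serializer;

    FixedSerializerIntrospectorFactory(final BindingNormalizedNodeSerializer serializer) {
        this.serializer = requireNonNull(serializer);
    }

    @Override
    BindingNormalizedNodeSerializer serializer() {
        return serializer;
    }
}
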
index 072f8ede80c978987e78767f0df019f56efa6438..d00db5757eb342801e8a49c28fd5d513f714398c 100644 (file)
@@ -7,17 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
 import com.google.common.primitives.UnsignedLong;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Optional;
 import java.util.SortedSet;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.AbstractReadTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ClosedTransactionException;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
@@ -25,6 +25,8 @@ import org.opendaylight.controller.cluster.access.commands.DeadTransactionExcept
 import org.opendaylight.controller.cluster.access.commands.IncrementTransactionSequenceRequest;
 import org.opendaylight.controller.cluster.access.commands.LocalHistorySuccess;
 import org.opendaylight.controller.cluster.access.commands.OutOfOrderRequestException;
+import org.opendaylight.controller.cluster.access.commands.SkipTransactionsRequest;
+import org.opendaylight.controller.cluster.access.commands.SkipTransactionsResponse;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeResponse;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
@@ -33,8 +35,9 @@ import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifie
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +51,7 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
     private static final Logger LOG = LoggerFactory.getLogger(AbstractFrontendHistory.class);
 
     private final Map<TransactionIdentifier, FrontendTransaction> transactions = new HashMap<>();
-    private final RangeSet<UnsignedLong> purgedTransactions;
+    private final MutableUnsignedLongSet purgedTransactions;
     private final String persistenceId;
     private final ShardDataTree tree;
 
@@ -59,11 +62,11 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
     private Map<UnsignedLong, Boolean> closedTransactions;
 
     AbstractFrontendHistory(final String persistenceId, final ShardDataTree tree,
-        final Map<UnsignedLong, Boolean> closedTransactions, final RangeSet<UnsignedLong> purgedTransactions) {
-        this.persistenceId = Preconditions.checkNotNull(persistenceId);
-        this.tree = Preconditions.checkNotNull(tree);
-        this.closedTransactions = Preconditions.checkNotNull(closedTransactions);
-        this.purgedTransactions = Preconditions.checkNotNull(purgedTransactions);
+            final Map<UnsignedLong, Boolean> closedTransactions, final MutableUnsignedLongSet purgedTransactions) {
+        this.persistenceId = requireNonNull(persistenceId);
+        this.tree = requireNonNull(tree);
+        this.closedTransactions = requireNonNull(closedTransactions);
+        this.purgedTransactions = requireNonNull(purgedTransactions);
     }
 
     final String persistenceId() {
@@ -74,22 +77,24 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
         return tree.readTime();
     }
 
-    @Nullable
-    final TransactionSuccess<?> handleTransactionRequest(final TransactionRequest<?> request,
+    final @Nullable TransactionSuccess<?> handleTransactionRequest(final TransactionRequest<?> request,
             final RequestEnvelope envelope, final long now) throws RequestException {
-        if (request instanceof TransactionPurgeRequest) {
-            return handleTransactionPurgeRequest(request, envelope, now);
+        if (request instanceof TransactionPurgeRequest purgeRequest) {
+            return handleTransactionPurgeRequest(purgeRequest, envelope, now);
+        } else if (request instanceof SkipTransactionsRequest skipRequest) {
+            return handleSkipTransactionsRequest(skipRequest, envelope, now);
         }
 
         final TransactionIdentifier id = request.getTarget();
-        final UnsignedLong ul = UnsignedLong.fromLongBits(id.getTransactionId());
-        if (purgedTransactions.contains(ul)) {
+        final long txidBits = id.getTransactionId();
+        if (purgedTransactions.contains(txidBits)) {
             LOG.warn("{}: Request {} is contained purged transactions {}", persistenceId, request, purgedTransactions);
-            throw new DeadTransactionException(purgedTransactions);
+            throw new DeadTransactionException(purgedTransactions.toRangeSet());
         }
-        final Boolean closed = closedTransactions.get(ul);
+
+        final Boolean closed = closedTransactions.get(UnsignedLong.fromLongBits(txidBits));
         if (closed != null) {
-            final boolean successful = closed.booleanValue();
+            final boolean successful = closed;
             LOG.debug("{}: Request {} refers to a {} transaction", persistenceId, request, successful ? "successful"
                     : "failed");
             throw new ClosedTransactionException(successful);
@@ -108,7 +113,7 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
         } else if (!(request instanceof IncrementTransactionSequenceRequest)) {
             final Optional<TransactionSuccess<?>> maybeReplay = tx.replaySequence(request.getSequence());
             if (maybeReplay.isPresent()) {
-                final TransactionSuccess<?> replay = maybeReplay.get();
+                final TransactionSuccess<?> replay = maybeReplay.orElseThrow();
                 LOG.debug("{}: envelope {} replaying response {}", persistenceId(), envelope, replay);
                 return replay;
             }
@@ -117,11 +122,11 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
         return tx.handleRequest(request, envelope, now);
     }
 
-    private TransactionSuccess<?> handleTransactionPurgeRequest(final TransactionRequest<?> request,
+    private TransactionPurgeResponse handleTransactionPurgeRequest(final TransactionPurgeRequest request,
             final RequestEnvelope envelope, final long now) {
         final TransactionIdentifier id = request.getTarget();
-        final UnsignedLong ul = UnsignedLong.fromLongBits(id.getTransactionId());
-        if (purgedTransactions.contains(ul)) {
+        final long txidBits = id.getTransactionId();
+        if (purgedTransactions.contains(txidBits)) {
             // Retransmitted purge request: nothing to do
             LOG.debug("{}: transaction {} already purged", persistenceId, id);
             return new TransactionPurgeResponse(id, request.getSequence());
@@ -129,6 +134,7 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
 
         // We perform two lookups instead of a straight remove, because once the map becomes empty we switch it
         // to an ImmutableMap, which does not allow remove().
+        final UnsignedLong ul = UnsignedLong.fromLongBits(txidBits);
         if (closedTransactions.containsKey(ul)) {
             tree.purgeTransaction(id, () -> {
                 closedTransactions.remove(ul);
@@ -136,7 +142,7 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
                     closedTransactions = ImmutableMap.of();
                 }
 
-                purgedTransactions.add(Range.closedOpen(ul, UnsignedLong.ONE.plus(ul)));
+                purgedTransactions.add(txidBits);
                 LOG.debug("{}: finished purging inherited transaction {}", persistenceId(), id);
                 envelope.sendSuccess(new TransactionPurgeResponse(id, request.getSequence()), readTime() - now);
             });
@@ -149,12 +155,12 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
             // purged transactions in one go. If it does, we warn about the situation and
             LOG.warn("{}: transaction {} not tracked in {}, but not present in active transactions", persistenceId,
                 id, purgedTransactions);
-            purgedTransactions.add(Range.closedOpen(ul, UnsignedLong.ONE.plus(ul)));
+            purgedTransactions.add(txidBits);
             return new TransactionPurgeResponse(id, request.getSequence());
         }
 
         tree.purgeTransaction(id, () -> {
-            purgedTransactions.add(Range.closedOpen(ul, UnsignedLong.ONE.plus(ul)));
+            purgedTransactions.add(txidBits);
             transactions.remove(id);
             LOG.debug("{}: finished purging transaction {}", persistenceId(), id);
             envelope.sendSuccess(new TransactionPurgeResponse(id, request.getSequence()), readTime() - now);
@@ -163,6 +169,43 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
         return null;
     }
 
+    private SkipTransactionsResponse handleSkipTransactionsRequest(final SkipTransactionsRequest request,
+            final RequestEnvelope envelope, final long now) {
+        final var first = request.getTarget();
+        final var others = request.getOthers();
+        final var ids = new ArrayList<UnsignedLong>(others.size() + 1);
+        ids.add(UnsignedLong.fromLongBits(first.getTransactionId()));
+        ids.addAll(others);
+
+        final var it = ids.iterator();
+        while (it.hasNext()) {
+            final var id = it.next();
+            final long bits = id.longValue();
+            if (purgedTransactions.contains(bits)) {
+                LOG.warn("{}: history {} tracks {} as purged", persistenceId(), getIdentifier(), id);
+                it.remove();
+            } else if (transactions.containsKey(new TransactionIdentifier(getIdentifier(), bits))) {
+                LOG.warn("{}: history {} tracks {} as open", persistenceId(), getIdentifier(), id);
+                it.remove();
+            }
+        }
+
+        if (ids.isEmpty()) {
+            LOG.debug("{}: history {} completing empty skip request", persistenceId(), getIdentifier());
+            return new SkipTransactionsResponse(first, now);
+        }
+
+        final var transactionIds = MutableUnsignedLongSet.of(ids.stream().mapToLong(UnsignedLong::longValue).toArray())
+            .immutableCopy();
+        LOG.debug("{}: history {} skipping transactions {}", persistenceId(), getIdentifier(), transactionIds.ranges());
+
+        tree.skipTransactions(getIdentifier(), transactionIds, () -> {
+            purgedTransactions.addAll(transactionIds);
+            envelope.sendSuccess(new TransactionPurgeResponse(first, request.getSequence()), readTime() - now);
+        });
+        return null;
+    }
+
     final void destroy(final long sequence, final RequestEnvelope envelope, final long now) {
         LOG.debug("{}: closing history {}", persistenceId(), getIdentifier());
         tree.closeTransactionChain(getIdentifier(),
@@ -181,13 +224,12 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
     }
 
     private FrontendTransaction createTransaction(final TransactionRequest<?> request, final TransactionIdentifier id) {
-        if (request instanceof CommitLocalTransactionRequest) {
+        if (request instanceof CommitLocalTransactionRequest commitLocalRequest) {
             LOG.debug("{}: allocating new ready transaction {}", persistenceId(), id);
             tree.getStats().incrementReadWriteTransactionCount();
-            return createReadyTransaction(id, ((CommitLocalTransactionRequest) request).getModification());
+            return createReadyTransaction(id, commitLocalRequest.getModification());
         }
-        if (request instanceof AbstractReadTransactionRequest
-                && ((AbstractReadTransactionRequest<?>) request).isSnapshotOnly()) {
+        if (request instanceof AbstractReadTransactionRequest<?> readTxRequest && readTxRequest.isSnapshotOnly()) {
             LOG.debug("{}: allocating new open snapshot {}", persistenceId(), id);
             tree.getStats().incrementReadOnlyTransactionCount();
             return createOpenSnapshot(id);
@@ -202,8 +244,7 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
 
     abstract FrontendTransaction createOpenTransaction(TransactionIdentifier id);
 
-    abstract FrontendTransaction createReadyTransaction(TransactionIdentifier id, DataTreeModification mod)
-        ;
+    abstract FrontendTransaction createReadyTransaction(TransactionIdentifier id, DataTreeModification mod);
 
     abstract ShardDataTreeCohort createFailedCohort(TransactionIdentifier id, DataTreeModification mod,
             Exception failure);
@@ -212,8 +253,11 @@ abstract class AbstractFrontendHistory implements Identifiable<LocalHistoryIdent
             Optional<SortedSet<String>> participatingShardNames);
 
     @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this).omitNullValues().add("identifier", getIdentifier())
-                .add("persistenceId", persistenceId).add("transactions", transactions).toString();
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).omitNullValues()
+            .add("identifier", getIdentifier())
+            .add("persistenceId", persistenceId)
+            .add("transactions", transactions)
+            .toString();
     }
 }
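
The purged-transaction bookkeeping above moves from RangeSet<UnsignedLong> to MutableUnsignedLongSet, operating on the raw long bits of the transaction id. A rough sketch of the difference follows; it assumes MutableUnsignedLongSet.of() with no arguments yields an empty set, as the varargs factory used in handleSkipTransactionsRequest() suggests.

import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;

final class PurgeTrackingSketch {
    static void example(final long transactionIdBits) {
        // Assumed empty-set factory, see the note above.
        final var purged = MutableUnsignedLongSet.of();

        // Previously: purgedTransactions.add(Range.closedOpen(ul, UnsignedLong.ONE.plus(ul)))
        // with boxed UnsignedLong values. Now the long bits are tracked directly.
        purged.add(transactionIdBits);

        if (purged.contains(transactionIdBits)) {
            // A RangeSet view remains available where the old shape is needed,
            // for example when throwing DeadTransactionException.
            System.out.println(purged.toRangeSet());
        }
    }
}
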
index b4cdb81c5aee7e9088e94d31cd62d35ed1260cb1..b4c65a80c07f672172d878b8d2eee1aefee72f34 100644 (file)
@@ -11,19 +11,17 @@ import akka.actor.ActorContext;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Abstract base class for a ShardDataTreeNotificationPublisher that offloads the generation and publication
- * of data tree notifications to an actor.
+ * of data tree notifications to an actor. This class is NOT thread-safe.
  *
  * @author Thomas Pantelis
  */
-@NotThreadSafe
 abstract class AbstractShardDataTreeNotificationPublisherActorProxy implements ShardDataTreeNotificationPublisher {
     @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
     protected final Logger log = LoggerFactory.getLogger(getClass());
index cde7da28f156f4a4e89dc04e28557fa2bcd80db2..e559ff12498be15eb9f85d4be8ce3adfec5064bf 100644 (file)
@@ -7,20 +7,20 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 /**
- * Abstract base for transactions running on SharrdDataTree.
+ * Abstract base for transactions running on ShardDataTree. This class is NOT thread-safe.
  *
  * @param <T> Backing transaction type.
  */
-@NotThreadSafe
 abstract class AbstractShardDataTreeTransaction<T extends DataTreeSnapshot>
         implements Identifiable<TransactionIdentifier> {
     private final ShardDataTreeTransactionParent parent;
@@ -31,9 +31,9 @@ abstract class AbstractShardDataTreeTransaction<T extends DataTreeSnapshot>
 
     AbstractShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id,
         final T snapshot) {
-        this.parent = Preconditions.checkNotNull(parent);
-        this.snapshot = Preconditions.checkNotNull(snapshot);
-        this.id = Preconditions.checkNotNull(id);
+        this.parent = requireNonNull(parent);
+        this.snapshot = requireNonNull(snapshot);
+        this.id = requireNonNull(id);
     }
 
     @Override
@@ -68,7 +68,7 @@ abstract class AbstractShardDataTreeTransaction<T extends DataTreeSnapshot>
     }
 
     final void abort(final Runnable callback) {
-        Preconditions.checkState(close(), "Transaction is already closed");
+        checkState(close(), "Transaction is already closed");
         parent.abortTransaction(this, callback);
     }
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index 7ef1cd4..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.List;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import scala.concurrent.Future;
-
-/**
- * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} instances returned by this
- * implementation. In addition to the usual set of methods it also contains the list of actor
- * futures.
- */
-public abstract class AbstractThreePhaseCommitCohort<T> implements DOMStoreThreePhaseCommitCohort {
-    protected static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
-    protected static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
-
-    abstract List<Future<T>> getCohortFutures();
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContext.java
deleted file mode 100644 (file)
index 6c4fc60..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-abstract class AbstractTransactionContext implements TransactionContext {
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractTransactionContext.class);
-    private final TransactionIdentifier transactionIdentifier;
-    private long modificationCount = 0;
-    private boolean handOffComplete;
-    private final short transactionVersion;
-
-    protected AbstractTransactionContext(TransactionIdentifier transactionIdentifier) {
-        this(transactionIdentifier, DataStoreVersions.CURRENT_VERSION);
-    }
-
-    protected AbstractTransactionContext(TransactionIdentifier transactionIdentifier, short transactionVersion) {
-        this.transactionIdentifier = transactionIdentifier;
-        this.transactionVersion = transactionVersion;
-    }
-
-    /**
-     * Get the transaction identifier associated with this context.
-     *
-     * @return Transaction identifier.
-     */
-    @Nonnull protected final TransactionIdentifier getIdentifier() {
-        return transactionIdentifier;
-    }
-
-    protected final void incrementModificationCount() {
-        modificationCount++;
-    }
-
-    protected final void logModificationCount() {
-        LOG.debug("Total modifications on Tx {} = [ {} ]", getIdentifier(), modificationCount);
-    }
-
-    @Override
-    public final void operationHandOffComplete() {
-        handOffComplete = true;
-    }
-
-    protected boolean isOperationHandOffComplete() {
-        return handOffComplete;
-    }
-
-    @Override
-    public boolean usesOperationLimiting() {
-        return false;
-    }
-
-    @Override
-    public short getTransactionVersion() {
-        return transactionVersion;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContextFactory.java
deleted file mode 100644 (file)
index a2bbc11..0000000
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicLongFieldUpdater;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.util.Try;
-
-/**
- * Factory for creating local and remote TransactionContext instances. Maintains a cache of known local
- * transaction factories.
- */
-abstract class AbstractTransactionContextFactory<F extends LocalTransactionFactory> implements AutoCloseable {
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractTransactionContextFactory.class);
-    @SuppressWarnings("rawtypes")
-    private static final AtomicLongFieldUpdater<AbstractTransactionContextFactory> TX_COUNTER_UPDATER =
-            AtomicLongFieldUpdater.newUpdater(AbstractTransactionContextFactory.class, "nextTx");
-
-    private final ConcurrentMap<String, F> knownLocal = new ConcurrentHashMap<>();
-    private final LocalHistoryIdentifier historyId;
-    private final ActorUtils actorUtils;
-
-    // Used via TX_COUNTER_UPDATER
-    @SuppressWarnings("unused")
-    private volatile long nextTx;
-
-    protected AbstractTransactionContextFactory(final ActorUtils actorUtils, final LocalHistoryIdentifier historyId) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.historyId = requireNonNull(historyId);
-    }
-
-    final ActorUtils getActorUtils() {
-        return actorUtils;
-    }
-
-    final LocalHistoryIdentifier getHistoryId() {
-        return historyId;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private TransactionContext maybeCreateLocalTransactionContext(final TransactionProxy parent,
-            final String shardName) {
-        final LocalTransactionFactory local = knownLocal.get(shardName);
-        if (local != null) {
-            LOG.debug("Tx {} - Creating local component for shard {} using factory {}", parent.getIdentifier(),
-                shardName, local);
-
-            try {
-                return createLocalTransactionContext(local, parent);
-            } catch (Exception e) {
-                return new NoOpTransactionContext(e, parent.getIdentifier());
-            }
-        }
-
-        return null;
-    }
-
-    private void onFindPrimaryShardSuccess(PrimaryShardInfo primaryShardInfo, TransactionProxy parent,
-            String shardName, TransactionContextWrapper transactionContextWrapper) {
-        LOG.debug("Tx {}: Found primary {} for shard {}", parent.getIdentifier(),
-                primaryShardInfo.getPrimaryShardActor(), shardName);
-
-        updateShardInfo(shardName, primaryShardInfo);
-
-        try {
-            TransactionContext localContext = maybeCreateLocalTransactionContext(parent, shardName);
-            if (localContext != null) {
-                transactionContextWrapper.executePriorTransactionOperations(localContext);
-            } else {
-                RemoteTransactionContextSupport remote = new RemoteTransactionContextSupport(transactionContextWrapper,
-                        parent, shardName);
-                remote.setPrimaryShard(primaryShardInfo);
-            }
-        } finally {
-            onTransactionContextCreated(parent.getIdentifier());
-        }
-    }
-
-    private void onFindPrimaryShardFailure(Throwable failure, TransactionProxy parent,
-            String shardName, TransactionContextWrapper transactionContextWrapper) {
-        LOG.debug("Tx {}: Find primary for shard {} failed", parent.getIdentifier(), shardName, failure);
-
-        try {
-            transactionContextWrapper.executePriorTransactionOperations(new NoOpTransactionContext(failure,
-                    parent.getIdentifier()));
-        } finally {
-            onTransactionContextCreated(parent.getIdentifier());
-        }
-    }
-
-    final TransactionContextWrapper newTransactionContextWrapper(final TransactionProxy parent,
-            final String shardName) {
-        final TransactionContextWrapper transactionContextWrapper =
-                new TransactionContextWrapper(parent.getIdentifier(), actorUtils, shardName);
-
-        Future<PrimaryShardInfo> findPrimaryFuture = findPrimaryShard(shardName, parent.getIdentifier());
-        if (findPrimaryFuture.isCompleted()) {
-            Try<PrimaryShardInfo> maybe = findPrimaryFuture.value().get();
-            if (maybe.isSuccess()) {
-                onFindPrimaryShardSuccess(maybe.get(), parent, shardName, transactionContextWrapper);
-            } else {
-                onFindPrimaryShardFailure(maybe.failed().get(), parent, shardName, transactionContextWrapper);
-            }
-        } else {
-            findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
-                @Override
-                public void onComplete(final Throwable failure, final PrimaryShardInfo primaryShardInfo) {
-                    if (failure == null) {
-                        onFindPrimaryShardSuccess(primaryShardInfo, parent, shardName, transactionContextWrapper);
-                    } else {
-                        onFindPrimaryShardFailure(failure, parent, shardName, transactionContextWrapper);
-                    }
-                }
-            }, actorUtils.getClientDispatcher());
-        }
-
-        return transactionContextWrapper;
-    }
-
-    private void updateShardInfo(final String shardName, final PrimaryShardInfo primaryShardInfo) {
-        final Optional<DataTree> maybeDataTree = primaryShardInfo.getLocalShardDataTree();
-        if (maybeDataTree.isPresent()) {
-            if (!knownLocal.containsKey(shardName)) {
-                LOG.debug("Shard {} resolved to local data tree - adding local factory", shardName);
-
-                F factory = factoryForShard(shardName, primaryShardInfo.getPrimaryShardActor(), maybeDataTree.get());
-                knownLocal.putIfAbsent(shardName, factory);
-            }
-        } else if (knownLocal.containsKey(shardName)) {
-            LOG.debug("Shard {} invalidating local data tree", shardName);
-
-            knownLocal.remove(shardName);
-        }
-    }
-
-    protected final MemberName getMemberName() {
-        return historyId.getClientId().getFrontendId().getMemberName();
-    }
-
-    /**
-     * Create an identifier for the next TransactionProxy attached to this component
-     * factory.
-     * @return Transaction identifier, may not be null.
-     */
-    protected final TransactionIdentifier nextIdentifier() {
-        return new TransactionIdentifier(historyId, TX_COUNTER_UPDATER.getAndIncrement(this));
-    }
-
-    /**
-     * Find the primary shard actor.
-     *
-     * @param shardName Shard name
-     * @return Future containing shard information.
-     */
-    protected abstract Future<PrimaryShardInfo> findPrimaryShard(@Nonnull String shardName,
-            @Nonnull TransactionIdentifier txId);
-
-    /**
-     * Create local transaction factory for specified shard, backed by specified shard leader
-     * and data tree instance.
-     *
-     * @param shardName the shard name
-     * @param shardLeader the shard leader
-     * @param dataTree Backing data tree instance. The data tree may only be accessed in
-     *                 read-only manner.
-     * @return Transaction factory for local use.
-     */
-    protected abstract F factoryForShard(String shardName, ActorSelection shardLeader, DataTree dataTree);
-
-    /**
-     * Callback invoked from child transactions to push any futures, which need to
-     * be waited for before the next transaction is allocated.
-     * @param cohortFutures Collection of futures
-     */
-    protected abstract <T> void onTransactionReady(@Nonnull TransactionIdentifier transaction,
-            @Nonnull Collection<Future<T>> cohortFutures);
-
-    /**
-     * Callback invoked when the internal TransactionContext has been created for a transaction.
-     *
-     * @param transactionId the ID of the transaction.
-     */
-    protected abstract void onTransactionContextCreated(@Nonnull TransactionIdentifier transactionId);
-
-    private static TransactionContext createLocalTransactionContext(final LocalTransactionFactory factory,
-                                                                    final TransactionProxy parent) {
-
-        switch (parent.getType()) {
-            case READ_ONLY:
-                final DOMStoreReadTransaction readOnly = factory.newReadOnlyTransaction(parent.getIdentifier());
-                return new LocalTransactionContext(readOnly, parent.getIdentifier(), factory) {
-                    @Override
-                    protected DOMStoreWriteTransaction getWriteDelegate() {
-                        throw new UnsupportedOperationException();
-                    }
-
-                    @Override
-                    protected DOMStoreReadTransaction getReadDelegate() {
-                        return readOnly;
-                    }
-                };
-            case READ_WRITE:
-                final DOMStoreReadWriteTransaction readWrite = factory.newReadWriteTransaction(parent.getIdentifier());
-                return new LocalTransactionContext(readWrite, parent.getIdentifier(), factory) {
-                    @Override
-                    protected DOMStoreWriteTransaction getWriteDelegate() {
-                        return readWrite;
-                    }
-
-                    @Override
-                    protected DOMStoreReadTransaction getReadDelegate() {
-                        return readWrite;
-                    }
-                };
-            case WRITE_ONLY:
-                final DOMStoreWriteTransaction writeOnly = factory.newWriteOnlyTransaction(parent.getIdentifier());
-                return new LocalTransactionContext(writeOnly, parent.getIdentifier(), factory) {
-                    @Override
-                    protected DOMStoreWriteTransaction getWriteDelegate() {
-                        return writeOnly;
-                    }
-
-                    @Override
-                    protected DOMStoreReadTransaction getReadDelegate() {
-                        throw new UnsupportedOperationException();
-                    }
-                };
-            default:
-                throw new IllegalArgumentException("Invalid transaction type: " + parent.getType());
-        }
-    }
-}
index 2cb4dee3749ce7195851a44ad543ca2b345f901b..e3bb074bdc6fe7cada938e48b8b88281c8e59935 100644 (file)
@@ -7,15 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -27,9 +29,9 @@ final class ChainedCommitCohort extends ShardDataTreeCohort {
 
     ChainedCommitCohort(final ShardDataTreeTransactionChain chain, final ReadWriteShardDataTreeTransaction transaction,
             final ShardDataTreeCohort delegate) {
-        this.transaction = Preconditions.checkNotNull(transaction);
-        this.delegate = Preconditions.checkNotNull(delegate);
-        this.chain = Preconditions.checkNotNull(chain);
+        this.transaction = requireNonNull(transaction);
+        this.delegate = requireNonNull(delegate);
+        this.chain = requireNonNull(chain);
     }
 
     @Override
@@ -51,12 +53,12 @@ final class ChainedCommitCohort extends ShardDataTreeCohort {
     }
 
     @Override
-    public TransactionIdentifier getIdentifier() {
-        return delegate.getIdentifier();
+    TransactionIdentifier transactionId() {
+        return delegate.transactionId();
     }
 
     @Override
-    public void canCommit(final FutureCallback<Void> callback) {
+    public void canCommit(final FutureCallback<Empty> callback) {
         delegate.canCommit(callback);
     }
 
@@ -66,7 +68,7 @@ final class ChainedCommitCohort extends ShardDataTreeCohort {
     }
 
     @Override
-    public void abort(final FutureCallback<Void> callback) {
+    public void abort(final FutureCallback<Empty> callback) {
         delegate.abort(callback);
     }
 
index 38d23f6884ca949e121579da0fb669391ffc058e..be162d9304ee205d5fde8afee03f72d822d81296 100644 (file)
@@ -5,15 +5,16 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Address;
 import akka.cluster.Cluster;
 import akka.cluster.ClusterEvent;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 
 public class ClusterWrapperImpl implements ClusterWrapper {
@@ -21,12 +22,10 @@ public class ClusterWrapperImpl implements ClusterWrapper {
     private final MemberName currentMemberName;
     private final Address selfAddress;
 
-    public ClusterWrapperImpl(ActorSystem actorSystem) {
-        Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
+    public ClusterWrapperImpl(final ActorSystem actorSystem) {
+        cluster = Cluster.get(requireNonNull(actorSystem, "actorSystem should not be null"));
 
-        cluster = Cluster.get(actorSystem);
-
-        Preconditions.checkState(cluster.getSelfRoles().size() > 0,
+        checkState(cluster.getSelfRoles().size() > 0,
             "No akka roles were specified.\n"
             + "One way to specify the member name is to pass a property on the command line like so\n"
             + "   -Dakka.cluster.roles.0=member-3\n"
@@ -37,10 +36,8 @@ public class ClusterWrapperImpl implements ClusterWrapper {
     }
 
     @Override
-    public void subscribeToMemberEvents(ActorRef actorRef) {
-        Preconditions.checkNotNull(actorRef, "actorRef should not be null");
-
-        cluster.subscribe(actorRef, ClusterEvent.initialStateAsEvents(),
+    public void subscribeToMemberEvents(final ActorRef actorRef) {
+        cluster.subscribe(requireNonNull(actorRef, "actorRef should not be null"), ClusterEvent.initialStateAsEvents(),
             ClusterEvent.MemberEvent.class,
             ClusterEvent.UnreachableMember.class,
             ClusterEvent.ReachableMember.class);
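
The conversion seen here, and throughout this commit, swaps Preconditions.checkNotNull()/checkState() calls for the statically imported requireNonNull()/checkState() forms. A standalone illustration of the pattern, with a made-up class and messages:

import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;

final class Example {
    private final String name;
    private boolean closed;

    Example(final String name) {
        // Fails fast with a NullPointerException carrying the supplied message.
        this.name = requireNonNull(name, "name should not be null");
    }

    String name() {
        // Fails with an IllegalStateException once the instance has been closed.
        checkState(!closed, "%s is already closed", name);
        return name;
    }

    void close() {
        closed = true;
    }
}
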
index 3d5238ee77812eed4190017d7d7a1289887875dd..120b004a6e9bf44513acc784df5efe619232abe0 100644 (file)
@@ -7,18 +7,24 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.List;
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortDecorator;
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 final class CohortEntry {
     private final ReadWriteShardDataTreeTransaction transaction;
     private final TransactionIdentifier transactionId;
@@ -26,21 +32,23 @@ final class CohortEntry {
 
     private RuntimeException lastBatchedModificationsException;
     private int totalBatchedModificationsReceived;
+    private int totalOperationsProcessed;
     private ShardDataTreeCohort cohort;
     private boolean doImmediateCommit;
     private ActorRef replySender;
     private Shard shard;
 
     private CohortEntry(final ReadWriteShardDataTreeTransaction transaction, final short clientVersion) {
-        this.transaction = Preconditions.checkNotNull(transaction);
-        this.transactionId = transaction.getIdentifier();
+        cohort = null;
+        this.transaction = requireNonNull(transaction);
+        transactionId = transaction.getIdentifier();
         this.clientVersion = clientVersion;
     }
 
     private CohortEntry(final ShardDataTreeCohort cohort, final short clientVersion) {
-        this.cohort = Preconditions.checkNotNull(cohort);
-        this.transactionId = cohort.getIdentifier();
-        this.transaction = null;
+        this.cohort = requireNonNull(cohort);
+        transactionId = cohort.transactionId();
+        transaction = null;
         this.clientVersion = clientVersion;
     }
 
@@ -76,14 +84,20 @@ final class CohortEntry {
         return totalBatchedModificationsReceived;
     }
 
+    int getTotalOperationsProcessed() {
+        return totalOperationsProcessed;
+    }
+
     RuntimeException getLastBatchedModificationsException() {
         return lastBatchedModificationsException;
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    void applyModifications(final Iterable<Modification> modifications) {
+    @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Re-thrown")
+    void applyModifications(final List<Modification> modifications) {
         totalBatchedModificationsReceived++;
         if (lastBatchedModificationsException == null) {
+            totalOperationsProcessed += modifications.size();
             for (Modification modification : modifications) {
                 try {
                     modification.apply(transaction.getSnapshot());
@@ -95,7 +109,7 @@ final class CohortEntry {
         }
     }
 
-    void canCommit(final FutureCallback<Void> callback) {
+    void canCommit(final FutureCallback<Empty> callback) {
         cohort.canCommit(callback);
     }
 
@@ -107,12 +121,12 @@ final class CohortEntry {
         cohort.commit(callback);
     }
 
-    void abort(final FutureCallback<Void> callback) {
+    void abort(final FutureCallback<Empty> callback) {
         cohort.abort(callback);
     }
 
     void ready(final Optional<SortedSet<String>> participatingShardNames, final CohortDecorator cohortDecorator) {
-        Preconditions.checkState(cohort == null, "cohort was already set");
+        checkState(cohort == null, "cohort was already set");
 
         cohort = transaction.ready(participatingShardNames);
 
@@ -122,6 +136,10 @@ final class CohortEntry {
         }
     }
 
+    boolean isSealed() {
+        return cohort != null;
+    }
+
     Optional<SortedSet<String>> getParticipatingShardNames() {
         return cohort != null ? cohort.getParticipatingShardNames() : Optional.empty();
     }
index 200f766b5d3619da0bc1427145a1736ee94bcd77..078b45f68f9e98cdcef1cd48b190fbedddd1a065 100644 (file)
@@ -5,9 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Status;
 import akka.actor.Status.Failure;
@@ -17,13 +19,10 @@ import akka.dispatch.OnComplete;
 import akka.dispatch.Recover;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.AbstractMap.SimpleImmutableEntry;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
@@ -31,12 +30,13 @@ import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionStage;
 import java.util.concurrent.Executor;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.CanCommit;
 import org.opendaylight.controller.cluster.datastore.DataTreeCohortActor.Success;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.compat.java8.FutureConverters;
@@ -47,7 +47,6 @@ import scala.concurrent.Future;
  * <p/>
  * It tracks current operation and list of cohorts which successfuly finished previous phase in
  * case, if abort is necessary to invoke it only on cohort steps which are still active.
- *
  */
 class CompositeDataTreeCohort {
     private static final Logger LOG = LoggerFactory.getLogger(CompositeDataTreeCohort.class);
@@ -91,7 +90,7 @@ class CompositeDataTreeCohort {
         ABORTED
     }
 
-    static final Recover<Object> EXCEPTION_TO_MESSAGE = new Recover<Object>() {
+    static final Recover<Object> EXCEPTION_TO_MESSAGE = new Recover<>() {
         @Override
         public Failure recover(final Throwable error) {
             return new Failure(error);
@@ -100,21 +99,20 @@ class CompositeDataTreeCohort {
 
     private final DataTreeCohortActorRegistry registry;
     private final TransactionIdentifier txId;
-    private final SchemaContext schema;
+    private final EffectiveModelContext schema;
     private final Executor callbackExecutor;
     private final Timeout timeout;
 
-    @Nonnull
-    private List<Success> successfulFromPrevious = Collections.emptyList();
+    private @NonNull List<Success> successfulFromPrevious = List.of();
     private State state = State.IDLE;
 
     CompositeDataTreeCohort(final DataTreeCohortActorRegistry registry, final TransactionIdentifier transactionID,
-        final SchemaContext schema, final Executor callbackExecutor, final Timeout timeout) {
-        this.registry = Preconditions.checkNotNull(registry);
-        this.txId = Preconditions.checkNotNull(transactionID);
-        this.schema = Preconditions.checkNotNull(schema);
-        this.callbackExecutor = Preconditions.checkNotNull(callbackExecutor);
-        this.timeout = Preconditions.checkNotNull(timeout);
+        final EffectiveModelContext schema, final Executor callbackExecutor, final Timeout timeout) {
+        this.registry = requireNonNull(registry);
+        txId = requireNonNull(transactionID);
+        this.schema = requireNonNull(schema);
+        this.callbackExecutor = requireNonNull(callbackExecutor);
+        this.timeout = requireNonNull(timeout);
     }
 
     void reset() {
@@ -135,11 +133,11 @@ class CompositeDataTreeCohort {
                 throw new IllegalStateException("Unhandled state " + state);
         }
 
-        successfulFromPrevious = Collections.emptyList();
+        successfulFromPrevious = List.of();
         state = State.IDLE;
     }
 
-    Optional<CompletionStage<Void>> canCommit(final DataTreeCandidate tip) {
+    Optional<CompletionStage<Empty>> canCommit(final DataTreeCandidate tip) {
         if (LOG.isTraceEnabled()) {
             LOG.trace("{}: canCommit - candidate: {}", txId, tip);
         } else {
@@ -149,7 +147,7 @@ class CompositeDataTreeCohort {
         final List<CanCommit> messages = registry.createCanCommitMessages(txId, tip, schema);
         LOG.debug("{}: canCommit - messages: {}", txId, messages);
         if (messages.isEmpty()) {
-            successfulFromPrevious = Collections.emptyList();
+            successfulFromPrevious = List.of();
             changeStateFrom(State.IDLE, State.CAN_COMMIT_SUCCESSFUL);
             return Optional.empty();
         }
@@ -167,7 +165,7 @@ class CompositeDataTreeCohort {
         return Optional.of(processResponses(futures, State.CAN_COMMIT_SENT, State.CAN_COMMIT_SUCCESSFUL));
     }
 
-    Optional<CompletionStage<Void>> preCommit() {
+    Optional<CompletionStage<Empty>> preCommit() {
         LOG.debug("{}: preCommit - successfulFromPrevious: {}", txId, successfulFromPrevious);
 
         if (successfulFromPrevious.isEmpty()) {
@@ -181,7 +179,7 @@ class CompositeDataTreeCohort {
         return Optional.of(processResponses(futures, State.PRE_COMMIT_SENT, State.PRE_COMMIT_SUCCESSFUL));
     }
 
-    Optional<CompletionStage<Void>> commit() {
+    Optional<CompletionStage<Empty>> commit() {
         LOG.debug("{}: commit - successfulFromPrevious: {}", txId, successfulFromPrevious);
         if (successfulFromPrevious.isEmpty()) {
             changeStateFrom(State.PRE_COMMIT_SUCCESSFUL, State.COMMITED);
@@ -222,11 +220,10 @@ class CompositeDataTreeCohort {
         return ret;
     }
 
-    @Nonnull
-    private CompletionStage<Void> processResponses(final List<Entry<ActorRef, Future<Object>>> futures,
+    private @NonNull CompletionStage<Empty> processResponses(final List<Entry<ActorRef, Future<Object>>> futures,
             final State currentState, final State afterState) {
         LOG.debug("{}: processResponses - currentState: {}, afterState: {}", txId, currentState, afterState);
-        final CompletableFuture<Void> returnFuture = new CompletableFuture<>();
+        final CompletableFuture<Empty> returnFuture = new CompletableFuture<>();
         Future<Iterable<Object>> aggregateFuture = Futures.sequence(Lists.transform(futures, Entry::getValue),
                 ExecutionContexts.global());
 
@@ -241,13 +238,10 @@ class CompositeDataTreeCohort {
         return returnFuture;
     }
 
-    // FB issues violation for passing null to CompletableFuture#complete but it is valid and necessary when the
-    // generic type is Void.
-    @SuppressFBWarnings("NP_NONNULL_PARAM_VIOLATION")
     private void processResponses(final Throwable failure, final Iterable<Object> results,
-            final State currentState, final State afterState, final CompletableFuture<Void> resultFuture) {
+            final State currentState, final State afterState, final CompletableFuture<Empty> resultFuture) {
         if (failure != null) {
-            successfulFromPrevious = Collections.emptyList();
+            successfulFromPrevious = List.of();
             resultFuture.completeExceptionally(failure);
             return;
         }
@@ -274,17 +268,17 @@ class CompositeDataTreeCohort {
                 firstEx.addSuppressed(it.next().cause());
             }
 
-            successfulFromPrevious = Collections.emptyList();
+            successfulFromPrevious = List.of();
             resultFuture.completeExceptionally(firstEx);
         } else {
             successfulFromPrevious = successful;
             changeStateFrom(currentState, afterState);
-            resultFuture.complete(null);
+            resultFuture.complete(Empty.value());
         }
     }
 
     void changeStateFrom(final State expected, final State followup) {
-        Preconditions.checkState(state == expected);
+        checkState(state == expected);
         state = followup;
     }
 }
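
The CompletionStage<Void>-to-CompletionStage<Empty> change above lets the future be completed with a real singleton value instead of null, which is why the NP_NONNULL_PARAM_VIOLATION suppression could be dropped. A minimal JDK-only sketch of the same idea, using a hypothetical marker object rather than the yangtools Empty type:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;

// Illustration only: a hypothetical marker standing in for Empty.value().
final class MarkerFutureSketch {
    // Single shared marker value returned on success
    static final Object MARKER = new Object();

    private MarkerFutureSketch() {
        // utility class
    }

    static CompletionStage<Object> signalDone(final boolean failed) {
        final CompletableFuture<Object> future = new CompletableFuture<>();
        if (failed) {
            future.completeExceptionally(new IllegalStateException("canCommit failed"));
        } else {
            // Completing with a non-null marker avoids the complete(null) idiom
            // that needed the SpotBugs suppression removed in the hunk above.
            future.complete(MARKER);
        }
        return future;
    }
}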
index 7e79c3e88a9f852cde80fae1fe731165ee11048a..88877bc36a8e97b90711d2aeb5e89b2356c387c8 100644 (file)
@@ -5,23 +5,23 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
 
 final class DOMDataTreeCandidateTO implements DOMDataTreeCandidate {
 
     private final DOMDataTreeIdentifier rootPath;
     private final DataTreeCandidateNode rootNode;
 
-    private DOMDataTreeCandidateTO(DOMDataTreeIdentifier rootPath, DataTreeCandidateNode rootNode) {
-        this.rootPath = Preconditions.checkNotNull(rootPath);
-        this.rootNode = Preconditions.checkNotNull(rootNode);
+    private DOMDataTreeCandidateTO(final DOMDataTreeIdentifier rootPath, final DataTreeCandidateNode rootNode) {
+        this.rootPath = requireNonNull(rootPath);
+        this.rootNode = requireNonNull(rootNode);
     }
 
     @Override
@@ -34,7 +34,7 @@ final class DOMDataTreeCandidateTO implements DOMDataTreeCandidate {
         return rootNode;
     }
 
-    static DOMDataTreeCandidate create(DOMDataTreeIdentifier path, DataTreeCandidateNode node) {
+    static DOMDataTreeCandidate create(final DOMDataTreeIdentifier path, final DataTreeCandidateNode node) {
         return new DOMDataTreeCandidateTO(path, node);
     }
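
This file, like most of the patch, replaces Guava's Preconditions.checkNotNull with a statically imported java.util.Objects.requireNonNull. A self-contained sketch of the same constructor idiom, with hypothetical class and field names:

import static java.util.Objects.requireNonNull;

// Minimal JDK-only illustration of the checkNotNull -> requireNonNull swap.
final class RequireNonNullExample {
    private final String rootPath;
    private final String rootNode;

    RequireNonNullExample(final String rootPath, final String rootNode) {
        // Throws NullPointerException eagerly, same contract as Preconditions.checkNotNull
        this.rootPath = requireNonNull(rootPath);
        this.rootNode = requireNonNull(rootNode);
    }

    @Override
    public String toString() {
        return rootPath + "/" + rootNode;
    }
}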
 
index 0b4115a6fa01af210893a9f64d5d2eba576aa1b8..996fe1023dac6f21b4ae6a6c0f4bd68059694635 100644 (file)
@@ -8,16 +8,33 @@
 package org.opendaylight.controller.cluster.datastore;
 
 /**
- * Defines version numbers.
+ * Defines version numbers where the ask-based protocol is concerned.
  *
  * @author Thomas Pantelis
  */
-public interface DataStoreVersions {
-    short BASE_HELIUM_VERSION = 0;
-    short HELIUM_1_VERSION = 1;
-    short HELIUM_2_VERSION = 2;
-    short LITHIUM_VERSION = 3;
-    short BORON_VERSION = 5;
-    short FLUORINE_VERSION = 9;
-    short CURRENT_VERSION = FLUORINE_VERSION;
+public final class DataStoreVersions {
+    @Deprecated
+    public static final short BASE_HELIUM_VERSION =  0;
+    @Deprecated
+    public static final short HELIUM_1_VERSION    =  1;
+    @Deprecated
+    public static final short HELIUM_2_VERSION    =  2;
+    @Deprecated
+    public static final short LITHIUM_VERSION     =  3;
+    @Deprecated
+    public static final short BORON_VERSION       =  5;
+    @Deprecated
+    public static final short FLUORINE_VERSION    =  9;
+    @Deprecated
+    public static final short NEON_SR2_VERSION    = 10;
+    @Deprecated
+    public static final short SODIUM_SR1_VERSION  = 11;
+    @Deprecated
+    public static final short PHOSPHORUS_VERSION  = 12;
+    public static final short POTASSIUM_VERSION   = 13;
+    public static final short CURRENT_VERSION     = POTASSIUM_VERSION;
+
+    private DataStoreVersions() {
+
+    }
 }
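
The rewritten DataStoreVersions keeps the deprecated constants only so historical wire versions remain recognizable, while CURRENT_VERSION tracks POTASSIUM_VERSION. A hedged sketch of how such a constant might be consulted; the helper class and its methods are hypothetical, only the DataStoreVersions constants come from the patch:

import org.opendaylight.controller.cluster.datastore.DataStoreVersions;

// Hypothetical helper showing typical comparisons against the short-valued constants.
final class VersionCheckSketch {
    private VersionCheckSketch() {
        // utility class
    }

    static boolean isCurrent(final short remoteVersion) {
        return remoteVersion == DataStoreVersions.CURRENT_VERSION;
    }

    static boolean isKnown(final short remoteVersion) {
        // Deprecated constants remain declared so historic values are still in range
        return remoteVersion >= 0 && remoteVersion <= DataStoreVersions.CURRENT_VERSION;
    }
}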
index c72de945b15154c67c4c1002cd736a72d56aabaa..6f88d3ea986136416276fa39915c5bfe2673bee6 100644 (file)
@@ -7,15 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
 import akka.actor.Props;
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
-import org.opendaylight.controller.cluster.datastore.messages.DataTreeListenerInfo;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.controller.cluster.datastore.messages.GetInfo;
 import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
@@ -23,25 +25,26 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  * Proxy actor which acts as a facade to the user-provided listener. Responsible for decapsulating
  * DataTreeChanged messages and dispatching their context to the user.
  */
-final class DataTreeChangeListenerActor extends AbstractUntypedActor {
+class DataTreeChangeListenerActor extends AbstractUntypedActor {
     private final DOMDataTreeChangeListener listener;
     private final YangInstanceIdentifier registeredPath;
+
     private boolean notificationsEnabled = false;
     private long notificationCount;
     private String logContext = "";
 
-    private DataTreeChangeListenerActor(final DOMDataTreeChangeListener listener,
+    DataTreeChangeListenerActor(final DOMDataTreeChangeListener listener,
             final YangInstanceIdentifier registeredPath) {
-        this.listener = Preconditions.checkNotNull(listener);
-        this.registeredPath = Preconditions.checkNotNull(registeredPath);
+        this.listener = requireNonNull(listener);
+        this.registeredPath = requireNonNull(registeredPath);
     }
 
     @Override
-    protected void handleReceive(final Object message) {
+    protected final void handleReceive(final Object message) {
         if (message instanceof DataTreeChanged) {
-            dataChanged((DataTreeChanged)message);
+            dataTreeChanged((DataTreeChanged) message);
         } else if (message instanceof OnInitialData) {
-            onInitialData();
+            onInitialData((OnInitialData) message);
         } else if (message instanceof EnableNotification) {
             enableNotification((EnableNotification) message);
         } else if (message instanceof GetInfo) {
@@ -53,18 +56,18 @@ final class DataTreeChangeListenerActor extends AbstractUntypedActor {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void onInitialData() {
+    void onInitialData(final OnInitialData message) {
         LOG.debug("{}: Notifying onInitialData to listener {}", logContext, listener);
 
         try {
-            this.listener.onInitialData();
+            listener.onInitialData();
         } catch (Exception e) {
-            LOG.error("{}: Error notifying listener {}", logContext, this.listener, e);
+            LOG.error("{}: Error notifying listener {}", logContext, listener, e);
         }
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void dataChanged(final DataTreeChanged message) {
+    void dataTreeChanged(final DataTreeChanged message) {
         // Do nothing if notifications are not enabled
         if (!notificationsEnabled) {
             LOG.debug("{}: Notifications not enabled for listener {} - dropping change notification",
@@ -72,22 +75,29 @@ final class DataTreeChangeListenerActor extends AbstractUntypedActor {
             return;
         }
 
-        LOG.debug("{}: Sending {} change notification(s) {} to listener {}", logContext, message.getChanges().size(),
-                message.getChanges(), listener);
+        final var changes = message.getChanges();
+        LOG.debug("{}: Sending {} change notification(s) to listener {}", logContext, changes.size(), listener);
+        if (LOG.isTraceEnabled() && !changes.isEmpty()) {
+            LOG.trace("{}: detailed change follow", logContext);
+            for (int i = 0, size = changes.size(); i < size; ++i) {
+                LOG.trace("{}: change {}: {}", logContext, i, changes.get(i));
+            }
+        }
 
         notificationCount++;
 
         try {
-            this.listener.onDataTreeChanged(message.getChanges());
+            listener.onDataTreeChanged(changes);
         } catch (Exception e) {
-            LOG.error("{}: Error notifying listener {}", logContext, this.listener, e);
+            LOG.error("{}: Error notifying listener {}", logContext, listener, e);
         }
 
         // TODO: do we really need this?
         // It seems the sender is never null but it doesn't hurt to check. If the caller passes in
         // a null sender (ActorRef.noSender()), akka translates that to the deadLetters actor.
-        if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
-            getSender().tell(DataTreeChangedReply.getInstance(), getSelf());
+        final ActorRef sender = getSender();
+        if (sender != null && !sender.equals(getContext().system().deadLetters())) {
+            sender.tell(DataTreeChangedReply.getInstance(), getSelf());
         }
     }
 
@@ -98,7 +108,7 @@ final class DataTreeChangeListenerActor extends AbstractUntypedActor {
                 listener);
     }
 
-    public static Props props(final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath) {
+    static Props props(final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath) {
         return Props.create(DataTreeChangeListenerActor.class, listener, registeredPath);
     }
 }
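
The dataTreeChanged() rewrite above logs a short summary at debug level and emits the per-change details only behind an isTraceEnabled() guard. A self-contained SLF4J sketch of that pattern, with illustrative names:

import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch of "summary at debug, details only under isTraceEnabled()".
final class GuardedTraceSketch {
    private static final Logger LOG = LoggerFactory.getLogger(GuardedTraceSketch.class);

    private GuardedTraceSketch() {
        // utility class
    }

    static void logChanges(final String logContext, final List<String> changes) {
        // Cheap one-line summary at debug level
        LOG.debug("{}: Sending {} change notification(s)", logContext, changes.size());

        // Potentially expensive per-element rendering only when trace is enabled
        if (LOG.isTraceEnabled() && !changes.isEmpty()) {
            LOG.trace("{}: detailed changes follow", logContext);
            for (int i = 0, size = changes.size(); i < size; ++i) {
                LOG.trace("{}: change {}: {}", logContext, i, changes.get(i));
            }
        }
    }
}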
index adf8b1ad27c9b36ef74570d76980beb8a6fc9b33..be849284e7f1fabdd04f9e3255e99a5912e53f48 100644 (file)
@@ -14,19 +14,20 @@ import akka.actor.ActorSelection;
 import akka.actor.PoisonPill;
 import akka.dispatch.OnComplete;
 import com.google.common.annotations.VisibleForTesting;
-import javax.annotation.concurrent.GuardedBy;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.Executor;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
 
 /**
  * Proxy class for holding required state to lazily instantiate a listener registration with an
@@ -34,28 +35,61 @@ import scala.concurrent.Future;
  *
  * @param <T> listener type
  */
-final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> extends AbstractListenerRegistration<T> {
+final class DataTreeChangeListenerProxy extends AbstractObjectRegistration<DOMDataTreeChangeListener> {
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class);
     private final ActorRef dataChangeListenerActor;
     private final ActorUtils actorUtils;
     private final YangInstanceIdentifier registeredPath;
+    private final boolean clustered;
 
     @GuardedBy("this")
     private ActorSelection listenerRegistrationActor;
 
-    DataTreeChangeListenerProxy(final ActorUtils actorUtils, final T listener,
-            final YangInstanceIdentifier registeredPath) {
+    @VisibleForTesting
+    private DataTreeChangeListenerProxy(final ActorUtils actorUtils, final DOMDataTreeChangeListener listener,
+            final YangInstanceIdentifier registeredPath, final boolean clustered, final String shardName) {
         super(listener);
         this.actorUtils = requireNonNull(actorUtils);
         this.registeredPath = requireNonNull(registeredPath);
-        this.dataChangeListenerActor = actorUtils.getActorSystem().actorOf(
+        this.clustered = clustered;
+        dataChangeListenerActor = actorUtils.getActorSystem().actorOf(
                 DataTreeChangeListenerActor.props(getInstance(), registeredPath)
                     .withDispatcher(actorUtils.getNotificationDispatcherPath()));
-
         LOG.debug("{}: Created actor {} for DTCL {}", actorUtils.getDatastoreContext().getLogicalStoreType(),
                 dataChangeListenerActor, listener);
     }
 
+    static @NonNull DataTreeChangeListenerProxy of(final ActorUtils actorUtils,
+            final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath,
+            final boolean clustered, final String shardName) {
+        return ofTesting(actorUtils, listener, registeredPath, clustered, shardName, MoreExecutors.directExecutor());
+    }
+
+    @VisibleForTesting
+    static @NonNull DataTreeChangeListenerProxy ofTesting(final ActorUtils actorUtils,
+            final DOMDataTreeChangeListener listener, final YangInstanceIdentifier registeredPath,
+            final boolean clustered, final String shardName, final Executor executor) {
+        final var ret = new DataTreeChangeListenerProxy(actorUtils, listener, registeredPath, clustered, shardName);
+        executor.execute(() -> {
+            LOG.debug("{}: Starting discovery of shard {}", ret.logContext(), shardName);
+            actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete<>() {
+                @Override
+                public void onComplete(final Throwable failure, final ActorRef shard) {
+                    if (failure instanceof LocalShardNotFoundException) {
+                        LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} cannot be "
+                            + "registered", ret.logContext(), shardName, listener, registeredPath);
+                    } else if (failure != null) {
+                        LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} cannot be "
+                            + "registered", ret.logContext(), shardName, listener, registeredPath, failure);
+                    } else {
+                        ret.doRegistration(shard);
+                    }
+                }
+            }, actorUtils.getClientDispatcher());
+        });
+        return ret;
+    }
+
     @Override
     protected synchronized void removeRegistration() {
         if (listenerRegistrationActor != null) {
@@ -67,25 +101,6 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
         dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
     }
 
-    void init(final String shardName) {
-        Future<ActorRef> findFuture = actorUtils.findLocalShardAsync(shardName);
-        findFuture.onComplete(new OnComplete<ActorRef>() {
-            @Override
-            public void onComplete(final Throwable failure, final ActorRef shard) {
-                if (failure instanceof LocalShardNotFoundException) {
-                    LOG.debug("{}: No local shard found for {} - DataTreeChangeListener {} at path {} "
-                            + "cannot be registered", logContext(), shardName, getInstance(), registeredPath);
-                } else if (failure != null) {
-                    LOG.error("{}: Failed to find local shard {} - DataTreeChangeListener {} at path {} "
-                            + "cannot be registered", logContext(), shardName, getInstance(), registeredPath,
-                            failure);
-                } else {
-                    doRegistration(shard);
-                }
-            }
-        }, actorUtils.getClientDispatcher());
-    }
-
     private void setListenerRegistrationActor(final ActorSelection actor) {
         if (actor == null) {
             LOG.debug("{}: Ignoring null actor on {}", logContext(), this);
@@ -94,7 +109,7 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
 
         synchronized (this) {
             if (!isClosed()) {
-                this.listenerRegistrationActor = actor;
+                listenerRegistrationActor = actor;
                 return;
             }
         }
@@ -104,25 +119,20 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
     }
 
     private void doRegistration(final ActorRef shard) {
-
-        Future<Object> future = actorUtils.executeOperationAsync(shard,
-                new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor,
-                        getInstance() instanceof ClusteredDOMDataTreeChangeListener),
-                actorUtils.getDatastoreContext().getShardInitializationTimeout());
-
-        future.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object result) {
-                if (failure != null) {
-                    LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(),
+        actorUtils.executeOperationAsync(shard,
+            new RegisterDataTreeChangeListener(registeredPath, dataChangeListenerActor, clustered),
+            actorUtils.getDatastoreContext().getShardInitializationTimeout()).onComplete(new OnComplete<>() {
+                @Override
+                public void onComplete(final Throwable failure, final Object result) {
+                    if (failure != null) {
+                        LOG.error("{}: Failed to register DataTreeChangeListener {} at path {}", logContext(),
                             getInstance(), registeredPath, failure);
-                } else {
-                    RegisterDataTreeNotificationListenerReply reply = (RegisterDataTreeNotificationListenerReply)result;
-                    setListenerRegistrationActor(actorUtils.actorSelection(
-                            reply.getListenerRegistrationPath()));
+                    } else {
+                        setListenerRegistrationActor(actorUtils.actorSelection(
+                            ((RegisterDataTreeNotificationListenerReply) result).getListenerRegistrationPath()));
+                    }
                 }
-            }
-        }, actorUtils.getClientDispatcher());
+            }, actorUtils.getClientDispatcher());
     }
 
     @VisibleForTesting
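
DataTreeChangeListenerProxy now starts shard discovery from the static of() factory, and ofTesting() takes an Executor so tests can decide when the asynchronous lookup runs (production passes MoreExecutors.directExecutor()). A JDK-only sketch of that factory shape under hypothetical names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Consumer;

// Sketch of the of()/ofTesting() split: finish constructing the object first,
// then schedule the asynchronous lookup on an injectable Executor.
final class AsyncFactorySketch {
    private final String shardName;

    private AsyncFactorySketch(final String shardName) {
        this.shardName = shardName;
    }

    static AsyncFactorySketch of(final String shardName, final Consumer<AsyncFactorySketch> onFound) {
        // Production path: run the discovery inline, mirroring MoreExecutors.directExecutor()
        return ofTesting(shardName, onFound, Runnable::run);
    }

    static AsyncFactorySketch ofTesting(final String shardName, final Consumer<AsyncFactorySketch> onFound,
            final Executor executor) {
        final AsyncFactorySketch ret = new AsyncFactorySketch(shardName);
        // 'ret' is fully constructed before any callback can observe it
        executor.execute(() -> CompletableFuture
            .supplyAsync(() -> "shard:" + ret.shardName)   // stands in for findLocalShardAsync()
            .thenAccept(ignored -> onFound.accept(ret)));
        return ret;
    }
}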
index 348b2aa939efc10617a8f9544a5a738acfdd5345..f5e1d1374b8b8923fee3b785c85d31b3a4778b12 100644 (file)
@@ -23,9 +23,9 @@ import org.slf4j.LoggerFactory;
 final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener> {
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class);
 
-    private final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>>
+    private final Collection<DelayedDataTreeChangeListenerRegistration>
             delayedDataTreeChangeListenerRegistrations = ConcurrentHashMap.newKeySet();
-    private final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>>
+    private final Collection<DelayedDataTreeChangeListenerRegistration>
             delayedListenerOnAllRegistrations = ConcurrentHashMap.newKeySet();
     private final Collection<ActorSelection> leaderOnlyListenerActors = ConcurrentHashMap.newKeySet();
     private final Collection<ActorSelection> allListenerActors = ConcurrentHashMap.newKeySet();
@@ -37,7 +37,7 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<Reg
     void doRegistration(final RegisterDataTreeChangeListener message, final ActorRef registrationActor) {
         final ActorSelection listenerActor = processListenerRegistrationMessage(message);
 
-        final DOMDataTreeChangeListener listener = new ForwardingDataTreeChangeListener(listenerActor);
+        final DOMDataTreeChangeListener listener = new ForwardingDataTreeChangeListener(listenerActor, getSelf());
 
         LOG.debug("{}: Registering listenerActor {} for path {}", persistenceId(), listenerActor, message.getPath());
 
@@ -62,8 +62,7 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<Reg
         }
 
         if (hasLeader) {
-            for (DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> reg :
-                    delayedListenerOnAllRegistrations) {
+            for (var reg : delayedListenerOnAllRegistrations) {
                 reg.doRegistration(this);
             }
 
@@ -71,8 +70,7 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<Reg
         }
 
         if (isLeader) {
-            for (DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> reg :
-                    delayedDataTreeChangeListenerRegistrations) {
+            for (var reg : delayedDataTreeChangeListenerRegistrations) {
                 reg.doRegistration(this);
             }
 
@@ -91,9 +89,8 @@ final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<Reg
         } else {
             LOG.debug("{}: Shard does not have a leader - delaying registration", persistenceId());
 
-            final DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> delayedReg =
-                    new DelayedDataTreeChangeListenerRegistration<>(message, registrationActor);
-            final Collection<DelayedDataTreeChangeListenerRegistration<DOMDataTreeChangeListener>> delayedRegList;
+            final var delayedReg = new DelayedDataTreeChangeListenerRegistration(message, registrationActor);
+            final Collection<DelayedDataTreeChangeListenerRegistration> delayedRegList;
             if (message.isRegisterOnAllInstances()) {
                 delayedRegList = delayedListenerOnAllRegistrations;
             } else {
index 26c5f45568e2ca00ea1205a27175504dbe90cfc7..9efca6493716ed72ec030610fb67ebd8691b2ac3 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
@@ -20,8 +19,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.Executor;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.mdsal.common.api.PostCanCommitStep;
@@ -30,7 +29,7 @@ import org.opendaylight.mdsal.common.api.ThreePhaseCommitStep;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 /**
  * Proxy actor which acts as a facade to the user-provided commit cohort. Responsible for
@@ -76,7 +75,7 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
             return txId;
         }
 
-        protected CommitProtocolCommand(TransactionIdentifier txId) {
+        protected CommitProtocolCommand(final TransactionIdentifier txId) {
             this.txId = Objects.requireNonNull(txId);
         }
 
@@ -90,10 +89,10 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
 
         private final Collection<DOMDataTreeCandidate> candidates;
         private final ActorRef cohort;
-        private final SchemaContext schema;
+        private final EffectiveModelContext schema;
 
-        CanCommit(TransactionIdentifier txId, Collection<DOMDataTreeCandidate> candidates, SchemaContext schema,
-                ActorRef cohort) {
+        CanCommit(final TransactionIdentifier txId, final Collection<DOMDataTreeCandidate> candidates,
+                final EffectiveModelContext schema, final ActorRef cohort) {
             super(txId);
             this.cohort = Objects.requireNonNull(cohort);
             this.candidates = Objects.requireNonNull(candidates);
@@ -104,7 +103,7 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
             return candidates;
         }
 
-        SchemaContext getSchema() {
+        EffectiveModelContext getSchema() {
             return schema;
         }
 
@@ -123,7 +122,7 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
         private final ActorRef cohortRef;
         private final TransactionIdentifier txId;
 
-        protected CommitReply(ActorRef cohortRef, TransactionIdentifier txId) {
+        protected CommitReply(final ActorRef cohortRef, final TransactionIdentifier txId) {
             this.cohortRef = Objects.requireNonNull(cohortRef);
             this.txId = Objects.requireNonNull(txId);
         }
@@ -144,28 +143,28 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
 
     static final class Success extends CommitReply {
 
-        Success(ActorRef cohortRef, TransactionIdentifier txId) {
+        Success(final ActorRef cohortRef, final TransactionIdentifier txId) {
             super(cohortRef, txId);
         }
     }
 
     static final class PreCommit extends CommitProtocolCommand<Success> {
 
-        PreCommit(TransactionIdentifier txId) {
+        PreCommit(final TransactionIdentifier txId) {
             super(txId);
         }
     }
 
     static final class Abort extends CommitProtocolCommand<Success> {
 
-        Abort(TransactionIdentifier txId) {
+        Abort(final TransactionIdentifier txId) {
             super(txId);
         }
     }
 
     static final class Commit extends CommitProtocolCommand<Success> {
 
-        Commit(TransactionIdentifier txId) {
+        Commit(final TransactionIdentifier txId) {
             super(txId);
         }
     }
@@ -173,11 +172,11 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
     private abstract class CohortBehaviour<M extends CommitProtocolCommand<?>, S extends ThreePhaseCommitStep> {
         private final Class<M> handledMessageType;
 
-        CohortBehaviour(Class<M> handledMessageType) {
+        CohortBehaviour(final Class<M> handledMessageType) {
             this.handledMessageType = Objects.requireNonNull(handledMessageType);
         }
 
-        void handle(CommitProtocolCommand<?> command) {
+        void handle(final CommitProtocolCommand<?> command) {
             if (handledMessageType.isInstance(command)) {
                 onMessage(command);
             } else if (command instanceof Abort) {
@@ -189,7 +188,7 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
             }
         }
 
-        private void onMessage(CommitProtocolCommand<?> message) {
+        private void onMessage(final CommitProtocolCommand<?> message) {
             final ActorRef sender = getSender();
             TransactionIdentifier txId = message.getTxId();
             ListenableFuture<S> future = process(handledMessageType.cast(message));
@@ -197,49 +196,47 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
                     : DataTreeCohortActor.this::executeInSelf;
             Futures.addCallback(future, new FutureCallback<S>() {
                 @Override
-                public void onSuccess(S nextStep) {
+                public void onSuccess(final S nextStep) {
                     success(txId, sender, nextStep);
                 }
 
                 @Override
-                public void onFailure(Throwable failure) {
+                public void onFailure(final Throwable failure) {
                     failed(txId, sender, failure);
                 }
             }, callbackExecutor);
         }
 
-        private void failed(TransactionIdentifier txId, ActorRef sender, Throwable failure) {
+        private void failed(final TransactionIdentifier txId, final ActorRef sender, final Throwable failure) {
             currentStateMap.remove(txId);
             sender.tell(new Status.Failure(failure), getSelf());
         }
 
-        private void success(TransactionIdentifier txId, ActorRef sender, S nextStep) {
+        private void success(final TransactionIdentifier txId, final ActorRef sender, final S nextStep) {
             currentStateMap.computeIfPresent(txId, (key, behaviour) -> nextBehaviour(txId, nextStep));
             sender.tell(new Success(getSelf(), txId), getSelf());
         }
 
-        private void onAbort(TransactionIdentifier txId) {
+        private void onAbort(final TransactionIdentifier txId) {
             currentStateMap.remove(txId);
             final ActorRef sender = getSender();
             Futures.addCallback(abort(), new FutureCallback<Object>() {
                 @Override
-                public void onSuccess(Object noop) {
+                public void onSuccess(final Object noop) {
                     sender.tell(new Success(getSelf(), txId), getSelf());
                 }
 
                 @Override
-                public void onFailure(Throwable failure) {
+                public void onFailure(final Throwable failure) {
                     LOG.warn("Abort of transaction {} failed for cohort {}", txId, cohort, failure);
                     sender.tell(new Status.Failure(failure), getSelf());
                 }
             }, MoreExecutors.directExecutor());
         }
 
-        @Nullable
-        abstract CohortBehaviour<?, ?> nextBehaviour(TransactionIdentifier txId, S nextStep);
+        abstract @Nullable CohortBehaviour<?, ?> nextBehaviour(TransactionIdentifier txId, S nextStep);
 
-        @Nonnull
-        abstract ListenableFuture<S> process(M command);
+        abstract @NonNull ListenableFuture<S> process(M command);
 
         abstract ListenableFuture<?> abort();
 
@@ -255,12 +252,12 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
         }
 
         @Override
-        ListenableFuture<PostCanCommitStep> process(CanCommit message) {
+        ListenableFuture<PostCanCommitStep> process(final CanCommit message) {
             return cohort.canCommit(message.getTxId(), message.getSchema(), message.getCandidates());
         }
 
         @Override
-        CohortBehaviour<?, ?> nextBehaviour(TransactionIdentifier txId, PostCanCommitStep nextStep) {
+        CohortBehaviour<?, ?> nextBehaviour(final TransactionIdentifier txId, final PostCanCommitStep nextStep) {
             return new PostCanCommit(txId, nextStep);
         }
 
@@ -275,7 +272,7 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
         private final S step;
         private final TransactionIdentifier txId;
 
-        CohortStateWithStep(Class<M> handledMessageType, TransactionIdentifier txId, S step) {
+        CohortStateWithStep(final Class<M> handledMessageType, final TransactionIdentifier txId, final S step) {
             super(handledMessageType);
             this.txId = Objects.requireNonNull(txId);
             this.step = Objects.requireNonNull(step);
@@ -298,18 +295,18 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
 
     private class PostCanCommit extends CohortStateWithStep<PreCommit, PostCanCommitStep, PostPreCommitStep> {
 
-        PostCanCommit(TransactionIdentifier txId, PostCanCommitStep nextStep) {
+        PostCanCommit(final TransactionIdentifier txId, final PostCanCommitStep nextStep) {
             super(PreCommit.class, txId, nextStep);
         }
 
         @SuppressWarnings("unchecked")
         @Override
-        ListenableFuture<PostPreCommitStep> process(PreCommit message) {
+        ListenableFuture<PostPreCommitStep> process(final PreCommit message) {
             return (ListenableFuture<PostPreCommitStep>) getStep().preCommit();
         }
 
         @Override
-        CohortBehaviour<?, ?> nextBehaviour(TransactionIdentifier txId, PostPreCommitStep nextStep) {
+        CohortBehaviour<?, ?> nextBehaviour(final TransactionIdentifier txId, final PostPreCommitStep nextStep) {
             return new PostPreCommit(txId, nextStep);
         }
 
@@ -317,18 +314,18 @@ final class DataTreeCohortActor extends AbstractUntypedActor {
 
     private class PostPreCommit extends CohortStateWithStep<Commit, PostPreCommitStep, NoopThreePhaseCommitStep> {
 
-        PostPreCommit(TransactionIdentifier txId, PostPreCommitStep step) {
+        PostPreCommit(final TransactionIdentifier txId, final PostPreCommitStep step) {
             super(Commit.class, txId, step);
         }
 
         @SuppressWarnings("unchecked")
         @Override
-        ListenableFuture<NoopThreePhaseCommitStep> process(Commit message) {
+        ListenableFuture<NoopThreePhaseCommitStep> process(final Commit message) {
             return (ListenableFuture<NoopThreePhaseCommitStep>) getStep().commit();
         }
 
         @Override
-        CohortBehaviour<?, ?> nextBehaviour(TransactionIdentifier txId, NoopThreePhaseCommitStep nextStep) {
+        CohortBehaviour<?, ?> nextBehaviour(final TransactionIdentifier txId, final NoopThreePhaseCommitStep nextStep) {
             return null;
         }
     }
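
Most of the DataTreeCohortActor changes are checkstyle-driven 'final' parameters, but the callback wiring stays as Futures.addCallback(future, callback, executor). A small Guava sketch of that wiring; the result handling here is illustrative only:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

// Sketch of Futures.addCallback(future, callback, executor) as used by CohortBehaviour.onMessage().
final class CallbackSketch {
    private CallbackSketch() {
        // utility class
    }

    static void report(final ListenableFuture<String> future) {
        Futures.addCallback(future, new FutureCallback<String>() {
            @Override
            public void onSuccess(final String nextStep) {
                System.out.println("success: " + nextStep);
            }

            @Override
            public void onFailure(final Throwable failure) {
                System.err.println("failed: " + failure);
            }
        }, MoreExecutors.directExecutor());
    }

    public static void main(final String[] args) {
        final SettableFuture<String> future = SettableFuture.create();
        report(future);
        future.set("pre-commit");
    }
}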
index 1dd0f2856bd98c05fa99389348ec6a57dd9419d3..fa10f947db49d7c9f73e369ca568f5bbdcbb9ac2 100644 (file)
@@ -5,14 +5,14 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import akka.actor.Status;
 import akka.util.Timeout;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
 import java.util.ArrayList;
@@ -21,34 +21,29 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.Executor;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.spi.AbstractRegistrationTree;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeNode;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeSnapshot;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Registry of user commit cohorts, which is responsible for handling registration and calculation
- * of affected cohorts based on {@link DataTreeCandidate}.
- *
+ * of affected cohorts based on {@link DataTreeCandidate}. This class is NOT thread-safe.
  */
-@NotThreadSafe
 class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
 
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortActorRegistry.class);
 
-    private final Map<ActorRef, RegistrationTreeNode<ActorRef>> cohortToNode = new HashMap<>();
+    private final Map<ActorRef, Node<ActorRef>> cohortToNode = new HashMap<>();
 
     Collection<ActorRef> getCohortActors() {
         return new ArrayList<>(cohortToNode.keySet());
@@ -59,8 +54,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
         takeLock();
         try {
             final ActorRef cohortRef = cohort.getCohort();
-            final RegistrationTreeNode<ActorRef> node =
-                    findNodeFor(cohort.getPath().getRootIdentifier().getPathArguments());
+            final Node<ActorRef> node = findNodeFor(cohort.getPath().path().getPathArguments());
             addRegistration(node, cohort.getCohort());
             cohortToNode.put(cohortRef, node);
         } catch (final Exception e) {
@@ -74,7 +68,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
 
     void removeCommitCohort(final ActorRef sender, final RemoveCohort message) {
         final ActorRef cohort = message.getCohort();
-        final RegistrationTreeNode<ActorRef> node = cohortToNode.get(cohort);
+        final Node<ActorRef> node = cohortToNode.get(cohort);
         if (node != null) {
             removeRegistration(node, cohort);
             cohortToNode.remove(cohort);
@@ -84,8 +78,8 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
     }
 
     List<DataTreeCohortActor.CanCommit> createCanCommitMessages(final TransactionIdentifier txId,
-            final DataTreeCandidate candidate, final SchemaContext schema) {
-        try (RegistrationTreeSnapshot<ActorRef> cohorts = takeSnapshot()) {
+            final DataTreeCandidate candidate, final EffectiveModelContext schema) {
+        try (var cohorts = takeSnapshot()) {
             return new CanCommitMessageBuilder(txId, candidate, schema).perform(cohorts.getRootNode());
         }
     }
@@ -99,11 +93,10 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
     }
 
     abstract static class CohortRegistryCommand {
-
         private final ActorRef cohort;
 
         CohortRegistryCommand(final ActorRef cohort) {
-            this.cohort = Preconditions.checkNotNull(cohort);
+            this.cohort = requireNonNull(cohort);
         }
 
         ActorRef getCohort() {
@@ -112,82 +105,74 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
     }
 
     static class RegisterCohort extends CohortRegistryCommand {
-
         private final DOMDataTreeIdentifier path;
 
         RegisterCohort(final DOMDataTreeIdentifier path, final ActorRef cohort) {
             super(cohort);
             this.path = path;
-
         }
 
         public DOMDataTreeIdentifier getPath() {
             return path;
         }
-
     }
 
     static class RemoveCohort extends CohortRegistryCommand {
-
         RemoveCohort(final ActorRef cohort) {
             super(cohort);
         }
-
     }
 
     private static class CanCommitMessageBuilder {
-
+        private final Multimap<ActorRef, DOMDataTreeCandidate> actorToCandidates = ArrayListMultimap.create();
         private final TransactionIdentifier txId;
         private final DataTreeCandidate candidate;
-        private final SchemaContext schema;
-        private final Multimap<ActorRef, DOMDataTreeCandidate> actorToCandidates = ArrayListMultimap.create();
+        private final EffectiveModelContext schema;
 
         CanCommitMessageBuilder(final TransactionIdentifier txId, final DataTreeCandidate candidate,
-                final SchemaContext schema) {
-            this.txId = Preconditions.checkNotNull(txId);
-            this.candidate = Preconditions.checkNotNull(candidate);
+                final EffectiveModelContext schema) {
+            this.txId = requireNonNull(txId);
+            this.candidate = requireNonNull(candidate);
             this.schema = schema;
         }
 
         private void lookupAndCreateCanCommits(final List<PathArgument> args, final int offset,
-                final RegistrationTreeNode<ActorRef> node) {
+                final Node<ActorRef> node) {
 
             if (args.size() != offset) {
                 final PathArgument arg = args.get(offset);
-                final RegistrationTreeNode<ActorRef> exactChild = node.getExactChild(arg);
+                final var exactChild = node.getExactChild(arg);
                 if (exactChild != null) {
                     lookupAndCreateCanCommits(args, offset + 1, exactChild);
                 }
-                for (final RegistrationTreeNode<ActorRef> c : node.getInexactChildren(arg)) {
-                    lookupAndCreateCanCommits(args, offset + 1, c);
+                for (var inexact : node.getInexactChildren(arg)) {
+                    lookupAndCreateCanCommits(args, offset + 1, inexact);
                 }
             } else {
                 lookupAndCreateCanCommits(candidate.getRootPath(), node, candidate.getRootNode());
             }
         }
 
-        private void lookupAndCreateCanCommits(final YangInstanceIdentifier path,
-                final RegistrationTreeNode<ActorRef> regNode, final DataTreeCandidateNode candNode) {
-            if (candNode.getModificationType() == ModificationType.UNMODIFIED) {
+        private void lookupAndCreateCanCommits(final YangInstanceIdentifier path, final Node<ActorRef> regNode,
+                final DataTreeCandidateNode candNode) {
+            if (candNode.modificationType() == ModificationType.UNMODIFIED) {
                 LOG.debug("Skipping unmodified candidate {}", path);
                 return;
             }
-            final Collection<ActorRef> regs = regNode.getRegistrations();
+            final var regs = regNode.getRegistrations();
             if (!regs.isEmpty()) {
                 createCanCommits(regs, path, candNode);
             }
 
-            for (final DataTreeCandidateNode candChild : candNode.getChildNodes()) {
-                if (candChild.getModificationType() != ModificationType.UNMODIFIED) {
-                    final RegistrationTreeNode<ActorRef> regChild =
-                            regNode.getExactChild(candChild.getIdentifier());
+            for (var candChild : candNode.childNodes()) {
+                if (candChild.modificationType() != ModificationType.UNMODIFIED) {
+                    final var regChild = regNode.getExactChild(candChild.name());
                     if (regChild != null) {
-                        lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), regChild, candChild);
+                        lookupAndCreateCanCommits(path.node(candChild.name()), regChild, candChild);
                     }
 
-                    for (final RegistrationTreeNode<ActorRef> rc : regNode
-                            .getInexactChildren(candChild.getIdentifier())) {
-                        lookupAndCreateCanCommits(path.node(candChild.getIdentifier()), rc, candChild);
+                    for (var rc : regNode.getInexactChildren(candChild.name())) {
+                        lookupAndCreateCanCommits(path.node(candChild.name()), rc, candChild);
                     }
                 }
             }
@@ -202,11 +187,11 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
         }
 
         private static DOMDataTreeIdentifier treeIdentifier(final YangInstanceIdentifier path) {
-            return new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, path);
+            return DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, path);
         }
 
-        List<DataTreeCohortActor.CanCommit> perform(final RegistrationTreeNode<ActorRef> rootNode) {
-            final List<PathArgument> toLookup = candidate.getRootPath().getPathArguments();
+        List<DataTreeCohortActor.CanCommit> perform(final Node<ActorRef> rootNode) {
+            final var toLookup = candidate.getRootPath().getPathArguments();
             lookupAndCreateCanCommits(toLookup, 0, rootNode);
 
             final Map<ActorRef, Collection<DOMDataTreeCandidate>> mapView = actorToCandidates.asMap();
@@ -219,7 +204,7 @@ class DataTreeCohortActorRegistry extends AbstractRegistrationTree<ActorRef> {
         }
     }
 
-    CompositeDataTreeCohort createCohort(final SchemaContext schemaContext, final TransactionIdentifier txId,
+    CompositeDataTreeCohort createCohort(final EffectiveModelContext schemaContext, final TransactionIdentifier txId,
             final Executor callbackExecutor, final Timeout commitStepTimeout) {
         return new CompositeDataTreeCohort(this, txId, schemaContext, callbackExecutor, commitStepTimeout);
     }
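
CanCommitMessageBuilder accumulates candidates per cohort actor in an ArrayListMultimap and then builds one CanCommit message per actor from asMap(). A Guava-only sketch of that grouping step, with Strings standing in for ActorRef and DOMDataTreeCandidate:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

// Sketch of the "group per actor, then one message per actor" step in perform().
final class GroupingSketch {
    private GroupingSketch() {
        // utility class
    }

    static List<String> buildMessages(final Multimap<String, String> actorToCandidates) {
        final List<String> messages = new ArrayList<>();
        for (Map.Entry<String, Collection<String>> entry : actorToCandidates.asMap().entrySet()) {
            // One message per registered actor, carrying all candidates that touched its subtree
            messages.add(entry.getKey() + " -> " + entry.getValue());
        }
        return messages;
    }

    public static void main(final String[] args) {
        final Multimap<String, String> grouped = ArrayListMultimap.create();
        grouped.put("cohortA", "candidate-1");
        grouped.put("cohortA", "candidate-2");
        grouped.put("cohortB", "candidate-3");
        buildMessages(grouped).forEach(System.out::println);
    }
}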
index 45b0f76484931ba093b4156f4cec107ec1434840..4e3c6cb8d77bc0043dc883b29e0467bc120e16c2 100644 (file)
@@ -14,11 +14,10 @@ import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
 import java.util.concurrent.TimeUnit;
-import javax.annotation.concurrent.GuardedBy;
+import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
 import org.slf4j.Logger;
@@ -26,11 +25,10 @@ import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
-public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort> extends AbstractObjectRegistration<C>
-        implements DOMDataTreeCommitCohortRegistration<C> {
-
+public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort> extends AbstractObjectRegistration<C> {
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeCohortRegistrationProxy.class);
     private static final Timeout TIMEOUT = new Timeout(new FiniteDuration(5, TimeUnit.SECONDS));
+
     private final DOMDataTreeIdentifier subtree;
     private final ActorRef actor;
     private final ActorUtils actorUtils;
@@ -42,8 +40,8 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
         super(cohort);
         this.subtree = requireNonNull(subtree);
         this.actorUtils = requireNonNull(actorUtils);
-        this.actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
-                subtree.getRootIdentifier()).withDispatcher(actorUtils.getNotificationDispatcherPath()));
+        actor = actorUtils.getActorSystem().actorOf(DataTreeCohortActor.props(getInstance(),
+                subtree.path()).withDispatcher(actorUtils.getNotificationDispatcherPath()));
     }
 
     public void init(final String shardName) {
@@ -72,7 +70,7 @@ public class DataTreeCohortRegistrationProxy<C extends DOMDataTreeCommitCohort>
         cohortRegistry = shard;
         Future<Object> future =
                 Patterns.ask(shard, new DataTreeCohortActorRegistry.RegisterCohort(subtree, actor), TIMEOUT);
-        future.onComplete(new OnComplete<Object>() {
+        future.onComplete(new OnComplete<>() {
 
             @Override
             public void onComplete(final Throwable failure, final Object val) {
@@ -5,10 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+package org.opendaylight.controller.cluster.datastore;
 
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBean;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 
 /**
@@ -16,16 +16,16 @@ import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
  *
  * @author Thomas Pantelis
  */
-public class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements DatastoreConfigurationMXBean {
+final class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements DatastoreConfigurationMXBean {
     public static final String JMX_CATEGORY_CONFIGURATION = "Configuration";
 
     private DatastoreContext context;
 
-    public DatastoreConfigurationMXBeanImpl(String mxBeanType) {
+    DatastoreConfigurationMXBeanImpl(final String mxBeanType) {
         super("Datastore", mxBeanType, JMX_CATEGORY_CONFIGURATION);
     }
 
-    public void setContext(DatastoreContext context) {
+    public void setContext(final DatastoreContext context) {
         this.context = context;
     }
 
@@ -64,6 +64,11 @@ public class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements
         return context.getShardRaftConfig().getSnapshotDataThresholdPercentage();
     }
 
+    @Override
+    public int getShardSnapshotDataThreshold() {
+        return context.getShardRaftConfig().getSnapshotDataThreshold();
+    }
+
     @Override
     public long getShardSnapshotBatchCount() {
         return context.getShardRaftConfig().getSnapshotBatchCount();
@@ -109,26 +114,6 @@ public class DatastoreConfigurationMXBeanImpl extends AbstractMXBean implements
         return context.isTransactionDebugContextEnabled();
     }
 
-    @Override
-    public int getMaxShardDataChangeExecutorPoolSize() {
-        return context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
-    }
-
-    @Override
-    public int getMaxShardDataChangeExecutorQueueSize() {
-        return context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
-    }
-
-    @Override
-    public int getMaxShardDataChangeListenerQueueSize() {
-        return context.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
-    }
-
-    @Override
-    public int getMaxShardDataStoreExecutorQueueSize() {
-        return context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
-    }
-
     @Override
     public int getMaximumMessageSliceSize() {
         return context.getMaximumMessageSliceSize();
index 1a46e3d4525f10f4bc616153e1f74733eeb81192..24b37751272b68741ab9e296b835244e6b337944 100644 (file)
@@ -7,9 +7,11 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
@@ -22,7 +24,7 @@ import org.opendaylight.controller.cluster.raft.ConfigParams;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.PeerAddressResolver;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStoreConfigProperties;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,15 +45,19 @@ public class DatastoreContext implements ClientActorConfig {
     public static final int DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS = 30;
     public static final int DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE = 1;
     public static final int DEFAULT_SNAPSHOT_BATCH_COUNT = 20000;
+    public static final int DEFAULT_RECOVERY_SNAPSHOT_INTERVAL_SECONDS = 0;
     public static final int DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS = 500;
     public static final int DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS =
             DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS * 10;
     public static final int DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY = 50000;
     public static final Timeout DEFAULT_SHARD_INITIALIZATION_TIMEOUT = new Timeout(5, TimeUnit.MINUTES);
     public static final Timeout DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT = new Timeout(30, TimeUnit.SECONDS);
+    public static final int DEFAULT_INITIAL_SETTLE_TIMEOUT_MULTIPLIER = 3;
     public static final boolean DEFAULT_PERSISTENT = true;
+    public static final boolean DEFAULT_SNAPSHOT_ON_ROOT_OVERWRITE = false;
     public static final FileAkkaConfigurationReader DEFAULT_CONFIGURATION_READER = new FileAkkaConfigurationReader();
     public static final int DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE = 12;
+    public static final int DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD = 0;
     public static final int DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR = 2;
     public static final int DEFAULT_SHARD_CANDIDATE_ELECTION_TIMEOUT_DIVISOR = 1;
     public static final int DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT = 100;
@@ -59,8 +65,10 @@ public class DatastoreContext implements ClientActorConfig {
     public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT = 1000;
     public static final long DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS =
             TimeUnit.MILLISECONDS.convert(2, TimeUnit.MINUTES);
-    public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 2048 * 1000; // 2MB
+    public static final int DEFAULT_MAX_MESSAGE_SLICE_SIZE = 480 * 1024; // 480KiB
     public static final int DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY = 512;
+    public static final ExportOnRecovery DEFAULT_EXPORT_ON_RECOVERY = ExportOnRecovery.Off;
+    public static final String DEFAULT_RECOVERY_EXPORT_BASE_DIR = "persistence-export";
 
     public static final long DEFAULT_SYNC_INDEX_THRESHOLD = 10;
 
@@ -70,7 +78,6 @@ public class DatastoreContext implements ClientActorConfig {
 
     private final DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
 
-    private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
     private FiniteDuration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
     private long operationTimeoutInMillis = DEFAULT_OPERATION_TIMEOUT_IN_MS;
     private String dataStoreMXBeanType;
@@ -78,16 +85,17 @@ public class DatastoreContext implements ClientActorConfig {
     private int shardTransactionCommitQueueCapacity = DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
     private Timeout shardInitializationTimeout = DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
     private Timeout shardLeaderElectionTimeout = DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+    private int initialSettleTimeoutMultiplier = DEFAULT_INITIAL_SETTLE_TIMEOUT_MULTIPLIER;
     private boolean persistent = DEFAULT_PERSISTENT;
+    private boolean snapshotOnRootOverwrite = DEFAULT_SNAPSHOT_ON_ROOT_OVERWRITE;
     private AkkaConfigurationReader configurationReader = DEFAULT_CONFIGURATION_READER;
     private long transactionCreationInitialRateLimit = DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT;
     private String dataStoreName = UNKNOWN_DATA_STORE_TYPE;
     private LogicalDatastoreType logicalStoreType = LogicalDatastoreType.OPERATIONAL;
-    private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.EMPTY;
+    private YangInstanceIdentifier storeRoot = YangInstanceIdentifier.of();
     private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
     private boolean writeOnlyTransactionOptimizationsEnabled = true;
     private long shardCommitQueueExpiryTimeoutInMillis = DEFAULT_SHARD_COMMIT_QUEUE_EXPIRY_TIMEOUT_IN_MS;
-    private boolean useTellBasedProtocol = false;
     private boolean transactionDebugContextEnabled = false;
     private String shardManagerPersistenceId;
     private int maximumMessageSliceSize = DEFAULT_MAX_MESSAGE_SLICE_SIZE;
@@ -95,6 +103,9 @@ public class DatastoreContext implements ClientActorConfig {
     private long requestTimeout = AbstractClientConnection.DEFAULT_REQUEST_TIMEOUT_NANOS;
     private long noProgressTimeout = AbstractClientConnection.DEFAULT_NO_PROGRESS_TIMEOUT_NANOS;
     private int initialPayloadSerializedBufferCapacity = DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY;
+    private boolean useLz4Compression = false;
+    private ExportOnRecovery exportOnRecovery = DEFAULT_EXPORT_ON_RECOVERY;
+    private String recoveryExportBaseDir = DEFAULT_RECOVERY_EXPORT_BASE_DIR;
 
     public static Set<String> getGlobalDatastoreNames() {
         return GLOBAL_DATASTORE_NAMES;
@@ -103,9 +114,11 @@ public class DatastoreContext implements ClientActorConfig {
     DatastoreContext() {
         setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
         setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
+        setRecoverySnapshotIntervalSeconds(DEFAULT_RECOVERY_SNAPSHOT_INTERVAL_SECONDS);
         setHeartbeatInterval(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS);
         setIsolatedLeaderCheckInterval(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS);
         setSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE);
+        setSnapshotDataThreshold(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD);
         setElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR);
         setCandidateElectionTimeoutDivisor(DEFAULT_SHARD_CANDIDATE_ELECTION_TIMEOUT_DIVISOR);
         setSyncIndexThreshold(DEFAULT_SYNC_INDEX_THRESHOLD);
@@ -113,41 +126,45 @@ public class DatastoreContext implements ClientActorConfig {
     }
 
     private DatastoreContext(final DatastoreContext other) {
-        this.dataStoreProperties = other.dataStoreProperties;
-        this.shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
-        this.operationTimeoutInMillis = other.operationTimeoutInMillis;
-        this.dataStoreMXBeanType = other.dataStoreMXBeanType;
-        this.shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
-        this.shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
-        this.shardInitializationTimeout = other.shardInitializationTimeout;
-        this.shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
-        this.persistent = other.persistent;
-        this.configurationReader = other.configurationReader;
-        this.transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
-        this.dataStoreName = other.dataStoreName;
-        this.logicalStoreType = other.logicalStoreType;
-        this.storeRoot = other.storeRoot;
-        this.shardBatchedModificationCount = other.shardBatchedModificationCount;
-        this.writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
-        this.shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis;
-        this.transactionDebugContextEnabled = other.transactionDebugContextEnabled;
-        this.shardManagerPersistenceId = other.shardManagerPersistenceId;
-        this.useTellBasedProtocol = other.useTellBasedProtocol;
-        this.backendAlivenessTimerInterval = other.backendAlivenessTimerInterval;
-        this.requestTimeout = other.requestTimeout;
-        this.noProgressTimeout = other.noProgressTimeout;
-        this.initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity;
+        shardTransactionIdleTimeout = other.shardTransactionIdleTimeout;
+        operationTimeoutInMillis = other.operationTimeoutInMillis;
+        dataStoreMXBeanType = other.dataStoreMXBeanType;
+        shardTransactionCommitTimeoutInSeconds = other.shardTransactionCommitTimeoutInSeconds;
+        shardTransactionCommitQueueCapacity = other.shardTransactionCommitQueueCapacity;
+        shardInitializationTimeout = other.shardInitializationTimeout;
+        shardLeaderElectionTimeout = other.shardLeaderElectionTimeout;
+        initialSettleTimeoutMultiplier = other.initialSettleTimeoutMultiplier;
+        persistent = other.persistent;
+        snapshotOnRootOverwrite = other.snapshotOnRootOverwrite;
+        configurationReader = other.configurationReader;
+        transactionCreationInitialRateLimit = other.transactionCreationInitialRateLimit;
+        dataStoreName = other.dataStoreName;
+        logicalStoreType = other.logicalStoreType;
+        storeRoot = other.storeRoot;
+        shardBatchedModificationCount = other.shardBatchedModificationCount;
+        writeOnlyTransactionOptimizationsEnabled = other.writeOnlyTransactionOptimizationsEnabled;
+        shardCommitQueueExpiryTimeoutInMillis = other.shardCommitQueueExpiryTimeoutInMillis;
+        transactionDebugContextEnabled = other.transactionDebugContextEnabled;
+        shardManagerPersistenceId = other.shardManagerPersistenceId;
+        backendAlivenessTimerInterval = other.backendAlivenessTimerInterval;
+        requestTimeout = other.requestTimeout;
+        noProgressTimeout = other.noProgressTimeout;
+        initialPayloadSerializedBufferCapacity = other.initialPayloadSerializedBufferCapacity;
+        useLz4Compression = other.useLz4Compression;
+        exportOnRecovery = other.exportOnRecovery;
+        recoveryExportBaseDir = other.recoveryExportBaseDir;
 
         setShardJournalRecoveryLogBatchSize(other.raftConfig.getJournalRecoveryLogBatchSize());
         setSnapshotBatchCount(other.raftConfig.getSnapshotBatchCount());
+        setRecoverySnapshotIntervalSeconds(other.raftConfig.getRecoverySnapshotIntervalSeconds());
         setHeartbeatInterval(other.raftConfig.getHeartBeatInterval().toMillis());
         setIsolatedLeaderCheckInterval(other.raftConfig.getIsolatedCheckIntervalInMillis());
         setSnapshotDataThresholdPercentage(other.raftConfig.getSnapshotDataThresholdPercentage());
+        setSnapshotDataThreshold(other.raftConfig.getSnapshotDataThreshold());
         setElectionTimeoutFactor(other.raftConfig.getElectionTimeoutFactor());
         setCandidateElectionTimeoutDivisor(other.raftConfig.getCandidateElectionTimeoutDivisor());
         setCustomRaftPolicyImplementation(other.raftConfig.getCustomRaftPolicyImplementationClass());
         setMaximumMessageSliceSize(other.getMaximumMessageSliceSize());
-        setShardSnapshotChunkSize(other.raftConfig.getSnapshotChunkSize());
         setPeerAddressResolver(other.raftConfig.getPeerAddressResolver());
         setTempFileDirectory(other.getTempFileDirectory());
         setFileBackedStreamingThreshold(other.getFileBackedStreamingThreshold());
@@ -162,10 +179,6 @@ public class DatastoreContext implements ClientActorConfig {
         return new Builder(new DatastoreContext(context));
     }
 
-    public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
-        return dataStoreProperties;
-    }
-
     public FiniteDuration getShardTransactionIdleTimeout() {
         return shardTransactionIdleTimeout;
     }
@@ -198,10 +211,24 @@ public class DatastoreContext implements ClientActorConfig {
         return shardLeaderElectionTimeout;
     }
 
+    /**
+     * Return the multiplier of {@link #getShardLeaderElectionTimeout()} determining how long the frontend will
+     * wait for all shards on the local node to settle.
+     *
+     * @return Non-negative multiplier. A value of {@code 0} indicates waiting indefinitely.
+     */
+    public int getInitialSettleTimeoutMultiplier() {
+        return initialSettleTimeoutMultiplier;
+    }
+
     public boolean isPersistent() {
         return persistent;
     }
 
+    public boolean isSnapshotOnRootOverwrite() {
+        return snapshotOnRootOverwrite;
+    }
+
     public AkkaConfigurationReader getConfigurationReader() {
         return configurationReader;
     }
@@ -280,26 +307,29 @@ public class DatastoreContext implements ClientActorConfig {
     }
 
     private void setSnapshotDataThresholdPercentage(final int shardSnapshotDataThresholdPercentage) {
-        Preconditions.checkArgument(shardSnapshotDataThresholdPercentage >= 0
-                && shardSnapshotDataThresholdPercentage <= 100);
+        checkArgument(shardSnapshotDataThresholdPercentage >= 0 && shardSnapshotDataThresholdPercentage <= 100);
         raftConfig.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
     }
 
+    private void setSnapshotDataThreshold(final int shardSnapshotDataThreshold) {
+        checkArgument(shardSnapshotDataThreshold >= 0);
+        raftConfig.setSnapshotDataThreshold(shardSnapshotDataThreshold);
+    }
+
     private void setSnapshotBatchCount(final long shardSnapshotBatchCount) {
         raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
     }
 
-    @Deprecated
-    private void setShardSnapshotChunkSize(final int shardSnapshotChunkSize) {
-        // We'll honor the shardSnapshotChunkSize setting for backwards compatibility but only if it doesn't exceed
-        // maximumMessageSliceSize.
-        if (shardSnapshotChunkSize < maximumMessageSliceSize) {
-            raftConfig.setSnapshotChunkSize(shardSnapshotChunkSize);
-        }
+    /**
+     * Set the interval, in seconds, after which a snapshot should be taken during the recovery process.
+     * A value of {@code 0} means no recovery snapshots are taken.
+     */
+    private void setRecoverySnapshotIntervalSeconds(final int recoverySnapshotInterval) {
+        raftConfig.setRecoverySnapshotIntervalSeconds(recoverySnapshotInterval);
     }
 
     private void setMaximumMessageSliceSize(final int maximumMessageSliceSize) {
-        raftConfig.setSnapshotChunkSize(maximumMessageSliceSize);
+        raftConfig.setMaximumMessageSliceSize(maximumMessageSliceSize);
         this.maximumMessageSliceSize = maximumMessageSliceSize;
     }
 
@@ -323,8 +353,16 @@ public class DatastoreContext implements ClientActorConfig {
         return transactionDebugContextEnabled;
     }
 
-    public boolean isUseTellBasedProtocol() {
-        return useTellBasedProtocol;
+    public boolean isUseLz4Compression() {
+        return useLz4Compression;
+    }
+
+    public ExportOnRecovery getExportOnRecovery() {
+        return exportOnRecovery;
+    }
+
+    public String getRecoveryExportBaseDir() {
+        return recoveryExportBaseDir;
     }
 
     @Override
@@ -351,30 +389,11 @@ public class DatastoreContext implements ClientActorConfig {
         return initialPayloadSerializedBufferCapacity;
     }
 
-    public static class Builder implements org.opendaylight.yangtools.concepts.Builder<DatastoreContext> {
+    public static class Builder {
         private final DatastoreContext datastoreContext;
-        private int maxShardDataChangeExecutorPoolSize =
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE;
-        private int maxShardDataChangeExecutorQueueSize =
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE;
-        private int maxShardDataChangeListenerQueueSize =
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE;
-        private int maxShardDataStoreExecutorQueueSize =
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE;
 
         Builder(final DatastoreContext datastoreContext) {
             this.datastoreContext = datastoreContext;
-
-            if (datastoreContext.getDataStoreProperties() != null) {
-                maxShardDataChangeExecutorPoolSize =
-                        datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize();
-                maxShardDataChangeExecutorQueueSize =
-                        datastoreContext.getDataStoreProperties().getMaxDataChangeExecutorQueueSize();
-                maxShardDataChangeListenerQueueSize =
-                        datastoreContext.getDataStoreProperties().getMaxDataChangeListenerQueueSize();
-                maxShardDataStoreExecutorQueueSize =
-                        datastoreContext.getDataStoreProperties().getMaxDataStoreExecutorQueueSize();
-            }
         }
 
         public Builder boundedMailboxCapacity(final int boundedMailboxCapacity) {
@@ -427,11 +446,22 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
+        public Builder recoverySnapshotIntervalSeconds(final int recoverySnapshotIntervalSeconds) {
+            checkArgument(recoverySnapshotIntervalSeconds >= 0);
+            datastoreContext.setRecoverySnapshotIntervalSeconds(recoverySnapshotIntervalSeconds);
+            return this;
+        }
+
         public Builder shardSnapshotDataThresholdPercentage(final int shardSnapshotDataThresholdPercentage) {
             datastoreContext.setSnapshotDataThresholdPercentage(shardSnapshotDataThresholdPercentage);
             return this;
         }
 
+        public Builder shardSnapshotDataThreshold(final int shardSnapshotDataThreshold) {
+            datastoreContext.setSnapshotDataThreshold(shardSnapshotDataThreshold);
+            return this;
+        }
+
         public Builder shardHeartbeatIntervalInMillis(final int shardHeartbeatIntervalInMillis) {
             datastoreContext.setHeartbeatInterval(shardHeartbeatIntervalInMillis);
             return this;
@@ -456,6 +486,12 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
+        public Builder initialSettleTimeoutMultiplier(final int multiplier) {
+            checkArgument(multiplier >= 0);
+            datastoreContext.initialSettleTimeoutMultiplier = multiplier;
+            return this;
+        }
+
         public Builder shardLeaderElectionTimeoutInSeconds(final long timeout) {
             return shardLeaderElectionTimeout(timeout, TimeUnit.SECONDS);
         }
@@ -470,6 +506,11 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
+        public Builder snapshotOnRootOverwrite(final boolean snapshotOnRootOverwrite) {
+            datastoreContext.snapshotOnRootOverwrite = snapshotOnRootOverwrite;
+            return this;
+        }
+
         public Builder shardIsolatedLeaderCheckIntervalInMillis(final int shardIsolatedLeaderCheckIntervalInMillis) {
             datastoreContext.setIsolatedLeaderCheckInterval(shardIsolatedLeaderCheckIntervalInMillis);
             return this;
@@ -491,7 +532,7 @@ public class DatastoreContext implements ClientActorConfig {
         }
 
         public Builder logicalStoreType(final LogicalDatastoreType logicalStoreType) {
-            datastoreContext.logicalStoreType = Preconditions.checkNotNull(logicalStoreType);
+            datastoreContext.logicalStoreType = requireNonNull(logicalStoreType);
 
             // Retain compatible naming
             switch (logicalStoreType) {
@@ -514,7 +555,7 @@ public class DatastoreContext implements ClientActorConfig {
         }
 
         public Builder dataStoreName(final String dataStoreName) {
-            datastoreContext.dataStoreName = Preconditions.checkNotNull(dataStoreName);
+            datastoreContext.dataStoreName = requireNonNull(dataStoreName);
             datastoreContext.dataStoreMXBeanType = "Distributed" + WordUtils.capitalize(dataStoreName) + "Datastore";
             return this;
         }
@@ -545,28 +586,18 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
-        public Builder maxShardDataChangeExecutorPoolSize(final int newMaxShardDataChangeExecutorPoolSize) {
-            this.maxShardDataChangeExecutorPoolSize = newMaxShardDataChangeExecutorPoolSize;
+        public Builder useLz4Compression(final boolean value) {
+            datastoreContext.useLz4Compression = value;
             return this;
         }
 
-        public Builder maxShardDataChangeExecutorQueueSize(final int newMaxShardDataChangeExecutorQueueSize) {
-            this.maxShardDataChangeExecutorQueueSize = newMaxShardDataChangeExecutorQueueSize;
+        public Builder exportOnRecovery(final ExportOnRecovery value) {
+            datastoreContext.exportOnRecovery = value;
             return this;
         }
 
-        public Builder maxShardDataChangeListenerQueueSize(final int newMaxShardDataChangeListenerQueueSize) {
-            this.maxShardDataChangeListenerQueueSize = newMaxShardDataChangeListenerQueueSize;
-            return this;
-        }
-
-        public Builder maxShardDataStoreExecutorQueueSize(final int newMaxShardDataStoreExecutorQueueSize) {
-            this.maxShardDataStoreExecutorQueueSize = newMaxShardDataStoreExecutorQueueSize;
-            return this;
-        }
-
-        public Builder useTellBasedProtocol(final boolean value) {
-            datastoreContext.useTellBasedProtocol = value;
+        public Builder recoveryExportBaseDir(final String value) {
+            datastoreContext.recoveryExportBaseDir = value;
             return this;
         }
 
@@ -584,14 +615,6 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
-        @Deprecated
-        public Builder shardSnapshotChunkSize(final int shardSnapshotChunkSize) {
-            LOG.warn("The shard-snapshot-chunk-size configuration parameter is deprecated - "
-                    + "use maximum-message-slice-size instead");
-            datastoreContext.setShardSnapshotChunkSize(shardSnapshotChunkSize);
-            return this;
-        }
-
         public Builder maximumMessageSliceSize(final int maximumMessageSliceSize) {
             datastoreContext.setMaximumMessageSliceSize(maximumMessageSliceSize);
             return this;
@@ -637,15 +660,7 @@ public class DatastoreContext implements ClientActorConfig {
             return this;
         }
 
-        @Override
         public DatastoreContext build() {
-            datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.builder()
-                    .maxDataChangeExecutorPoolSize(maxShardDataChangeExecutorPoolSize)
-                    .maxDataChangeExecutorQueueSize(maxShardDataChangeExecutorQueueSize)
-                    .maxDataChangeListenerQueueSize(maxShardDataChangeListenerQueueSize)
-                    .maxDataStoreExecutorQueueSize(maxShardDataStoreExecutorQueueSize)
-                    .build();
-
             if (datastoreContext.dataStoreName != null) {
                 GLOBAL_DATASTORE_NAMES.add(datastoreContext.dataStoreName);
             }
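
For orientation, the new Builder knobs above compose as in the following sketch. It is illustrative only: the values are placeholders, and it uses just the setters visible in this hunk plus the ExportOnRecovery enum imported from the rev231229 bindings.

    // Illustrative sketch, not part of the patch: combining the newly added Builder setters.
    DatastoreContext context = DatastoreContext.newBuilder()
            .logicalStoreType(LogicalDatastoreType.OPERATIONAL)
            .dataStoreName("operational")
            .recoverySnapshotIntervalSeconds(600)   // 0 (the default) disables recovery snapshots
            .shardSnapshotDataThreshold(0)          // absolute threshold; 0 is the default
            .initialSettleTimeoutMultiplier(3)      // 0 would mean "wait indefinitely"
            .snapshotOnRootOverwrite(false)
            .useLz4Compression(true)
            .exportOnRecovery(ExportOnRecovery.Off)
            .recoveryExportBaseDir("persistence-export")
            .build();
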
index 579e096af77572393ad1e1f0d07439cd16b60210..ac50ff30a26e961f81517ac365441e578d1c51e0 100644 (file)
@@ -7,16 +7,11 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.primitives.Primitives;
-import java.beans.BeanInfo;
-import java.beans.ConstructorProperties;
-import java.beans.IntrospectionException;
-import java.beans.Introspector;
-import java.beans.MethodDescriptor;
-import java.beans.PropertyDescriptor;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -25,20 +20,22 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import javax.annotation.concurrent.GuardedBy;
+import java.util.function.Function;
+import javax.management.ConstructorParameters;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.text.WordUtils;
+import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStoreProperties;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.opendaylight.yangtools.yang.common.Uint64;
+import org.opendaylight.yangtools.yang.common.Uint8;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,12 +55,20 @@ public class DatastoreContextIntrospector {
 
     private static final Map<String, Method> BUILDER_SETTERS = new HashMap<>();
 
+    private static final ImmutableMap<Class<?>, Function<String, Object>> UINT_FACTORIES =
+            ImmutableMap.<Class<?>, Function<String, Object>>builder()
+            .put(Uint8.class, Uint8::valueOf)
+            .put(Uint16.class, Uint16::valueOf)
+            .put(Uint32.class, Uint32::valueOf)
+            .put(Uint64.class, Uint64::valueOf)
+            .build();
+
     static {
         try {
             introspectDatastoreContextBuilder();
             introspectDataStoreProperties();
             introspectPrimitiveTypes();
-        } catch (final IntrospectionException e) {
+        } catch (final IllegalArgumentException e) {
             LOG.error("Error initializing DatastoreContextIntrospector", e);
         }
     }
@@ -78,12 +83,12 @@ public class DatastoreContextIntrospector {
     private static void introspectPrimitiveTypes() {
         final Set<Class<?>> primitives = ImmutableSet.<Class<?>>builder().addAll(
                 Primitives.allWrapperTypes()).add(String.class).build();
-        for (final Class<?> primitive: primitives) {
+        for (final Class<?> primitive : primitives) {
             try {
                 processPropertyType(primitive);
             } catch (final NoSuchMethodException e) {
                 // Ignore primitives that can't be constructed from a String, eg Character and Void.
-            } catch (SecurityException | IntrospectionException e) {
+            } catch (SecurityException | IllegalArgumentException e) {
                 LOG.error("Error introspect primitive type {}", primitive, e);
             }
         }
@@ -108,24 +113,26 @@ public class DatastoreContextIntrospector {
      * yang grouping. We use the bean Introspector to find the types of all the properties defined
      * in the interface (this is the type returned from the getter method). For each type, we find
      * the appropriate constructor that we will use.
+     *
+     * @throws IllegalArgumentException if a yang-defined property cannot be processed
      */
-    private static void introspectDataStoreProperties() throws IntrospectionException {
-        final BeanInfo beanInfo = Introspector.getBeanInfo(DataStoreProperties.class);
-        for (final PropertyDescriptor desc: beanInfo.getPropertyDescriptors()) {
-            processDataStoreProperty(desc.getName(), desc.getPropertyType(), desc.getReadMethod());
+    private static void introspectDataStoreProperties() {
+        for (final Method method : DataStoreProperties.class.getDeclaredMethods()) {
+            final String propertyName = getPropertyName(method);
+            if (propertyName != null) {
+                processDataStoreProperty(propertyName, method.getReturnType(), method);
+            }
         }
+    }
 
-        // Getter methods that return Boolean and start with "is" instead of "get" aren't recognized as
-        // properties and thus aren't returned from getPropertyDescriptors. A getter starting with
-        // "is" is only supported if it returns primitive boolean. So we'll check for these via
-        // getMethodDescriptors.
-        for (final MethodDescriptor desc: beanInfo.getMethodDescriptors()) {
-            final String methodName = desc.getName();
-            if (Boolean.class.equals(desc.getMethod().getReturnType()) && methodName.startsWith("is")) {
-                final String propertyName = WordUtils.uncapitalize(methodName.substring(2));
-                processDataStoreProperty(propertyName, Boolean.class, desc.getMethod());
-            }
+    private static String getPropertyName(final Method method) {
+        final String methodName = method.getName();
+        if (Boolean.class.equals(method.getReturnType()) && methodName.startsWith("is")) {
+            return WordUtils.uncapitalize(methodName.substring(2));
+        } else if (methodName.startsWith("get")) {
+            return WordUtils.uncapitalize(methodName.substring(3));
         }
+        return null;
     }
 
     /**
@@ -134,9 +141,9 @@ public class DatastoreContextIntrospector {
     @SuppressWarnings("checkstyle:IllegalCatch")
     private static void processDataStoreProperty(final String name, final Class<?> propertyType,
             final Method readMethod) {
-        Preconditions.checkArgument(BUILDER_SETTERS.containsKey(name), String.format(
+        checkArgument(BUILDER_SETTERS.containsKey(name),
                 "DataStoreProperties property \"%s\" does not have corresponding setter in DatastoreContext.Builder",
-                name));
+                name);
         try {
             processPropertyType(propertyType);
             DATA_STORE_PROP_INFO.put(name, new SimpleImmutableEntry<>(propertyType, readMethod));
@@ -148,9 +155,11 @@ public class DatastoreContextIntrospector {
     /**
      * Finds the appropriate constructor for the specified type that we will use to construct
      * instances.
+     *
+     * @throws IllegalArgumentException if the yang-defined type has no property annotated by ConstructorParameters
      */
     private static void processPropertyType(final Class<?> propertyType)
-            throws NoSuchMethodException, SecurityException, IntrospectionException {
+            throws NoSuchMethodException, SecurityException {
         final Class<?> wrappedType = Primitives.wrap(propertyType);
         if (CONSTRUCTORS.containsKey(wrappedType)) {
             return;
@@ -165,11 +174,11 @@ public class DatastoreContextIntrospector {
             // This must be a yang-defined type. We need to find the constructor that takes a
             // primitive as the only argument. This will be used to construct instances to perform
             // validation (eg range checking). The yang-generated types have a couple single-argument
-            // constructors but the one we want has the bean ConstructorProperties annotation.
+            // constructors but the one we want has the ConstructorParameters annotation.
             for (final Constructor<?> ctor: propertyType.getConstructors()) {
-                final ConstructorProperties ctorPropsAnnotation = ctor.getAnnotation(ConstructorProperties.class);
-                if (ctor.getParameterCount() == 1 && ctorPropsAnnotation != null) {
-                    findYangTypeGetter(propertyType, ctorPropsAnnotation.value()[0]);
+                final ConstructorParameters ctorParAnnotation = ctor.getAnnotation(ConstructorParameters.class);
+                if (ctor.getParameterCount() == 1 && ctorParAnnotation != null) {
+                    findYangTypeGetter(propertyType, ctorParAnnotation.value()[0]);
                     CONSTRUCTORS.put(propertyType, ctor);
                     break;
                 }
@@ -179,17 +188,19 @@ public class DatastoreContextIntrospector {
 
     /**
      * Finds the getter method on a yang-generated type for the specified property name.
+     *
+     * @throws IllegalArgumentException if the type does not define the specified property
      */
-    private static void findYangTypeGetter(final Class<?> type, final String propertyName)
-            throws IntrospectionException {
-        for (final PropertyDescriptor desc: Introspector.getBeanInfo(type).getPropertyDescriptors()) {
-            if (desc.getName().equals(propertyName)) {
-                YANG_TYPE_GETTERS.put(type, desc.getReadMethod());
+    private static void findYangTypeGetter(final Class<?> type, final String propertyName) {
+        for (Method method : type.getDeclaredMethods()) {
+            final String property = getPropertyName(method);
+            if (property != null && property.equals(propertyName)) {
+                YANG_TYPE_GETTERS.put(type, method);
                 return;
             }
         }
 
-        throw new IntrospectionException(String.format(
+        throw new IllegalArgumentException(String.format(
                 "Getter method for constructor property %s not found for YANG type %s",
                 propertyName, type));
     }
@@ -200,12 +211,7 @@ public class DatastoreContextIntrospector {
     private Map<String, Object> currentProperties;
 
     public DatastoreContextIntrospector(final DatastoreContext context,
-            final BindingNormalizedNodeSerializer bindingSerializer) {
-        final QName qname = BindingReflections.findQName(DataStorePropertiesContainer.class);
-        final DataStorePropertiesContainer defaultPropsContainer = (DataStorePropertiesContainer)
-                bindingSerializer.fromNormalizedNode(bindingSerializer.toYangInstanceIdentifier(
-                        InstanceIdentifier.builder(DataStorePropertiesContainer.class).build()),
-                ImmutableNodes.containerNode(qname)).getValue();
+            final DataStorePropertiesContainer defaultPropsContainer) {
 
         final Builder builder = DatastoreContext.newBuilderFrom(context);
         for (Entry<String, Entry<Class<?>, Method>> entry: DATA_STORE_PROP_INFO.entrySet()) {
@@ -332,8 +338,12 @@ public class DatastoreContextIntrospector {
 
             // Call the setter method on the Builder instance.
             final Method setter = BUILDER_SETTERS.get(key);
-            setter.invoke(builder, constructorValueRecursively(
-                    Primitives.wrap(setter.getParameterTypes()[0]), value.toString()));
+            if (value.getClass().isEnum()) {
+                setter.invoke(builder, value);
+            } else {
+                setter.invoke(builder, constructorValueRecursively(
+                        Primitives.wrap(setter.getParameterTypes()[0]), value.toString()));
+            }
 
             return true;
         } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException
@@ -368,6 +378,17 @@ public class DatastoreContextIntrospector {
         LOG.debug("Type for property {}: {}, converting value {} ({})",
                 name, propertyType.getSimpleName(), from, from.getClass().getSimpleName());
 
+        if (propertyType.isEnum()) {
+            try {
+                final Method enumConstructor = propertyType.getDeclaredMethod("forName", String.class);
+                if (enumConstructor.getReturnType().equals(propertyType)) {
+                    return enumConstructor.invoke(null, from.toString().toLowerCase(Locale.ROOT));
+                }
+            } catch (NoSuchMethodException e) {
+                LOG.error("Error constructing value ({}) for enum {}", from, propertyType);
+            }
+        }
+
         // Recurse the chain of constructors depth-first to get the resulting value. Eg, if the
         // property type is the yang-generated NonZeroUint32Type, it's constructor takes a Long so
         // we have to first construct a Long instance from the input value.
@@ -392,13 +413,18 @@ public class DatastoreContextIntrospector {
         }
 
         final Constructor<?> ctor = CONSTRUCTORS.get(toType);
-
-        LOG.trace("Found {}", ctor);
-
         if (ctor == null) {
+            if (fromValue instanceof String) {
+                final Function<String, Object> factory = UINT_FACTORIES.get(toType);
+                if (factory != null) {
+                    return factory.apply((String) fromValue);
+                }
+            }
+
             throw new IllegalArgumentException(String.format("Constructor not found for type %s", toType));
         }
 
+        LOG.trace("Found {}", ctor);
         Object value = fromValue;
 
         // Once we find a constructor that takes the original type as an argument, we're done recursing.
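
The fallback above exists because the yangtools Uint classes expose static valueOf factories rather than public single-argument constructors. A standalone sketch of that conversion path, using a hypothetical helper that mirrors (but is not) the private UINT_FACTORIES map:

    // Hypothetical helper, not part of the patch: convert a String property value into a
    // yangtools Uint instance when no registered constructor is available.
    import com.google.common.collect.ImmutableMap;
    import java.util.function.Function;
    import org.opendaylight.yangtools.yang.common.Uint16;
    import org.opendaylight.yangtools.yang.common.Uint32;

    final class UintConversionSketch {
        private static final ImmutableMap<Class<?>, Function<String, Object>> FACTORIES =
                ImmutableMap.of(Uint16.class, Uint16::valueOf, Uint32.class, Uint32::valueOf);

        static Object convert(final Class<?> type, final String value) {
            final Function<String, Object> factory = FACTORIES.get(type);
            return factory != null ? factory.apply(value) : value;
        }
    }
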
index 1b7c0281a6f40646e9622690e77618a079ea3daf..1bc5e9dac461f6e39198c478826cc311bb31b8a1 100644 (file)
@@ -7,47 +7,25 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.annotations.VisibleForTesting;
-import javassist.ClassPool;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.binding.dom.codec.gen.impl.StreamWriterGenerator;
-import org.opendaylight.mdsal.binding.dom.codec.impl.BindingNormalizedNodeCodecRegistry;
-import org.opendaylight.mdsal.binding.generator.api.ClassLoadingStrategy;
-import org.opendaylight.mdsal.binding.generator.util.BindingRuntimeContext;
-import org.opendaylight.mdsal.binding.generator.util.JavassistUtils;
+import java.util.Map;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
 
 /**
- * Factory for creating DatastoreContextIntrospector instances.
+ * Factory for creating {@link DatastoreContextIntrospector} instances.
  *
  * @author Thomas Pantelis
  */
-public class DatastoreContextIntrospectorFactory {
-    private final DOMSchemaService schemaService;
-    private final ClassLoadingStrategy classLoadingStrategy;
-
-    public DatastoreContextIntrospectorFactory(DOMSchemaService schemaService,
-            ClassLoadingStrategy classLoadingStrategy) {
-        this.schemaService = schemaService;
-        this.classLoadingStrategy = classLoadingStrategy;
-    }
-
-    public DatastoreContextIntrospector newInstance(LogicalDatastoreType datastoreType) {
-        return new DatastoreContextIntrospector(DatastoreContext.newBuilder()
-                .logicalStoreType(datastoreType).tempFileDirectory("./data").build(), newBindingSerializer());
-    }
-
-    @VisibleForTesting
-    DatastoreContextIntrospector newInstance(DatastoreContext context) {
-        return new DatastoreContextIntrospector(context, newBindingSerializer());
-    }
-
-    private BindingNormalizedNodeSerializer newBindingSerializer() {
-        BindingNormalizedNodeCodecRegistry codecRegistry = new BindingNormalizedNodeCodecRegistry(
-                StreamWriterGenerator.create(JavassistUtils.forClassPool(ClassPool.getDefault())));
-        codecRegistry.onBindingRuntimeContextUpdated(BindingRuntimeContext.create(classLoadingStrategy,
-                schemaService.getGlobalContext()));
-        return codecRegistry;
-    }
+@NonNullByDefault
+public interface DatastoreContextIntrospectorFactory {
+    /**
+     * Create a new {@link DatastoreContextIntrospector} initialized with the specified properties.
+     *
+     * @param datastoreType Datastore type
+     * @param properties optional initial properties
+     * @return A new DatastoreContextIntrospector
+     */
+    DatastoreContextIntrospector newInstance(LogicalDatastoreType datastoreType,
+        @Nullable Map<String, Object> properties);
 }
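
A minimal usage sketch of the new contract follows; the property key is an illustrative leaf name from the data-store-properties grouping, and the factory instance is assumed to be supplied by the caller (for example the DefaultDatastoreContextIntrospectorFactory added later in this change).

    // Sketch only: obtain an introspector for the config datastore with one initial property.
    static DatastoreContextIntrospector configIntrospector(final DatastoreContextIntrospectorFactory factory) {
        return factory.newInstance(LogicalDatastoreType.CONFIGURATION, Map.of("persistent", "false"));
    }
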
@@ -5,8 +5,9 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+package org.opendaylight.controller.cluster.datastore;
 
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBean;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 
@@ -15,18 +16,26 @@ import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
  *
  * @author Thomas Pantelis
  */
-public class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
-
+final class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
     private final ActorUtils actorUtils;
 
-    public DatastoreInfoMXBeanImpl(String mxBeanType, ActorUtils actorUtils) {
+    DatastoreInfoMXBeanImpl(final String mxBeanType, final ActorUtils actorUtils) {
         super("GeneralRuntimeInfo", mxBeanType, null);
         this.actorUtils = actorUtils;
     }
 
-
     @Override
     public double getTransactionCreationRateLimit() {
         return actorUtils.getTxCreationLimit();
     }
+
+    @Override
+    public long getAskTimeoutExceptionCount() {
+        return actorUtils.getAskTimeoutExceptionCount();
+    }
+
+    @Override
+    public void resetAskTimeoutExceptionCount() {
+        actorUtils.resetAskTimeoutExceptionCount();
+    }
 }
index db9acaebcdd3e39316d0cc67abd456f68fc922ea..f6068dd539a0490bc3f74ae9a21ecc4e47bcd571 100644 (file)
@@ -7,19 +7,9 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.ObjectInputStream;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicReference;
+import com.google.common.annotations.Beta;
+import java.util.Optional;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshotList;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * This class looks for a previously saved data store backup file in a directory and, if found, de-serializes
@@ -27,72 +17,8 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-public final class DatastoreSnapshotRestore {
-    private static final Logger LOG = LoggerFactory.getLogger(DatastoreSnapshotRestore.class);
+@Beta
+public interface DatastoreSnapshotRestore {
 
-    private static AtomicReference<DatastoreSnapshotRestore> instance = new AtomicReference<>();
-
-    private final String restoreDirectoryPath;
-    private final Map<String, DatastoreSnapshot> datastoreSnapshots = new ConcurrentHashMap<>();
-
-    public static DatastoreSnapshotRestore instance(final String restoreDirectoryPath) {
-        instance.compareAndSet(null, new DatastoreSnapshotRestore(restoreDirectoryPath));
-        return instance.get();
-    }
-
-    private DatastoreSnapshotRestore(final String restoreDirectoryPath) {
-        this.restoreDirectoryPath = Preconditions.checkNotNull(restoreDirectoryPath);
-    }
-
-    // synchronize this method so that, in case of concurrent access to getAndRemove(),
-    // no one ends up with partially initialized data
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private synchronized void initialize() {
-
-        File restoreDirectoryFile = new File(restoreDirectoryPath);
-
-        String[] files = restoreDirectoryFile.list();
-        if (files == null || files.length == 0) {
-            LOG.debug("Restore directory {} does not exist or is empty", restoreDirectoryFile);
-            return;
-        }
-
-        if (files.length > 1) {
-            LOG.error(
-                "Found {} files in clustered datastore restore directory {} - expected 1. No restore will be attempted",
-                files.length, restoreDirectoryFile);
-            return;
-        }
-
-        File restoreFile = new File(restoreDirectoryFile, files[0]);
-
-        LOG.info("Clustered datastore will be restored from file {}", restoreFile);
-
-        try (FileInputStream fis = new FileInputStream(restoreFile)) {
-            DatastoreSnapshotList snapshots = deserialize(fis);
-            LOG.debug("Deserialized {} snapshots", snapshots.size());
-
-            for (DatastoreSnapshot snapshot: snapshots) {
-                datastoreSnapshots.put(snapshot.getType(), snapshot);
-            }
-        } catch (ClassNotFoundException | IOException e) {
-            LOG.error("Error reading clustered datastore restore file {}", restoreFile, e);
-        } finally {
-            if (!restoreFile.delete()) {
-                LOG.error("Could not delete clustered datastore restore file {}", restoreFile);
-            }
-        }
-    }
-
-    private static DatastoreSnapshotList deserialize(final InputStream inputStream)
-            throws IOException, ClassNotFoundException {
-        try (ObjectInputStream ois = new ObjectInputStream(inputStream)) {
-            return (DatastoreSnapshotList) ois.readObject();
-        }
-    }
-
-    public DatastoreSnapshot getAndRemove(final String datastoreType) {
-        initialize();
-        return datastoreSnapshots.remove(datastoreType);
-    }
+    Optional<DatastoreSnapshot> getAndRemove(String datastoreType);
 }
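
Callers now receive an Optional instead of a nullable reference. A minimal sketch of the updated call pattern (LOG is an assumed slf4j logger):

    // Sketch: consume the Optional-returning contract defined above.
    void restoreIfPresent(final DatastoreSnapshotRestore restore) {
        restore.getAndRemove("config").ifPresent(
            snapshot -> LOG.info("Restoring datastore {} from backup", snapshot.getType()));
    }
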
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index 9f9d169..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * An AbstractThreePhaseCommitCohort implementation used for debugging. If a failure occurs, the transaction
- * call site is printed.
- *
- * @author Thomas Pantelis
- */
-class DebugThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
-    private static final Logger LOG = LoggerFactory.getLogger(DebugThreePhaseCommitCohort.class);
-
-    private final AbstractThreePhaseCommitCohort<?> delegate;
-    private final Throwable debugContext;
-    private final TransactionIdentifier transactionId;
-
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_FINAL")
-    private Logger log = LOG;
-
-    DebugThreePhaseCommitCohort(final TransactionIdentifier transactionId,
-            final AbstractThreePhaseCommitCohort<?> delegate, final Throwable debugContext) {
-        this.delegate = Preconditions.checkNotNull(delegate);
-        this.debugContext = Preconditions.checkNotNull(debugContext);
-        this.transactionId = Preconditions.checkNotNull(transactionId);
-    }
-
-    private <V> ListenableFuture<V> addFutureCallback(final ListenableFuture<V> future) {
-        Futures.addCallback(future, new FutureCallback<V>() {
-            @Override
-            public void onSuccess(final V result) {
-                // no-op
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                log.warn("Transaction {} failed with error \"{}\" - was allocated in the following context",
-                        transactionId, failure, debugContext);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return future;
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return addFutureCallback(delegate.canCommit());
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return addFutureCallback(delegate.preCommit());
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return addFutureCallback(delegate.commit());
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegate.abort();
-    }
-
-    @SuppressWarnings({ "rawtypes", "unchecked" })
-    @Override
-    List<Future<Object>> getCohortFutures() {
-        return ((AbstractThreePhaseCommitCohort)delegate).getCohortFutures();
-    }
-
-    @VisibleForTesting
-    void setLogger(final Logger logger) {
-        this.log = logger;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultDatastoreContextIntrospectorFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultDatastoreContextIntrospectorFactory.java
new file mode 100644 (file)
index 0000000..5f56457
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+
+@Beta
+public final class DefaultDatastoreContextIntrospectorFactory extends AbstractDatastoreContextIntrospectorFactory {
+    private final BindingNormalizedNodeSerializer serializer;
+
+    public DefaultDatastoreContextIntrospectorFactory(final BindingNormalizedNodeSerializer serializer) {
+        this.serializer = requireNonNull(serializer);
+    }
+
+    @Override
+    BindingNormalizedNodeSerializer serializer() {
+        return serializer;
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultDatastoreSnapshotRestore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DefaultDatastoreSnapshotRestore.java
new file mode 100644 (file)
index 0000000..5fbc7c1
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectInputStream;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshotList;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class looks for a previously saved data store backup file in a directory and, if found, de-serializes
+ * the DatastoreSnapshot instances. The restore scan runs when this OSGi component is activated.
+ *
+ * @author Thomas Pantelis
+ */
+@Beta
+@Component(immediate = true)
+public final class DefaultDatastoreSnapshotRestore implements DatastoreSnapshotRestore {
+    private static final Logger LOG = LoggerFactory.getLogger(DefaultDatastoreSnapshotRestore.class);
+
+    private final Map<String, DatastoreSnapshot> datastoreSnapshots = new ConcurrentHashMap<>();
+    private final String restoreDirectoryPath;
+
+    public DefaultDatastoreSnapshotRestore() {
+        this("./clustered-datastore-restore");
+    }
+
+    public DefaultDatastoreSnapshotRestore(final String restoreDirectoryPath) {
+        this.restoreDirectoryPath = requireNonNull(restoreDirectoryPath);
+    }
+
+    @Override
+    public Optional<DatastoreSnapshot> getAndRemove(final String datastoreType) {
+        return Optional.ofNullable(datastoreSnapshots.remove(datastoreType));
+    }
+
+    @Activate
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    void activate() {
+        final File restoreDirectoryFile = new File(restoreDirectoryPath);
+        final String[] files = restoreDirectoryFile.list();
+        if (files == null || files.length == 0) {
+            LOG.debug("Restore directory {} does not exist or is empty", restoreDirectoryFile);
+            return;
+        }
+
+        if (files.length > 1) {
+            LOG.error(
+                "Found {} files in clustered datastore restore directory {} - expected 1. No restore will be attempted",
+                files.length, restoreDirectoryFile);
+            return;
+        }
+
+        final File restoreFile = new File(restoreDirectoryFile, files[0]);
+        LOG.info("Clustered datastore will be restored from file {}", restoreFile);
+
+        try (FileInputStream fis = new FileInputStream(restoreFile)) {
+            DatastoreSnapshotList snapshots = deserialize(fis);
+            LOG.debug("Deserialized {} snapshots", snapshots.size());
+
+            for (DatastoreSnapshot snapshot: snapshots) {
+                datastoreSnapshots.put(snapshot.getType(), snapshot);
+            }
+        } catch (ClassNotFoundException | IOException e) {
+            LOG.error("Error reading clustered datastore restore file {}", restoreFile, e);
+        } finally {
+            if (!restoreFile.delete()) {
+                LOG.error("Could not delete clustered datastore restore file {}", restoreFile);
+            }
+        }
+    }
+
+    private static DatastoreSnapshotList deserialize(final InputStream inputStream)
+            throws IOException, ClassNotFoundException {
+        try (ObjectInputStream ois = new ObjectInputStream(inputStream)) {
+            return (DatastoreSnapshotList) ois.readObject();
+        }
+    }
+}
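
Outside OSGi the component can be exercised directly, for example from a same-package test, since activate() is package-private. A sketch under that assumption:

    // Same-package test sketch: point the component at a directory holding exactly one
    // serialized DatastoreSnapshotList, trigger the scan, then consume a snapshot.
    final DefaultDatastoreSnapshotRestore restore =
            new DefaultDatastoreSnapshotRestore("target/datastore-restore");
    restore.activate();   // caches any snapshots and deletes the backup file
    final Optional<DatastoreSnapshot> config = restore.getAndRemove("config");
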
index e17e3faae94fbc864814423542b87ccbb6e8e333..d7d90474f33ad244688e150a0a54d5835c9859c0 100644 (file)
@@ -7,32 +7,29 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Optional;
-import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
 import java.util.function.Consumer;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
 import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Default implementation of ShardDataTreeChangeListenerPublisher that directly generates and publishes
- * notifications for DataTreeChangeListeners.
+ * notifications for DataTreeChangeListeners. This class is NOT thread-safe.
  *
  * @author Thomas Pantelis
  */
-@NotThreadSafe
 final class DefaultShardDataTreeChangeListenerPublisher extends AbstractDOMStoreTreeChangePublisher
         implements ShardDataTreeChangeListenerPublisher {
     private static final Logger LOG = LoggerFactory.getLogger(DefaultShardDataTreeChangeListenerPublisher.class);
     private String logContext;
 
-    DefaultShardDataTreeChangeListenerPublisher(String logContext) {
+    DefaultShardDataTreeChangeListenerPublisher(final String logContext) {
         this.logContext = logContext;
     }
 
@@ -48,42 +45,38 @@ final class DefaultShardDataTreeChangeListenerPublisher extends AbstractDOMStore
     }
 
     @Override
-    protected void notifyListener(AbstractDOMDataTreeChangeListenerRegistration<?> registration,
-            Collection<DataTreeCandidate> changes) {
-        LOG.debug("{}: notifyListener: listener: {}", logContext, registration.getInstance());
-        registration.getInstance().onDataTreeChanged(changes);
+    protected void notifyListener(final Reg registration, final List<DataTreeCandidate> changes) {
+        final var listener = registration.listener();
+        LOG.debug("{}: notifyListener: listener: {}", logContext, listener);
+        listener.onDataTreeChanged(changes);
     }
 
     @Override
-    protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
+    protected void registrationRemoved(final Reg registration) {
         LOG.debug("Registration {} removed", registration);
     }
 
     @Override
-    public void registerTreeChangeListener(YangInstanceIdentifier treeId, DOMDataTreeChangeListener listener,
-            Optional<DataTreeCandidate> initialState,
-            Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+    public void registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener, final Optional<DataTreeCandidate> initialState,
+            final Consumer<Registration> onRegistration) {
         registerTreeChangeListener(treeId, listener, onRegistration);
 
         if (initialState.isPresent()) {
-            notifySingleListener(treeId, listener, initialState.get(), logContext);
+            notifySingleListener(treeId, listener, initialState.orElseThrow(), logContext);
         } else {
             listener.onInitialData();
         }
     }
 
-    void registerTreeChangeListener(YangInstanceIdentifier treeId, DOMDataTreeChangeListener listener,
-            Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+    void registerTreeChangeListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener,
+            final Consumer<Registration> onRegistration) {
         LOG.debug("{}: registerTreeChangeListener: path: {}, listener: {}", logContext, treeId, listener);
-
-        AbstractDOMDataTreeChangeListenerRegistration<DOMDataTreeChangeListener> registration =
-                super.registerTreeChangeListener(treeId, listener);
-
-        onRegistration.accept(registration);
+        onRegistration.accept(super.registerTreeChangeListener(treeId, listener));
     }
 
-    static void notifySingleListener(YangInstanceIdentifier treeId, DOMDataTreeChangeListener listener,
-            DataTreeCandidate state, String logContext) {
+    static void notifySingleListener(final YangInstanceIdentifier treeId, final DOMDataTreeChangeListener listener,
+            final DataTreeCandidate state, final String logContext) {
         LOG.debug("{}: notifySingleListener: path: {}, listener: {}", logContext, treeId, listener);
         DefaultShardDataTreeChangeListenerPublisher publisher =
                 new DefaultShardDataTreeChangeListenerPublisher(logContext);
index 02cfda36090dd35ee80e2cee8c46de525449d4c5..740aef92b8d38bf61ab8e59316f20397efd4ace0 100644 (file)
@@ -8,12 +8,11 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
-import java.util.EventListener;
-import javax.annotation.concurrent.GuardedBy;
+import org.checkerframework.checker.lock.qual.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 
-class DelayedDataTreeChangeListenerRegistration<L extends EventListener> implements ListenerRegistration<L> {
+class DelayedDataTreeChangeListenerRegistration implements Registration {
     private final RegisterDataTreeChangeListener registrationMessage;
     private final ActorRef registrationActor;
 
@@ -32,17 +31,6 @@ class DelayedDataTreeChangeListenerRegistration<L extends EventListener> impleme
         }
     }
 
-    @Override
-    public L getInstance() {
-        // ObjectRegistration annotates this method as @Nonnull but we could return null if the delegate is not set yet.
-        // In reality, we do not and should not ever call this method on DelayedDataTreeChangeListenerRegistration
-        // instances anyway but, since we have to provide an implementation to satisfy the interface, we throw
-        // UnsupportedOperationException to honor the API contract of not returning null and to avoid a FindBugs error
-        // for possibly returning null.
-        throw new UnsupportedOperationException(
-                "getInstance should not be called on this instance since it could be null");
-    }
-
     @Override
     public synchronized void close() {
         closed = true;
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
deleted file mode 100644 (file)
index 16198ff..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSystem;
-import com.google.common.annotations.VisibleForTesting;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * Implements a distributed DOMStore using Akka Patterns.ask().
- */
-public class DistributedDataStore extends AbstractDataStore {
-
-    private final TransactionContextFactory txContextFactory;
-
-    public DistributedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
-            final Configuration configuration, final DatastoreContextFactory datastoreContextFactory,
-            final DatastoreSnapshot restoreFromSnapshot) {
-        super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot);
-        this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
-    }
-
-    @VisibleForTesting
-    DistributedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier) {
-        super(actorUtils, identifier);
-        this.txContextFactory = new TransactionContextFactory(getActorUtils(), getIdentifier());
-    }
-
-
-    @Override
-    public DOMStoreTransactionChain createTransactionChain() {
-        return txContextFactory.createTransactionChain();
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction() {
-        return new TransactionProxy(txContextFactory, TransactionType.READ_ONLY);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return new TransactionProxy(txContextFactory, TransactionType.WRITE_ONLY);
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return new TransactionProxy(txContextFactory, TransactionType.READ_WRITE);
-    }
-
-    @Override
-    public void close() {
-        txContextFactory.close();
-        super.close();
-    }
-}
index 0846590c1e1cd912320b2d5b13bc5399bf43f964..350b915b579cf724c181db200905da4fe3981f0d 100644 (file)
@@ -7,12 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import akka.actor.ActorSystem;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,46 +32,47 @@ public final class DistributedDataStoreFactory {
                 introspector, updater, null);
     }
 
+    // TODO: separate out settle wait so it is better controlled
     public static AbstractDataStore createInstance(final DOMSchemaService schemaService,
             final DatastoreContext initialDatastoreContext, final DatastoreSnapshotRestore datastoreSnapshotRestore,
             final ActorSystemProvider actorSystemProvider, final DatastoreContextIntrospector introspector,
             final DatastoreContextPropertiesUpdater updater, final Configuration orgConfig) {
 
+        final AbstractDataStore dataStore = createInstance(actorSystemProvider, initialDatastoreContext,
+            introspector, datastoreSnapshotRestore, orgConfig);
+
+        updater.setListener(dataStore);
+
+        schemaService.registerSchemaContextListener(dataStore::onModelContextUpdated);
+
+        dataStore.setCloseable(updater);
+        dataStore.waitTillReady();
+
+        return dataStore;
+    }
+
+    public static AbstractDataStore createInstance(final ActorSystemProvider actorSystemProvider,
+            final DatastoreContext initialDatastoreContext, final DatastoreContextIntrospector introspector,
+            final DatastoreSnapshotRestore datastoreSnapshotRestore, final Configuration orgConfig) {
+
         final String datastoreName = initialDatastoreContext.getDataStoreName();
         LOG.info("Create data store instance of type : {}", datastoreName);
 
-        final ActorSystem actorSystem = actorSystemProvider.getActorSystem();
-        final DatastoreSnapshot restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName);
+        final var actorSystem = actorSystemProvider.getActorSystem();
+        final var restoreFromSnapshot = datastoreSnapshotRestore.getAndRemove(datastoreName).orElse(null);
 
-        Configuration config;
+        final Configuration config;
         if (orgConfig == null) {
             config = new ConfigurationImpl(DEFAULT_MODULE_SHARDS_PATH, DEFAULT_MODULES_PATH);
         } else {
             config = orgConfig;
         }
-        final ClusterWrapper clusterWrapper = new ClusterWrapperImpl(actorSystem);
-        final DatastoreContextFactory contextFactory = introspector.newContextFactory();
+        final var clusterWrapper = new ClusterWrapperImpl(actorSystem);
+        final var contextFactory = introspector.newContextFactory();
 
-        // This is the potentially-updated datastore context, distinct from the initial one
-        final DatastoreContext datastoreContext = contextFactory.getBaseDatastoreContext();
-
-        final AbstractDataStore dataStore;
-        if (datastoreContext.isUseTellBasedProtocol()) {
-            dataStore = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory,
-                restoreFromSnapshot);
-            LOG.info("Data store {} is using tell-based protocol", datastoreName);
-        } else {
-            dataStore = new DistributedDataStore(actorSystem, clusterWrapper, config, contextFactory,
-                restoreFromSnapshot);
-            LOG.info("Data store {} is using ask-based protocol", datastoreName);
-        }
-        updater.setListener(dataStore);
-
-        schemaService.registerSchemaContextListener(dataStore);
-
-        dataStore.setCloseable(updater);
-        dataStore.waitTillReady();
-
-        return dataStore;
+        final var ret = new ClientBackedDataStore(actorSystem, clusterWrapper, config, contextFactory,
+            restoreFromSnapshot);
+        LOG.info("Data store {} is using tell-based protocol", datastoreName);
+        return ret;
     }
 }
index 8d696c0448f68fbe0104251d0bf578f83ad1adc6..5f4e30978ef8952a4813ca6f17574af0abc7ccd6 100644 (file)
@@ -7,15 +7,23 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import com.google.common.annotations.Beta;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
- * The public interface exposed vi a DistributedDataStore via the OSGi registry.
+ * The public interface exposed by an AbstractDataStore via the OSGi registry.
  *
  * @author Thomas Pantelis
  */
 public interface DistributedDataStoreInterface extends DOMStore {
 
     ActorUtils getActorUtils();
+
+    @Beta
+    Registration registerProxyListener(YangInstanceIdentifier shardLookup, YangInstanceIdentifier insideShard,
+            DOMDataTreeChangeListener delegate);
 }
index 41427608e4e7507113113707c12b15ab39838ff3..dca9c0773e06f4ee02c52ace1e9616c346ecb04d 100644 (file)
@@ -7,14 +7,16 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
-import com.google.common.base.Preconditions;
-import java.util.Collection;
+import java.util.List;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -27,25 +29,28 @@ final class ForwardingDataTreeChangeListener implements DOMDataTreeChangeListene
     private static final Logger LOG = LoggerFactory.getLogger(ForwardingDataTreeChangeListener.class);
 
     private final ActorSelection actor;
+    private final ActorRef sendingActor;
 
-    ForwardingDataTreeChangeListener(final ActorSelection actor) {
-        this.actor = Preconditions.checkNotNull(actor, "actor should not be null");
+    ForwardingDataTreeChangeListener(final ActorSelection actor, @Nullable final ActorRef sendingActor) {
+        this.actor = requireNonNull(actor, "actor should not be null");
+        this.sendingActor = sendingActor;
     }
 
     @Override
-    public void onDataTreeChanged(Collection<DataTreeCandidate> changes) {
+    public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
         LOG.debug("Sending DataTreeChanged to {}", actor);
-        actor.tell(new DataTreeChanged(changes), ActorRef.noSender());
+        actor.tell(new DataTreeChanged(changes), sendingActor);
     }
 
     @Override
     public void onInitialData() {
         LOG.debug("Sending OnInitialData to {}", actor);
-        actor.tell(OnInitialData.INSTANCE, ActorRef.noSender());
+        actor.tell(OnInitialData.INSTANCE, sendingActor);
     }
 
     @Override
     public String toString() {
-        return "ForwardingDataTreeChangeListener [actor=" + actor + "]";
+        return "ForwardingDataTreeChangeListener [actor=" + actor
+            + ", sending actor=" + (sendingActor != null ? sendingActor : "NO_SENDER") + "]";
     }
 }
index 6270b380cb701bbcf7f513a8bfa9bacb2923ddf4..c89627800fc72ef8a275ca9b65cfdc19c08e1e85 100644 (file)
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import static com.google.common.base.Verify.verify;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
+import com.google.common.base.VerifyException;
 import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableList;
 import java.util.HashMap;
 import java.util.Map;
-import javax.annotation.Nonnull;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
-import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongRangeSet;
-import org.opendaylight.yangtools.concepts.Builder;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@NotThreadSafe
-final class FrontendClientMetadataBuilder implements Builder<FrontendClientMetadata>, Identifiable<ClientIdentifier> {
-    private static final Logger LOG = LoggerFactory.getLogger(FrontendClientMetadataBuilder.class);
+/**
+ * This class is NOT thread-safe.
+ */
+abstract sealed class FrontendClientMetadataBuilder {
+    static final class Disabled extends FrontendClientMetadataBuilder {
+        Disabled(final String shardName, final ClientIdentifier clientId) {
+            super(shardName, clientId);
+        }
 
-    private final Map<LocalHistoryIdentifier, FrontendHistoryMetadataBuilder> currentHistories = new HashMap<>();
-    private final UnsignedLongRangeSet purgedHistories;
-    private final LocalHistoryIdentifier standaloneId;
-    private final ClientIdentifier identifier;
-    private final String shardName;
+        @Override
+        FrontendClientMetadata build() {
+            return new FrontendClientMetadata(clientId(), ImmutableUnsignedLongSet.of(), ImmutableList.of());
+        }
 
-    FrontendClientMetadataBuilder(final String shardName, final ClientIdentifier identifier) {
-        this.shardName = requireNonNull(shardName);
-        this.identifier = requireNonNull(identifier);
-        purgedHistories = UnsignedLongRangeSet.create();
+        @Override
+        void onHistoryCreated(final LocalHistoryIdentifier historyId) {
+            // No-op
+        }
 
-        // History for stand-alone transactions is always present
-        standaloneId = standaloneHistoryId();
-        currentHistories.put(standaloneId, new FrontendHistoryMetadataBuilder(standaloneId));
-    }
+        @Override
+        void onHistoryClosed(final LocalHistoryIdentifier historyId) {
+            // No-op
+        }
 
-    FrontendClientMetadataBuilder(final String shardName, final FrontendClientMetadata meta) {
-        this.shardName = requireNonNull(shardName);
-        this.identifier = meta.getIdentifier();
-        purgedHistories = UnsignedLongRangeSet.create(meta.getPurgedHistories());
+        @Override
+        void onHistoryPurged(final LocalHistoryIdentifier historyId) {
+            // No-op
+        }
 
-        for (FrontendHistoryMetadata h : meta.getCurrentHistories()) {
-            final FrontendHistoryMetadataBuilder b = new FrontendHistoryMetadataBuilder(identifier, h);
-            currentHistories.put(b.getIdentifier(), b);
+        @Override
+        void onTransactionAborted(final TransactionIdentifier txId) {
+            // No-op
         }
 
-        // Sanity check and recovery
-        standaloneId = standaloneHistoryId();
-        if (!currentHistories.containsKey(standaloneId)) {
-            LOG.warn("{}: Client {} recovered histories {} do not contain stand-alone history, attempting recovery",
-                shardName, identifier, currentHistories);
-            currentHistories.put(standaloneId, new FrontendHistoryMetadataBuilder(standaloneId));
+        @Override
+        void onTransactionCommitted(final TransactionIdentifier txId) {
+            // No-op
         }
-    }
 
-    private LocalHistoryIdentifier standaloneHistoryId() {
-        return new LocalHistoryIdentifier(identifier, 0);
-    }
+        @Override
+        void onTransactionPurged(final TransactionIdentifier txId) {
+            // No-op
+        }
 
-    @Override
-    public FrontendClientMetadata build() {
-        return new FrontendClientMetadata(identifier, purgedHistories.toImmutable(),
-            Collections2.transform(currentHistories.values(), FrontendHistoryMetadataBuilder::build));
-    }
+        @Override
+        void onTransactionsSkipped(final LocalHistoryIdentifier historyId, final ImmutableUnsignedLongSet txIds) {
+            // No-op
+        }
 
-    @Override
-    public ClientIdentifier getIdentifier() {
-        return identifier;
+        @Override
+        LeaderFrontendState toLeaderState(final Shard shard) {
+            return new LeaderFrontendState.Disabled(shard.persistenceId(), clientId(), shard.getDataStore());
+        }
     }
 
-    void onHistoryCreated(final LocalHistoryIdentifier historyId) {
-        final FrontendHistoryMetadataBuilder newMeta = new FrontendHistoryMetadataBuilder(historyId);
-        final FrontendHistoryMetadataBuilder oldMeta = currentHistories.putIfAbsent(historyId, newMeta);
-        if (oldMeta != null) {
-            // This should not be happening, warn about it
-            LOG.warn("{}: Reused local history {}", shardName, historyId);
-        } else {
-            LOG.debug("{}: Created local history {}", shardName, historyId);
+    static final class Enabled extends FrontendClientMetadataBuilder {
+        private final Map<LocalHistoryIdentifier, FrontendHistoryMetadataBuilder> currentHistories = new HashMap<>();
+        private final MutableUnsignedLongSet purgedHistories;
+        private final LocalHistoryIdentifier standaloneId;
+
+        Enabled(final String shardName, final ClientIdentifier clientId) {
+            super(shardName, clientId);
+
+            purgedHistories = MutableUnsignedLongSet.of();
+
+            // History for stand-alone transactions is always present
+            standaloneId = standaloneHistoryId();
+            currentHistories.put(standaloneId, new FrontendHistoryMetadataBuilder(standaloneId));
         }
-    }
 
-    void onHistoryClosed(final LocalHistoryIdentifier historyId) {
-        final FrontendHistoryMetadataBuilder builder = currentHistories.get(historyId);
-        if (builder != null) {
-            builder.onHistoryClosed();
-            LOG.debug("{}: Closed history {}", shardName, historyId);
-        } else {
-            LOG.warn("{}: Closed unknown history {}, ignoring", shardName, historyId);
+        Enabled(final String shardName, final FrontendClientMetadata meta) {
+            super(shardName, meta.clientId());
+
+            purgedHistories = meta.getPurgedHistories().mutableCopy();
+            for (var historyMeta : meta.getCurrentHistories()) {
+                final var builder = new FrontendHistoryMetadataBuilder(clientId(), historyMeta);
+                currentHistories.put(builder.getIdentifier(), builder);
+            }
+
+            // Sanity check and recovery
+            standaloneId = standaloneHistoryId();
+            if (!currentHistories.containsKey(standaloneId)) {
+                LOG.warn("{}: Client {} recovered histories {} do not contain stand-alone history, attempting recovery",
+                    shardName, clientId(), currentHistories);
+                currentHistories.put(standaloneId, new FrontendHistoryMetadataBuilder(standaloneId));
+            }
+        }
+
+        @Override
+        FrontendClientMetadata build() {
+            return new FrontendClientMetadata(clientId(), purgedHistories.immutableCopy(),
+                Collections2.transform(currentHistories.values(), FrontendHistoryMetadataBuilder::build));
+        }
+
+        @Override
+        void onHistoryCreated(final LocalHistoryIdentifier historyId) {
+            final var newMeta = new FrontendHistoryMetadataBuilder(historyId);
+            final var oldMeta = currentHistories.putIfAbsent(historyId, newMeta);
+            if (oldMeta != null) {
+                // This should not be happening, warn about it
+                LOG.warn("{}: Reused local history {}", shardName(), historyId);
+            } else {
+                LOG.debug("{}: Created local history {}", shardName(), historyId);
+            }
+        }
+
+        @Override
+        void onHistoryClosed(final LocalHistoryIdentifier historyId) {
+            final var builder = currentHistories.get(historyId);
+            if (builder != null) {
+                builder.onHistoryClosed();
+                LOG.debug("{}: Closed history {}", shardName(), historyId);
+            } else {
+                LOG.warn("{}: Closed unknown history {}, ignoring", shardName(), historyId);
+            }
         }
-    }
 
-    void onHistoryPurged(final LocalHistoryIdentifier historyId) {
-        final FrontendHistoryMetadataBuilder history = currentHistories.remove(historyId);
-        final long historyBits = historyId.getHistoryId();
-        if (history == null) {
-            if (!purgedHistories.contains(historyBits)) {
+        @Override
+        void onHistoryPurged(final LocalHistoryIdentifier historyId) {
+            final var history = currentHistories.remove(historyId);
+            final long historyBits = historyId.getHistoryId();
+            if (history == null) {
+                if (!purgedHistories.contains(historyBits)) {
+                    purgedHistories.add(historyBits);
+                    LOG.warn("{}: Purging unknown history {}", shardName(), historyId);
+                } else {
+                    LOG.warn("{}: Duplicate purge of history {}", shardName(), historyId);
+                }
+            } else {
                 purgedHistories.add(historyBits);
-                LOG.warn("{}: Purging unknown history {}", shardName, historyId);
+                LOG.debug("{}: Purged history {}", shardName(), historyId);
+            }
+        }
+
+        @Override
+        void onTransactionAborted(final TransactionIdentifier txId) {
+            final var history = getHistory(txId);
+            if (history != null) {
+                history.onTransactionAborted(txId);
+                LOG.debug("{}: Aborted transaction {}", shardName(), txId);
             } else {
-                LOG.warn("{}: Duplicate purge of history {}", shardName, historyId);
+                LOG.warn("{}: Unknown history for aborted transaction {}, ignoring", shardName(), txId);
             }
-        } else {
-            purgedHistories.add(historyBits);
-            LOG.debug("{}: Purged history {}", shardName, historyId);
         }
-    }
 
-    void onTransactionAborted(final TransactionIdentifier txId) {
-        final FrontendHistoryMetadataBuilder history = getHistory(txId);
-        if (history != null) {
-            history.onTransactionAborted(txId);
-            LOG.debug("{}: Aborted transaction {}", shardName, txId);
-        } else {
-            LOG.warn("{}: Unknown history for aborted transaction {}, ignoring", shardName, txId);
+        @Override
+        void onTransactionCommitted(final TransactionIdentifier txId) {
+            final var history = getHistory(txId);
+            if (history != null) {
+                history.onTransactionCommitted(txId);
+                LOG.debug("{}: Committed transaction {}", shardName(), txId);
+            } else {
+                LOG.warn("{}: Unknown history for commited transaction {}, ignoring", shardName(), txId);
+            }
+        }
+
+        @Override
+        void onTransactionPurged(final TransactionIdentifier txId) {
+            final var history = getHistory(txId);
+            if (history != null) {
+                history.onTransactionPurged(txId);
+                LOG.debug("{}: Purged transaction {}", shardName(), txId);
+            } else {
+                LOG.warn("{}: Unknown history for purged transaction {}, ignoring", shardName(), txId);
+            }
         }
-    }
 
-    void onTransactionCommitted(final TransactionIdentifier txId) {
-        final FrontendHistoryMetadataBuilder history = getHistory(txId);
-        if (history != null) {
-            history.onTransactionCommitted(txId);
-            LOG.debug("{}: Committed transaction {}", shardName, txId);
-        } else {
-            LOG.warn("{}: Unknown history for commited transaction {}, ignoring", shardName, txId);
+        @Override
+        void onTransactionsSkipped(final LocalHistoryIdentifier historyId, final ImmutableUnsignedLongSet txIds) {
+            final FrontendHistoryMetadataBuilder history = getHistory(historyId);
+            if (history != null) {
+                history.onTransactionsSkipped(txIds);
+                LOG.debug("{}: History {} skipped transactions {}", shardName(), historyId, txIds);
+            } else {
+                LOG.warn("{}: Unknown history {} for skipped transactions, ignoring", shardName(), historyId);
+            }
         }
-    }
 
-    void onTransactionPurged(final TransactionIdentifier txId) {
-        final FrontendHistoryMetadataBuilder history = getHistory(txId);
-        if (history != null) {
-            history.onTransactionPurged(txId);
-            LOG.debug("{}: Purged transaction {}", shardName, txId);
-        } else {
-            LOG.warn("{}: Unknown history for purged transaction {}, ignoring", shardName, txId);
+        @Override
+        LeaderFrontendState toLeaderState(final Shard shard) {
+            // Note: we have to make sure to *copy* all current state and not leak any views, otherwise leader/follower
+            //       interactions would get intertwined leading to inconsistencies.
+            final var histories = new HashMap<LocalHistoryIdentifier, LocalFrontendHistory>();
+            for (var historyMetaBuilder : currentHistories.values()) {
+                final var historyId = historyMetaBuilder.getIdentifier();
+                if (historyId.getHistoryId() != 0) {
+                    final var state = historyMetaBuilder.toLeaderState(shard);
+                    if (state instanceof LocalFrontendHistory localState) {
+                        histories.put(historyId, localState);
+                    } else {
+                        throw new VerifyException("Unexpected state " + state);
+                    }
+                }
+            }
+
+            final AbstractFrontendHistory singleHistory;
+            final var singleHistoryMeta = currentHistories.get(new LocalHistoryIdentifier(clientId(), 0));
+            if (singleHistoryMeta == null) {
+                final var tree = shard.getDataStore();
+                singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), clientId(), tree);
+            } else {
+                singleHistory = singleHistoryMeta.toLeaderState(shard);
+            }
+
+            return new LeaderFrontendState.Enabled(shard.persistenceId(), clientId(), shard.getDataStore(),
+                purgedHistories.mutableCopy(), singleHistory, histories);
         }
-    }
 
-    /**
-     * Transform frontend metadata for a particular client into its {@link LeaderFrontendState} counterpart.
-     *
-     * @param shard parent shard
-     * @return Leader frontend state
-     */
-    @Nonnull LeaderFrontendState toLeaderState(@Nonnull final Shard shard) {
-        // Note: we have to make sure to *copy* all current state and not leak any views, otherwise leader/follower
-        //       interactions would get intertwined leading to inconsistencies.
-        final Map<LocalHistoryIdentifier, LocalFrontendHistory> histories = new HashMap<>();
-        for (FrontendHistoryMetadataBuilder e : currentHistories.values()) {
-            if (e.getIdentifier().getHistoryId() != 0) {
-                final AbstractFrontendHistory state = e.toLeaderState(shard);
-                verify(state instanceof LocalFrontendHistory, "Unexpected state %s", state);
-                histories.put(e.getIdentifier(), (LocalFrontendHistory) state);
+        @Override
+        ToStringHelper addToStringAttributes(final ToStringHelper helper) {
+            return super.addToStringAttributes(helper).add("current", currentHistories).add("purged", purgedHistories);
+        }
+
+        private FrontendHistoryMetadataBuilder getHistory(final TransactionIdentifier txId) {
+            return getHistory(txId.getHistoryId());
+        }
+
+        private FrontendHistoryMetadataBuilder getHistory(final LocalHistoryIdentifier historyId) {
+            final LocalHistoryIdentifier local;
+            if (historyId.getHistoryId() == 0 && historyId.getCookie() != 0) {
+                // We are pre-creating the history for free-standing transactions with a zero cookie, hence our lookup
+                // needs to account for that.
+                LOG.debug("{}: looking up {} instead of {}", shardName(), standaloneId, historyId);
+                local = standaloneId;
+            } else {
+                local = historyId;
             }
+
+            return currentHistories.get(local);
         }
 
-        final AbstractFrontendHistory singleHistory;
-        final FrontendHistoryMetadataBuilder singleHistoryMeta = currentHistories.get(
-            new LocalHistoryIdentifier(identifier, 0));
-        if (singleHistoryMeta == null) {
-            final ShardDataTree tree = shard.getDataStore();
-            singleHistory = StandaloneFrontendHistory.create(shard.persistenceId(), getIdentifier(), tree);
-        } else {
-            singleHistory = singleHistoryMeta.toLeaderState(shard);
+        private LocalHistoryIdentifier standaloneHistoryId() {
+            return new LocalHistoryIdentifier(clientId(), 0);
         }
+    }
 
-        return new LeaderFrontendState(shard.persistenceId(), getIdentifier(), shard.getDataStore(),
-            purgedHistories.copy(), singleHistory, histories);
+    private static final Logger LOG = LoggerFactory.getLogger(FrontendClientMetadataBuilder.class);
+
+    private final @NonNull ClientIdentifier clientId;
+    private final @NonNull String shardName;
+
+    FrontendClientMetadataBuilder(final String shardName, final ClientIdentifier clientId) {
+        this.shardName = requireNonNull(shardName);
+        this.clientId = requireNonNull(clientId);
     }
 
-    private FrontendHistoryMetadataBuilder getHistory(final TransactionIdentifier txId) {
-        LocalHistoryIdentifier historyId = txId.getHistoryId();
-        if (historyId.getHistoryId() == 0 && historyId.getCookie() != 0) {
-            // We are pre-creating the history for free-standing transactions with a zero cookie, hence our lookup
-            // needs to account for that.
-            LOG.debug("{}: looking up {} instead of {}", shardName, standaloneId, historyId);
-            historyId = standaloneId;
-        }
+    static FrontendClientMetadataBuilder of(final String shardName, final FrontendClientMetadata meta) {
+        // Completely empty histories imply disabled state, as otherwise we'd have a record of the single history --
+        // either purged or active
+        return meta.getCurrentHistories().isEmpty() && meta.getPurgedHistories().isEmpty()
+            ? new Disabled(shardName, meta.clientId()) : new Enabled(shardName, meta);
+    }
+
+    final ClientIdentifier clientId() {
+        return clientId;
+    }
 
-        return currentHistories.get(historyId);
+    final String shardName() {
+        return shardName;
     }
 
+    abstract FrontendClientMetadata build();
+
+    abstract void onHistoryCreated(LocalHistoryIdentifier historyId);
+
+    abstract void onHistoryClosed(LocalHistoryIdentifier historyId);
+
+    abstract void onHistoryPurged(LocalHistoryIdentifier historyId);
+
+    abstract void onTransactionAborted(TransactionIdentifier txId);
+
+    abstract void onTransactionCommitted(TransactionIdentifier txId);
+
+    abstract void onTransactionPurged(TransactionIdentifier txId);
+
+    abstract void onTransactionsSkipped(LocalHistoryIdentifier historyId, ImmutableUnsignedLongSet txIds);
+
+    /**
+     * Transform frontend metadata for a particular client into its {@link LeaderFrontendState} counterpart.
+     *
+     * @param shard parent shard
+     * @return Leader frontend state
+     */
+    abstract @NonNull LeaderFrontendState toLeaderState(@NonNull Shard shard);
+
     @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this).add("identifier", identifier).add("current", currentHistories)
-                .add("purged", purgedHistories).toString();
+    public final String toString() {
+        return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
+    }
+
+    ToStringHelper addToStringAttributes(final ToStringHelper helper) {
+        return helper.add("clientId", clientId);
     }
 }
index e787edb7a80e391bc2d8bd900535f2b2836b8d10..f869e7ac5ac679658ca146ab38685ebf91d65947 100644 (file)
@@ -7,40 +7,39 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.primitives.UnsignedLong;
 import java.util.HashMap;
 import java.util.Map;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.FrontendHistoryMetadata;
-import org.opendaylight.yangtools.concepts.Builder;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
 import org.opendaylight.yangtools.concepts.Identifiable;
 
-final class FrontendHistoryMetadataBuilder implements Builder<FrontendHistoryMetadata>,
-        Identifiable<LocalHistoryIdentifier> {
-
-    private final Map<UnsignedLong, Boolean> closedTransactions;
-    private final RangeSet<UnsignedLong> purgedTransactions;
-    private final LocalHistoryIdentifier identifier;
+final class FrontendHistoryMetadataBuilder implements Identifiable<LocalHistoryIdentifier> {
+    private final @NonNull Map<UnsignedLong, Boolean> closedTransactions;
+    private final @NonNull MutableUnsignedLongSet purgedTransactions;
+    private final @NonNull LocalHistoryIdentifier identifier;
 
     private boolean closed;
 
     FrontendHistoryMetadataBuilder(final LocalHistoryIdentifier identifier) {
-        this.identifier = Preconditions.checkNotNull(identifier);
-        this.purgedTransactions = TreeRangeSet.create();
-        this.closedTransactions = new HashMap<>(2);
+        this.identifier = requireNonNull(identifier);
+        purgedTransactions = MutableUnsignedLongSet.of();
+        closedTransactions = new HashMap<>(2);
     }
 
     FrontendHistoryMetadataBuilder(final ClientIdentifier clientId, final FrontendHistoryMetadata meta) {
         identifier = new LocalHistoryIdentifier(clientId, meta.getHistoryId(), meta.getCookie());
-        closedTransactions = new HashMap<>(meta.getClosedTransactions());
-        purgedTransactions = TreeRangeSet.create(meta.getPurgedTransactions());
+        closedTransactions = meta.getClosedTransactions().mutableCopy();
+        purgedTransactions = meta.getPurgedTransactions().mutableCopy();
         closed = meta.isClosed();
     }
 
@@ -49,14 +48,13 @@ final class FrontendHistoryMetadataBuilder implements Builder<FrontendHistoryMet
         return identifier;
     }
 
-    @Override
     public FrontendHistoryMetadata build() {
         return new FrontendHistoryMetadata(identifier.getHistoryId(), identifier.getCookie(), closed,
-            closedTransactions, purgedTransactions);
+            UnsignedLongBitmap.copyOf(closedTransactions), purgedTransactions.immutableCopy());
     }
 
     void onHistoryClosed() {
-        Preconditions.checkState(identifier.getHistoryId() != 0);
+        checkState(identifier.getHistoryId() != 0);
         closed = true;
     }
 
@@ -69,9 +67,13 @@ final class FrontendHistoryMetadataBuilder implements Builder<FrontendHistoryMet
     }
 
     void onTransactionPurged(final TransactionIdentifier txId) {
-        final UnsignedLong id = UnsignedLong.fromLongBits(txId.getTransactionId());
-        closedTransactions.remove(id);
-        purgedTransactions.add(Range.closedOpen(id, UnsignedLong.ONE.plus(id)));
+        final long txidBits = txId.getTransactionId();
+        closedTransactions.remove(UnsignedLong.fromLongBits(txidBits));
+        purgedTransactions.add(txidBits);
+    }
+
+    void onTransactionsSkipped(final ImmutableUnsignedLongSet txIds) {
+        purgedTransactions.addAll(txIds);
     }
 
     /**
@@ -80,7 +82,7 @@ final class FrontendHistoryMetadataBuilder implements Builder<FrontendHistoryMet
      * @param shard parent shard
      * @return Leader history state
      */
-    @Nonnull AbstractFrontendHistory toLeaderState(@Nonnull final Shard shard) {
+    @NonNull AbstractFrontendHistory toLeaderState(final @NonNull Shard shard) {
         if (identifier.getHistoryId() == 0) {
             return StandaloneFrontendHistory.recreate(shard.persistenceId(), identifier.getClientId(),
                 shard.getDataStore(), closedTransactions, purgedTransactions);
index d323b1026d5b8661b7bf5ac44690c9aa0294dd7d..abb97e59a43701df5f223e526bc8a3596e8c044f 100644 (file)
@@ -7,30 +7,31 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
 import java.util.HashMap;
 import java.util.Map;
-import javax.annotation.Nonnull;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
 import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Frontend state as observed by a shard follower. This class is responsible for maintaining metadata state
  * so that this can be used to seed {@link LeaderFrontendState} with proper state so that the frontend/backend
- * conversation can continue where it left off.
+ * conversation can continue where it left off. This class is NOT thread-safe.
  *
  * @author Robert Varga
  */
-@NotThreadSafe
 final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTreeSnapshotMetadata> {
     private static final Logger LOG = LoggerFactory.getLogger(FrontendMetadata.class);
 
@@ -38,7 +39,7 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
     private final String shardName;
 
     FrontendMetadata(final String shardName) {
-        this.shardName = Preconditions.checkNotNull(shardName);
+        this.shardName = requireNonNull(shardName);
     }
 
     @Override
@@ -57,13 +58,13 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
         LOG.debug("{}: applying snapshot {} over clients {}", shardName, snapshot, clients);
         clients.clear();
 
-        for (FrontendClientMetadata m : snapshot.getClients()) {
-            LOG.debug("{}: applying metadata {}", shardName, m);
-            final FrontendClientMetadataBuilder b = new FrontendClientMetadataBuilder(shardName, m);
-            final FrontendIdentifier client = m.getIdentifier().getFrontendId();
+        for (var clientMeta : snapshot.getClients()) {
+            LOG.debug("{}: applying metadata {}", shardName, clientMeta);
+            final var builder = FrontendClientMetadataBuilder.of(shardName, clientMeta);
+            final var frontendId = clientMeta.clientId().getFrontendId();
 
-            LOG.debug("{}: client {} updated to {}", shardName, client, b);
-            clients.put(client, b);
+            LOG.debug("{}: client {} updated to {}", shardName, frontendId, builder);
+            clients.put(frontendId, builder);
         }
     }
 
@@ -74,13 +75,13 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
     }
 
     private FrontendClientMetadataBuilder ensureClient(final ClientIdentifier id) {
-        final FrontendClientMetadataBuilder existing = clients.get(id.getFrontendId());
-        if (existing != null && id.equals(existing.getIdentifier())) {
+        final var existing = clients.get(id.getFrontendId());
+        if (existing != null && id.equals(existing.clientId())) {
             return existing;
         }
 
-        final FrontendClientMetadataBuilder client = new FrontendClientMetadataBuilder(shardName, id);
-        final FrontendClientMetadataBuilder previous = clients.put(id.getFrontendId(), client);
+        final var client = new FrontendClientMetadataBuilder.Enabled(shardName, id);
+        final var previous = clients.put(id.getFrontendId(), client);
         if (previous != null) {
             LOG.debug("{}: Replaced client {} with {}", shardName, previous, client);
         } else {
@@ -119,12 +120,45 @@ final class FrontendMetadata extends ShardDataTreeMetadata<FrontendShardDataTree
         ensureClient(txId.getHistoryId().getClientId()).onTransactionPurged(txId);
     }
 
+    @Override
+    void onTransactionsSkipped(final LocalHistoryIdentifier historyId, final ImmutableUnsignedLongSet txIds) {
+        ensureClient(historyId.getClientId()).onTransactionsSkipped(historyId, txIds);
+    }
+
     /**
      * Transform frontend metadata into an active leader state map.
      *
      * @return Leader frontend state
      */
-    @Nonnull Map<FrontendIdentifier, LeaderFrontendState> toLeaderState(@Nonnull final Shard shard) {
+    @NonNull Map<FrontendIdentifier, LeaderFrontendState> toLeaderState(final @NonNull Shard shard) {
         return new HashMap<>(Maps.transformValues(clients, meta -> meta.toLeaderState(shard)));
     }
+
+    void disableTracking(final ClientIdentifier clientId) {
+        final var frontendId = clientId.getFrontendId();
+        final var client = clients.get(frontendId);
+        if (client == null) {
+            // When we have not seen the client before, we still need to disable tracking for it since this only gets
+            // triggered once.
+            LOG.debug("{}: disableTracking {} does not match any client, pre-disabling client.", shardName, clientId);
+            clients.put(frontendId, new FrontendClientMetadataBuilder.Disabled(shardName, clientId));
+            return;
+        }
+        if (!clientId.equals(client.clientId())) {
+            LOG.debug("{}: disableTracking {} does not match client {}, ignoring", shardName, clientId, client);
+            return;
+        }
+        if (client instanceof FrontendClientMetadataBuilder.Disabled) {
+            LOG.debug("{}: client {} is has already disabled tracking", shardName, client);
+            return;
+        }
+
+        verify(clients.replace(frontendId, client, new FrontendClientMetadataBuilder.Disabled(shardName, clientId)));
+    }
+
+    ImmutableSet<ClientIdentifier> getClients() {
+        return clients.values().stream()
+            .map(FrontendClientMetadataBuilder::clientId)
+            .collect(ImmutableSet.toImmutableSet());
+    }
 }
index daba580906064a081714a67ea427b693cf9a47a8..e1b8a3fb94a8aed70bdc44c41d0f592a70227d77 100644 (file)
@@ -7,10 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Optional;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.controller.cluster.access.commands.ExistsTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ExistsTransactionSuccess;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
@@ -28,11 +28,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Read-only frontend transaction state as observed by the shard leader.
+ * Read-only frontend transaction state as observed by the shard leader. This class is NOT thread-safe.
  *
  * @author Robert Varga
  */
-@NotThreadSafe
 final class FrontendReadOnlyTransaction extends FrontendTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(FrontendReadOnlyTransaction.class);
 
@@ -41,7 +40,7 @@ final class FrontendReadOnlyTransaction extends FrontendTransaction {
     private FrontendReadOnlyTransaction(final AbstractFrontendHistory history,
             final ReadOnlyShardDataTreeTransaction transaction) {
         super(history, transaction.getIdentifier());
-        this.openTransaction = Preconditions.checkNotNull(transaction);
+        openTransaction = requireNonNull(transaction);
     }
 
     static FrontendReadOnlyTransaction create(final AbstractFrontendHistory history,
@@ -51,7 +50,7 @@ final class FrontendReadOnlyTransaction extends FrontendTransaction {
 
     // Sequence has already been checked
     @Override
-    @Nullable TransactionSuccess<?> doHandleRequest(final TransactionRequest<?> request, final RequestEnvelope envelope,
+    TransactionSuccess<?> doHandleRequest(final TransactionRequest<?> request, final RequestEnvelope envelope,
             final long now) throws RequestException {
         if (request instanceof ExistsTransactionRequest) {
             return handleExistsTransaction((ExistsTransactionRequest) request);
@@ -75,21 +74,21 @@ final class FrontendReadOnlyTransaction extends FrontendTransaction {
             final long now) {
         // The only valid request here is with abort protocol
         final Optional<PersistenceProtocol> optProto = request.getPersistenceProtocol();
-        Preconditions.checkArgument(optProto.isPresent(), "Commit protocol is missing in %s", request);
-        Preconditions.checkArgument(optProto.get() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s",
-                request);
+        checkArgument(optProto.isPresent(), "Commit protocol is missing in %s", request);
+        checkArgument(optProto.orElseThrow() == PersistenceProtocol.ABORT, "Unsupported commit protocol in %s",
+            request);
         openTransaction.abort(() -> recordAndSendSuccess(envelope, now,
             new ModifyTransactionSuccess(request.getTarget(), request.getSequence())));
     }
 
     private ExistsTransactionSuccess handleExistsTransaction(final ExistsTransactionRequest request) {
-        final Optional<NormalizedNode<?, ?>> data = openTransaction.getSnapshot().readNode(request.getPath());
+        final Optional<NormalizedNode> data = openTransaction.getSnapshot().readNode(request.getPath());
         return recordSuccess(request.getSequence(), new ExistsTransactionSuccess(openTransaction.getIdentifier(),
             request.getSequence(), data.isPresent()));
     }
 
     private ReadTransactionSuccess handleReadTransaction(final ReadTransactionRequest request) {
-        final Optional<NormalizedNode<?, ?>> data = openTransaction.getSnapshot().readNode(request.getPath());
+        final Optional<NormalizedNode> data = openTransaction.getSnapshot().readNode(request.getPath());
         return recordSuccess(request.getSequence(), new ReadTransactionSuccess(openTransaction.getIdentifier(),
             request.getSequence(), data));
     }
index 28ab70a351bc15b56b68e5057abea8faf721dc78..c626791547aafea037bb806d2ec1200b8ea0eb06 100644 (file)
@@ -7,13 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
 import java.util.Collection;
 import java.util.Optional;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ExistsTransactionRequest;
@@ -41,18 +42,18 @@ import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Frontend read-write transaction state as observed by the shard leader.
+ * Frontend read-write transaction state as observed by the shard leader. This class is NOT thread-safe.
  *
  * @author Robert Varga
  */
-@NotThreadSafe
 final class FrontendReadWriteTransaction extends FrontendTransaction {
     private enum CommitStage {
         READY,
@@ -72,7 +73,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
         final RequestException cause;
 
         Failed(final RequestException cause) {
-            this.cause = Preconditions.checkNotNull(cause);
+            this.cause = requireNonNull(cause);
         }
 
         @Override
@@ -85,7 +86,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
         final ReadWriteShardDataTreeTransaction openTransaction;
 
         Open(final ReadWriteShardDataTreeTransaction openTransaction) {
-            this.openTransaction = Preconditions.checkNotNull(openTransaction);
+            this.openTransaction = requireNonNull(openTransaction);
         }
 
         @Override
@@ -99,8 +100,8 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
         CommitStage stage;
 
         Ready(final ShardDataTreeCohort readyCohort) {
-            this.readyCohort = Preconditions.checkNotNull(readyCohort);
-            this.stage = CommitStage.READY;
+            this.readyCohort = requireNonNull(readyCohort);
+            stage = CommitStage.READY;
         }
 
         @Override
@@ -113,7 +114,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
         final DataTreeModification sealedModification;
 
         Sealed(final DataTreeModification sealedModification) {
-            this.sealedModification = Preconditions.checkNotNull(sealedModification);
+            this.sealedModification = requireNonNull(sealedModification);
         }
 
         @Override
@@ -163,13 +164,13 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
     private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id,
             final ReadWriteShardDataTreeTransaction transaction) {
         super(history, id);
-        this.state = new Open(transaction);
+        state = new Open(transaction);
     }
 
     private FrontendReadWriteTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id,
             final DataTreeModification mod) {
         super(history, id);
-        this.state = new Sealed(mod);
+        state = new Sealed(mod);
     }
 
     static FrontendReadWriteTransaction createOpen(final AbstractFrontendHistory history,
@@ -184,22 +185,22 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
 
     // Sequence has already been checked
     @Override
-    @Nullable TransactionSuccess<?> doHandleRequest(final TransactionRequest<?> request, final RequestEnvelope envelope,
+    TransactionSuccess<?> doHandleRequest(final TransactionRequest<?> request, final RequestEnvelope envelope,
             final long now) throws RequestException {
-        if (request instanceof ModifyTransactionRequest) {
-            return handleModifyTransaction((ModifyTransactionRequest) request, envelope, now);
-        } else if (request instanceof CommitLocalTransactionRequest) {
-            handleCommitLocalTransaction((CommitLocalTransactionRequest) request, envelope, now);
+        if (request instanceof ModifyTransactionRequest modifyRequest) {
+            return handleModifyTransaction(modifyRequest, envelope, now);
+        } else if (request instanceof CommitLocalTransactionRequest commitLocalRequest) {
+            handleCommitLocalTransaction(commitLocalRequest, envelope, now);
             return null;
-        } else if (request instanceof ExistsTransactionRequest) {
-            return handleExistsTransaction((ExistsTransactionRequest) request);
-        } else if (request instanceof ReadTransactionRequest) {
-            return handleReadTransaction((ReadTransactionRequest) request);
-        } else if (request instanceof TransactionPreCommitRequest) {
-            handleTransactionPreCommit((TransactionPreCommitRequest) request, envelope, now);
+        } else if (request instanceof ExistsTransactionRequest existsRequest) {
+            return handleExistsTransaction(existsRequest);
+        } else if (request instanceof ReadTransactionRequest readRequest) {
+            return handleReadTransaction(readRequest);
+        } else if (request instanceof TransactionPreCommitRequest preCommitRequest) {
+            handleTransactionPreCommit(preCommitRequest, envelope, now);
             return null;
-        } else if (request instanceof TransactionDoCommitRequest) {
-            handleTransactionDoCommit((TransactionDoCommitRequest) request, envelope, now);
+        } else if (request instanceof TransactionDoCommitRequest doCommitRequest) {
+            handleTransactionDoCommit(doCommitRequest, envelope, now);
             return null;
         } else if (request instanceof TransactionAbortRequest) {
             return handleTransactionAbort(request.getSequence(), envelope, now);
@@ -349,9 +350,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
 
         final Ready ready = checkReady();
         startAbort();
-        ready.readyCohort.abort(new FutureCallback<Void>() {
+        ready.readyCohort.abort(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
+            public void onSuccess(final Empty result) {
                 recordAndSendSuccess(envelope, now, new TransactionAbortSuccess(getIdentifier(), sequence));
                 finishAbort();
             }
@@ -377,9 +378,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             case READY:
                 ready.stage = CommitStage.CAN_COMMIT_PENDING;
                 LOG.debug("{}: Transaction {} initiating canCommit", persistenceId(), getIdentifier());
-                checkReady().readyCohort.canCommit(new FutureCallback<Void>() {
+                checkReady().readyCohort.canCommit(new FutureCallback<>() {
                     @Override
-                    public void onSuccess(final Void result) {
+                    public void onSuccess(final Empty result) {
                         successfulCanCommit(envelope, now);
                     }
 
@@ -429,9 +430,9 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             case READY:
                 ready.stage = CommitStage.CAN_COMMIT_PENDING;
                 LOG.debug("{}: Transaction {} initiating direct canCommit", persistenceId(), getIdentifier());
-                ready.readyCohort.canCommit(new FutureCallback<Void>() {
+                ready.readyCohort.canCommit(new FutureCallback<>() {
                     @Override
-                    public void onSuccess(final Void result) {
+                    public void onSuccess(final Empty result) {
                         successfulDirectCanCommit(envelope, now);
                     }
 
@@ -511,7 +512,8 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
 
         final Optional<Exception> optFailure = request.getDelayedFailure();
         if (optFailure.isPresent()) {
-            state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification, optFailure.get()));
+            state = new Ready(history().createFailedCohort(getIdentifier(), sealedModification,
+                optFailure.orElseThrow()));
         } else {
             state = new Ready(history().createReadyCohort(getIdentifier(), sealedModification, Optional.empty()));
         }
@@ -524,13 +526,13 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
     }
 
     private ExistsTransactionSuccess handleExistsTransaction(final ExistsTransactionRequest request) {
-        final Optional<NormalizedNode<?, ?>> data = checkOpen().getSnapshot().readNode(request.getPath());
+        final Optional<NormalizedNode> data = checkOpen().getSnapshot().readNode(request.getPath());
         return recordSuccess(request.getSequence(), new ExistsTransactionSuccess(getIdentifier(), request.getSequence(),
             data.isPresent()));
     }
 
     private ReadTransactionSuccess handleReadTransaction(final ReadTransactionRequest request) {
-        final Optional<NormalizedNode<?, ?>> data = checkOpen().getSnapshot().readNode(request.getPath());
+        final Optional<NormalizedNode> data = checkOpen().getSnapshot().readNode(request.getPath());
         return recordSuccess(request.getSequence(), new ReadTransactionSuccess(getIdentifier(), request.getSequence(),
             data));
     }
@@ -545,10 +547,10 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             for (TransactionModification m : modifications) {
                 if (m instanceof TransactionDelete) {
                     modification.delete(m.getPath());
-                } else if (m instanceof TransactionWrite) {
-                    modification.write(m.getPath(), ((TransactionWrite) m).getData());
-                } else if (m instanceof TransactionMerge) {
-                    modification.merge(m.getPath(), ((TransactionMerge) m).getData());
+                } else if (m instanceof TransactionWrite write) {
+                    modification.write(m.getPath(), write.getData());
+                } else if (m instanceof TransactionMerge merge) {
+                    modification.merge(m.getPath(), merge.getData());
                 } else {
                     LOG.warn("{}: ignoring unhandled modification {}", persistenceId(), m);
                 }
@@ -556,8 +558,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
         }
     }
 
-    @Nullable
-    private TransactionSuccess<?> handleModifyTransaction(final ModifyTransactionRequest request,
+    private @Nullable TransactionSuccess<?> handleModifyTransaction(final ModifyTransactionRequest request,
             final RequestEnvelope envelope, final long now) throws RequestException {
         // We need to examine the persistence protocol first to see if this is an idempotent request. If there is no
         // protocol, there is nothing for us to do.
@@ -567,7 +568,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
             return replyModifySuccess(request.getSequence());
         }
 
-        switch (maybeProto.get()) {
+        switch (maybeProto.orElseThrow()) {
             case ABORT:
                 if (ABORTING.equals(state)) {
                     LOG.debug("{}: Transaction {} already aborting", persistenceId(), getIdentifier());
@@ -593,7 +594,7 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
                 coordinatedCommit(envelope, now);
                 return null;
             default:
-                LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.get());
+                LOG.warn("{}: rejecting unsupported protocol {}", persistenceId(), maybeProto.orElseThrow());
                 throw new UnsupportedRequestException(request);
         }
     }
@@ -619,20 +620,17 @@ final class FrontendReadWriteTransaction extends FrontendTransaction {
     }
 
     private ReadWriteShardDataTreeTransaction checkOpen() {
-        Preconditions.checkState(state instanceof Open, "%s expect to be open, is in state %s", getIdentifier(),
-            state);
+        checkState(state instanceof Open, "%s expect to be open, is in state %s", getIdentifier(), state);
         return ((Open) state).openTransaction;
     }
 
     private Ready checkReady() {
-        Preconditions.checkState(state instanceof Ready, "%s expect to be ready, is in state %s", getIdentifier(),
-            state);
+        checkState(state instanceof Ready, "%s expect to be ready, is in state %s", getIdentifier(), state);
         return (Ready) state;
     }
 
     private DataTreeModification checkSealed() {
-        Preconditions.checkState(state instanceof Sealed, "%s expect to be sealed, is in state %s", getIdentifier(),
-            state);
+        checkState(state instanceof Sealed, "%s expect to be sealed, is in state %s", getIdentifier(), state);
         return ((Sealed) state).sealedModification;
     }
 
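A minimal, self-contained sketch of the Optional accessor idiom appearing in the hunks above, using a hypothetical OptionalDemo class that is not part of this change: for a value already checked with isPresent(), orElseThrow() returns the same result as get(), but its name states the failure mode explicitly.

    import java.util.Optional;

    final class OptionalDemo {
        public static void main(final String[] args) {
            final Optional<String> maybe = Optional.of("value");
            if (maybe.isPresent()) {
                // orElseThrow() yields "value" here; it would throw NoSuchElementException only if empty
                System.out.println(maybe.orElseThrow());
            }
        }
    }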
index 25cb174ea6a4085f4d767cf9ce62a85b61ae30ef..de46d9d2ae4e90b766bace83bd5b19d80ea3e2c5 100644
@@ -7,14 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Verify;
 import java.util.ArrayDeque;
 import java.util.Optional;
 import java.util.Queue;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.IncrementTransactionSequenceRequest;
 import org.opendaylight.controller.cluster.access.commands.IncrementTransactionSequenceSuccess;
 import org.opendaylight.controller.cluster.access.commands.OutOfOrderRequestException;
@@ -29,11 +29,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Frontend common transaction state as observed by the shard leader.
+ * Frontend common transaction state as observed by the shard leader. This class is NOT thread-safe.
  *
  * @author Robert Varga
  */
-@NotThreadSafe
 abstract class FrontendTransaction implements Identifiable<TransactionIdentifier> {
     private static final Logger LOG = LoggerFactory.getLogger(FrontendTransaction.class);
 
@@ -53,8 +52,8 @@ abstract class FrontendTransaction implements Identifiable<TransactionIdentifier
     private RequestException previousFailure;
 
     FrontendTransaction(final AbstractFrontendHistory history, final TransactionIdentifier id) {
-        this.history = Preconditions.checkNotNull(history);
-        this.id = Preconditions.checkNotNull(id);
+        this.history = requireNonNull(history);
+        this.id = requireNonNull(id);
     }
 
     @Override
@@ -120,9 +119,8 @@ abstract class FrontendTransaction implements Identifiable<TransactionIdentifier
 
     // Request order has already been checked by caller and replaySequence()
     @SuppressWarnings("checkstyle:IllegalCatch")
-    @Nullable
-    final TransactionSuccess<?> handleRequest(final TransactionRequest<?> request, final RequestEnvelope envelope,
-            final long now) throws RequestException {
+    final @Nullable TransactionSuccess<?> handleRequest(final TransactionRequest<?> request,
+            final RequestEnvelope envelope, final long now) throws RequestException {
         if (request instanceof IncrementTransactionSequenceRequest) {
             final IncrementTransactionSequenceRequest incr = (IncrementTransactionSequenceRequest) request;
             expectedSequence += incr.getIncrement();
@@ -151,8 +149,7 @@ abstract class FrontendTransaction implements Identifiable<TransactionIdentifier
         }
     }
 
-    @Nullable
-    abstract TransactionSuccess<?> doHandleRequest(TransactionRequest<?> request, RequestEnvelope envelope,
+    abstract @Nullable TransactionSuccess<?> doHandleRequest(TransactionRequest<?> request, RequestEnvelope envelope,
             long now) throws RequestException;
 
     abstract void retire();
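A minimal sketch of the two idioms the hunks above switch to, using a hypothetical Example class that is not part of this change: java.util.Objects.requireNonNull in place of Guava's Preconditions.checkNotNull, and Java 16 pattern matching for instanceof in place of an explicit cast.

    import static java.util.Objects.requireNonNull;

    final class Example {
        private final Object delegate;

        Example(final Object delegate) {
            // Throws NullPointerException on null, matching Preconditions.checkNotNull semantics
            this.delegate = requireNonNull(delegate);
        }

        String describe(final Object request) {
            // The instanceof pattern binds the narrowed value, so no separate cast is needed
            if (request instanceof String text) {
                return "text: " + text;
            } else if (request instanceof Integer number) {
                return "number: " + number;
            }
            return "unhandled by " + delegate;
        }

        public static void main(final String[] args) {
            System.out.println(new Example("seed").describe(42));
        }
    }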
index 7611b029ca53a4562753d7346f86db26daef02db..916cb75f5acd9d6c75c3372671cfad28ab5d1865 100644
@@ -7,13 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
+import com.google.common.base.MoreObjects.ToStringHelper;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.Map;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.commands.CreateLocalHistoryRequest;
 import org.opendaylight.controller.cluster.access.commands.DeadHistoryException;
 import org.opendaylight.controller.cluster.access.commands.DestroyLocalHistoryRequest;
@@ -30,235 +31,290 @@ import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.UnsupportedRequestException;
 import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
-import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongRangeSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
 import org.opendaylight.yangtools.concepts.Identifiable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Frontend state as observed by the shard leader. This class is responsible for tracking generations and sequencing
- * in the frontend/backend conversation.
- *
- * @author Robert Varga
+ * in the frontend/backend conversation. This class is NOT thread-safe.
  */
-@NotThreadSafe
-final class LeaderFrontendState implements Identifiable<ClientIdentifier> {
-    private static final Logger LOG = LoggerFactory.getLogger(LeaderFrontendState.class);
-
-    // Histories which have not been purged
-    private final Map<LocalHistoryIdentifier, LocalFrontendHistory> localHistories;
+abstract sealed class LeaderFrontendState implements Identifiable<ClientIdentifier> {
+    static final class Disabled extends LeaderFrontendState {
+        Disabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) {
+            super(persistenceId, clientId, tree);
+        }
 
-    // RangeSet performs automatic merging, hence we keep minimal state tracking information
-    private final UnsignedLongRangeSet purgedHistories;
+        @Override
+        LocalHistorySuccess handleLocalHistoryRequest(final LocalHistoryRequest<?> request,
+                final RequestEnvelope envelope, final long now) throws RequestException {
+            throw new UnsupportedRequestException(request);
+        }
 
-    // Used for all standalone transactions
-    private final AbstractFrontendHistory standaloneHistory;
-    private final ShardDataTree tree;
-    private final ClientIdentifier clientId;
-    private final String persistenceId;
+        @Override
+        TransactionSuccess<?> handleTransactionRequest(final TransactionRequest<?> request,
+                final RequestEnvelope envelope, final long now) throws RequestException {
+            throw new UnsupportedRequestException(request);
+        }
+    }
 
-    private long lastConnectTicks;
-    private long lastSeenTicks;
-    private long expectedTxSequence;
-    private Long lastSeenHistory = null;
+    static final class Enabled extends LeaderFrontendState {
+        // Histories which have not been purged
+        private final Map<LocalHistoryIdentifier, LocalFrontendHistory> localHistories;
 
-    // TODO: explicit failover notification
-    //       Record the ActorRef for the originating actor and when we switch to being a leader send a notification
-    //       to the frontend client -- that way it can immediately start sending requests
+        // UnsignedLongSet performs automatic merging, hence we keep minimal state tracking information
+        private final MutableUnsignedLongSet purgedHistories;
 
-    // TODO: add statistics:
-    // - number of requests processed
-    // - number of histories processed
-    // - per-RequestException throw counters
+        // Used for all standalone transactions
+        private final AbstractFrontendHistory standaloneHistory;
 
-    LeaderFrontendState(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) {
-        this(persistenceId, clientId, tree, UnsignedLongRangeSet.create(),
-            StandaloneFrontendHistory.create(persistenceId, clientId, tree), new HashMap<>());
-    }
+        private long expectedTxSequence;
+        private Long lastSeenHistory = null;
 
-    LeaderFrontendState(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree,
-        final UnsignedLongRangeSet purgedHistories, final AbstractFrontendHistory standaloneHistory,
-        final Map<LocalHistoryIdentifier, LocalFrontendHistory> localHistories) {
-        this.persistenceId = Preconditions.checkNotNull(persistenceId);
-        this.clientId = Preconditions.checkNotNull(clientId);
-        this.tree = Preconditions.checkNotNull(tree);
-        this.purgedHistories = Preconditions.checkNotNull(purgedHistories);
-        this.standaloneHistory = Preconditions.checkNotNull(standaloneHistory);
-        this.localHistories = Preconditions.checkNotNull(localHistories);
-        this.lastSeenTicks = tree.readTime();
-    }
+        Enabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) {
+            this(persistenceId, clientId, tree, MutableUnsignedLongSet.of(),
+                StandaloneFrontendHistory.create(persistenceId, clientId, tree), new HashMap<>());
+        }
 
-    @Override
-    public ClientIdentifier getIdentifier() {
-        return clientId;
-    }
+        Enabled(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree,
+                final MutableUnsignedLongSet purgedHistories, final AbstractFrontendHistory standaloneHistory,
+                final Map<LocalHistoryIdentifier, LocalFrontendHistory> localHistories) {
+            super(persistenceId, clientId, tree);
+            this.purgedHistories = requireNonNull(purgedHistories);
+            this.standaloneHistory = requireNonNull(standaloneHistory);
+            this.localHistories = requireNonNull(localHistories);
+        }
 
-    private void checkRequestSequence(final RequestEnvelope envelope) throws OutOfSequenceEnvelopeException {
-        if (expectedTxSequence != envelope.getTxSequence()) {
-            throw new OutOfSequenceEnvelopeException(expectedTxSequence);
+        @Override
+        @Nullable LocalHistorySuccess handleLocalHistoryRequest(final LocalHistoryRequest<?> request,
+                final RequestEnvelope envelope, final long now) throws RequestException {
+            checkRequestSequence(envelope);
+
+            try {
+                if (request instanceof CreateLocalHistoryRequest req) {
+                    return handleCreateHistory(req, envelope, now);
+                } else if (request instanceof DestroyLocalHistoryRequest req) {
+                    return handleDestroyHistory(req, envelope, now);
+                } else if (request instanceof PurgeLocalHistoryRequest req) {
+                    return handlePurgeHistory(req, envelope, now);
+                } else {
+                    LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
+                    throw new UnsupportedRequestException(request);
+                }
+            } finally {
+                expectNextRequest();
+            }
         }
-    }
 
-    private void expectNextRequest() {
-        expectedTxSequence++;
-    }
+        @Override
+        @Nullable TransactionSuccess<?> handleTransactionRequest(final TransactionRequest<?> request,
+                final RequestEnvelope envelope, final long now) throws RequestException {
+            checkRequestSequence(envelope);
+
+            try {
+                final var lhId = request.getTarget().getHistoryId();
+                final AbstractFrontendHistory history;
+
+                if (lhId.getHistoryId() != 0) {
+                    history = localHistories.get(lhId);
+                    if (history == null) {
+                        if (purgedHistories.contains(lhId.getHistoryId())) {
+                            LOG.warn("{}: rejecting request {} to purged history", persistenceId(), request);
+                            throw new DeadHistoryException(purgedHistories.toRangeSet());
+                        }
+
+                        LOG.warn("{}: rejecting unknown history request {}", persistenceId(), request);
+                        throw new UnknownHistoryException(lastSeenHistory);
+                    }
+                } else {
+                    history = standaloneHistory;
+                }
 
-    @Nullable LocalHistorySuccess handleLocalHistoryRequest(final LocalHistoryRequest<?> request,
-            final RequestEnvelope envelope, final long now) throws RequestException {
-        checkRequestSequence(envelope);
-
-        try {
-            if (request instanceof CreateLocalHistoryRequest) {
-                return handleCreateHistory((CreateLocalHistoryRequest) request, envelope, now);
-            } else if (request instanceof DestroyLocalHistoryRequest) {
-                return handleDestroyHistory((DestroyLocalHistoryRequest) request, envelope, now);
-            } else if (request instanceof PurgeLocalHistoryRequest) {
-                return handlePurgeHistory((PurgeLocalHistoryRequest)request, envelope, now);
-            } else {
-                LOG.warn("{}: rejecting unsupported request {}", persistenceId, request);
-                throw new UnsupportedRequestException(request);
+                return history.handleTransactionRequest(request, envelope, now);
+            } finally {
+                expectNextRequest();
             }
-        } finally {
-            expectNextRequest();
         }
-    }
 
-    private LocalHistorySuccess handleCreateHistory(final CreateLocalHistoryRequest request,
-            final RequestEnvelope envelope, final long now) throws RequestException {
-        final LocalHistoryIdentifier historyId = request.getTarget();
-        final AbstractFrontendHistory existing = localHistories.get(historyId);
-        if (existing != null) {
-            // History already exists: report success
-            LOG.debug("{}: history {} already exists", persistenceId, historyId);
-            return new LocalHistorySuccess(historyId, request.getSequence());
+        @Override
+        void reconnect() {
+            expectedTxSequence = 0;
+            super.reconnect();
         }
 
-        // We have not found the history. Before we create it we need to check history ID sequencing so that we do not
-        // end up resurrecting a purged history.
-        if (purgedHistories.contains(historyId.getHistoryId())) {
-            LOG.debug("{}: rejecting purged request {}", persistenceId, request);
-            throw new DeadHistoryException(purgedHistories.toImmutable());
+        @Override
+        void retire() {
+            super.retire();
+
+            // Clear out all transaction chains
+            localHistories.values().forEach(AbstractFrontendHistory::retire);
+            localHistories.clear();
+            standaloneHistory.retire();
         }
 
-        // Update last history we have seen
-        if (lastSeenHistory == null || Long.compareUnsigned(lastSeenHistory, historyId.getHistoryId()) < 0) {
-            lastSeenHistory = historyId.getHistoryId();
+        @Override
+        ToStringHelper addToStringAttributes(final ToStringHelper helper) {
+            return super.addToStringAttributes(helper).add("purgedHistories", purgedHistories);
         }
 
-        // We have to send the response only after persistence has completed
-        final ShardDataTreeTransactionChain chain = tree.ensureTransactionChain(historyId, () -> {
-            LOG.debug("{}: persisted history {}", persistenceId, historyId);
-            envelope.sendSuccess(new LocalHistorySuccess(historyId, request.getSequence()), tree.readTime() - now);
-        });
+        private LocalHistorySuccess handleCreateHistory(final CreateLocalHistoryRequest request,
+                final RequestEnvelope envelope, final long now) throws RequestException {
+            final var historyId = request.getTarget();
+            final var existing = localHistories.get(historyId);
+            if (existing != null) {
+                // History already exists: report success
+                LOG.debug("{}: history {} already exists", persistenceId(), historyId);
+                return new LocalHistorySuccess(historyId, request.getSequence());
+            }
 
-        localHistories.put(historyId, LocalFrontendHistory.create(persistenceId, tree, chain));
-        LOG.debug("{}: created history {}", persistenceId, historyId);
-        return null;
-    }
+            // We have not found the history. Before we create it we need to check history ID sequencing so that we do
+            // not end up resurrecting a purged history.
+            if (purgedHistories.contains(historyId.getHistoryId())) {
+                LOG.debug("{}: rejecting purged request {}", persistenceId(), request);
+                throw new DeadHistoryException(purgedHistories.toRangeSet());
+            }
 
-    private LocalHistorySuccess handleDestroyHistory(final DestroyLocalHistoryRequest request,
-            final RequestEnvelope envelope, final long now) {
-        final LocalHistoryIdentifier id = request.getTarget();
-        final LocalFrontendHistory existing = localHistories.get(id);
-        if (existing == null) {
-            // History does not exist: report success
-            LOG.debug("{}: history {} does not exist, nothing to destroy", persistenceId, id);
-            return new LocalHistorySuccess(id, request.getSequence());
-        }
+            // Update last history we have seen
+            if (lastSeenHistory == null || Long.compareUnsigned(lastSeenHistory, historyId.getHistoryId()) < 0) {
+                lastSeenHistory = historyId.getHistoryId();
+            }
 
-        existing.destroy(request.getSequence(), envelope, now);
-        return null;
-    }
+            // We have to send the response only after persistence has completed
+            final var chain = tree().ensureTransactionChain(historyId, () -> {
+                LOG.debug("{}: persisted history {}", persistenceId(), historyId);
+                envelope.sendSuccess(new LocalHistorySuccess(historyId, request.getSequence()),
+                    tree().readTime() - now);
+            });
 
-    private LocalHistorySuccess handlePurgeHistory(final PurgeLocalHistoryRequest request,
-            final RequestEnvelope envelope, final long now) {
-        final LocalHistoryIdentifier id = request.getTarget();
-        final LocalFrontendHistory existing = localHistories.remove(id);
-        if (existing == null) {
-            LOG.debug("{}: history {} has already been purged", persistenceId, id);
-            return new LocalHistorySuccess(id, request.getSequence());
+            localHistories.put(historyId, LocalFrontendHistory.create(persistenceId(), tree(), chain));
+            LOG.debug("{}: created history {}", persistenceId(), historyId);
+            return null;
         }
 
-        LOG.debug("{}: purging history {}", persistenceId, id);
-        purgedHistories.add(id.getHistoryId());
-        existing.purge(request.getSequence(), envelope, now);
-        return null;
-    }
+        private LocalHistorySuccess handleDestroyHistory(final DestroyLocalHistoryRequest request,
+                final RequestEnvelope envelope, final long now) {
+            final var id = request.getTarget();
+            final var existing = localHistories.get(id);
+            if (existing == null) {
+                // History does not exist: report success
+                LOG.debug("{}: history {} does not exist, nothing to destroy", persistenceId(), id);
+                return new LocalHistorySuccess(id, request.getSequence());
+            }
 
-    @Nullable TransactionSuccess<?> handleTransactionRequest(final TransactionRequest<?> request,
-            final RequestEnvelope envelope, final long now) throws RequestException {
-        checkRequestSequence(envelope);
+            existing.destroy(request.getSequence(), envelope, now);
+            return null;
+        }
 
-        try {
-            final LocalHistoryIdentifier lhId = request.getTarget().getHistoryId();
-            final AbstractFrontendHistory history;
+        private LocalHistorySuccess handlePurgeHistory(final PurgeLocalHistoryRequest request,
+                final RequestEnvelope envelope, final long now) {
+            final var id = request.getTarget();
+            final var existing = localHistories.remove(id);
+            if (existing == null) {
+                LOG.debug("{}: history {} has already been purged", persistenceId(), id);
+                return new LocalHistorySuccess(id, request.getSequence());
+            }
 
-            if (lhId.getHistoryId() != 0) {
-                history = localHistories.get(lhId);
-                if (history == null) {
-                    if (purgedHistories.contains(lhId.getHistoryId())) {
-                        LOG.warn("{}: rejecting request {} to purged history", persistenceId, request);
-                        throw new DeadHistoryException(purgedHistories.toImmutable());
-                    }
+            LOG.debug("{}: purging history {}", persistenceId(), id);
+            purgedHistories.add(id.getHistoryId());
+            existing.purge(request.getSequence(), envelope, now);
+            return null;
+        }
 
-                    LOG.warn("{}: rejecting unknown history request {}", persistenceId, request);
-                    throw new UnknownHistoryException(lastSeenHistory);
-                }
-            } else {
-                history = standaloneHistory;
+        private void checkRequestSequence(final RequestEnvelope envelope) throws OutOfSequenceEnvelopeException {
+            if (expectedTxSequence != envelope.getTxSequence()) {
+                throw new OutOfSequenceEnvelopeException(expectedTxSequence);
             }
+        }
 
-            return history.handleTransactionRequest(request, envelope, now);
-        } finally {
-            expectNextRequest();
+        private void expectNextRequest() {
+            expectedTxSequence++;
         }
     }
 
+    private static final Logger LOG = LoggerFactory.getLogger(LeaderFrontendState.class);
+
+    private final @NonNull ClientIdentifier clientId;
+    private final @NonNull String persistenceId;
+    private final @NonNull ShardDataTree tree;
+
+    private long lastConnectTicks;
+    private long lastSeenTicks;
+
+    // TODO: explicit failover notification
+    //       Record the ActorRef for the originating actor and when we switch to being a leader send a notification
+    //       to the frontend client -- that way it can immediately start sending requests
+
+    // TODO: add statistics:
+    // - number of requests processed
+    // - number of histories processed
+    // - per-RequestException throw counters
+
+    LeaderFrontendState(final String persistenceId, final ClientIdentifier clientId, final ShardDataTree tree) {
+        this.persistenceId = requireNonNull(persistenceId);
+        this.clientId = requireNonNull(clientId);
+        this.tree = requireNonNull(tree);
+        lastSeenTicks = tree.readTime();
+    }
+
+    @Override
+    public final ClientIdentifier getIdentifier() {
+        return clientId;
+    }
+
+    final String persistenceId() {
+        return persistenceId;
+    }
+
+    final long getLastConnectTicks() {
+        return lastConnectTicks;
+    }
+
+    final long getLastSeenTicks() {
+        return lastSeenTicks;
+    }
+
+    final ShardDataTree tree() {
+        return tree;
+    }
+
+    final void touch() {
+        lastSeenTicks = tree.readTime();
+    }
+
+    abstract @Nullable LocalHistorySuccess handleLocalHistoryRequest(LocalHistoryRequest<?> request,
+            RequestEnvelope envelope, long now) throws RequestException;
+
+    abstract @Nullable TransactionSuccess<?> handleTransactionRequest(TransactionRequest<?> request,
+            RequestEnvelope envelope, long now) throws RequestException;
+
     void reconnect() {
-        expectedTxSequence = 0;
         lastConnectTicks = tree.readTime();
     }
 
     void retire() {
         // Hunt down any transactions associated with this frontend
-        final Iterator<SimpleShardDataTreeCohort> it = tree.cohortIterator();
+        final var it = tree.cohortIterator();
         while (it.hasNext()) {
-            final SimpleShardDataTreeCohort cohort = it.next();
-            if (clientId.equals(cohort.getIdentifier().getHistoryId().getClientId())) {
+            final var cohort = it.next();
+            final var transactionId = cohort.transactionId();
+            if (clientId.equals(transactionId.getHistoryId().getClientId())) {
                 if (cohort.getState() != State.COMMIT_PENDING) {
-                    LOG.debug("{}: Retiring transaction {}", persistenceId, cohort.getIdentifier());
+                    LOG.debug("{}: Retiring transaction {}", persistenceId, transactionId);
                     it.remove();
                 } else {
-                    LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId,
-                        cohort.getIdentifier());
+                    LOG.debug("{}: Transaction {} already committing, not retiring it", persistenceId, transactionId);
                 }
             }
         }
-
-        // Clear out all transaction chains
-        localHistories.values().forEach(AbstractFrontendHistory::retire);
-        localHistories.clear();
-        standaloneHistory.retire();
-    }
-
-    long getLastConnectTicks() {
-        return lastConnectTicks;
     }
 
-    long getLastSeenTicks() {
-        return lastSeenTicks;
-    }
-
-    void touch() {
-        this.lastSeenTicks = tree.readTime();
+    @Override
+    public final String toString() {
+        return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
     }
 
-    @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(LeaderFrontendState.class)
-                .add("clientId", clientId)
-                .add("nanosAgo", tree.readTime() - lastSeenTicks)
-                .add("purgedHistories", purgedHistories)
-                .toString();
+    ToStringHelper addToStringAttributes(final ToStringHelper helper) {
+        return helper.add("clientId", clientId).add("nanosAgo", tree.readTime() - lastSeenTicks);
     }
 }
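A minimal sketch of the "sealed base class with final nested subclasses" shape used above, with hypothetical State/Enabled/Disabled names that are not part of this change; it assumes Java 17, where omitting the permits clause restricts subclasses to this compilation unit.

    abstract sealed class State {
        static final class Enabled extends State {
            @Override
            String handle(final String request) {
                return "handled " + request;
            }
        }

        static final class Disabled extends State {
            @Override
            String handle(final String request) {
                // A disabled state rejects every request outright
                throw new UnsupportedOperationException("disabled, rejecting " + request);
            }
        }

        abstract String handle(String request);

        public static void main(final String[] args) {
            System.out.println(new Enabled().handle("ping"));
        }
    }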
index b6fb127714f07b8529de25bc1760dd7c16532f91..af57577c4d37bc08eb5ebd7df92bdd2df2663b31 100644
@@ -7,11 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorPath;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Props;
-import com.google.common.base.Preconditions;
 
 /**
  * Base class for factories instantiating delegates which are local to the
@@ -24,7 +25,7 @@ abstract class LeaderLocalDelegateFactory<M> {
     private final Shard shard;
 
     protected LeaderLocalDelegateFactory(final Shard shard) {
-        this.shard = Preconditions.checkNotNull(shard);
+        this.shard = requireNonNull(shard);
     }
 
     protected final ActorRef getSelf() {
@@ -47,11 +48,11 @@ abstract class LeaderLocalDelegateFactory<M> {
         return shard.getContext().actorOf(props);
     }
 
-    protected final ActorSelection selectActor(ActorRef ref) {
+    protected final ActorSelection selectActor(final ActorRef ref) {
         return shard.getContext().system().actorSelection(ref.path());
     }
 
-    protected final ActorSelection selectActor(ActorPath path) {
+    protected final ActorSelection selectActor(final ActorPath path) {
         return shard.getContext().system().actorSelection(path);
     }
 
index 17e861a3e7252a9c269801bc00a2ebe9ec31d66b..8226ac3c758cb36fbcb4ef7f6e943c065bbb80f5 100644
@@ -7,10 +7,9 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
 import com.google.common.primitives.UnsignedLong;
 import java.util.HashMap;
 import java.util.Map;
@@ -18,8 +17,8 @@ import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Chained transaction specialization of {@link AbstractFrontendHistory}. It prevents concurrent open transactions.
@@ -31,21 +30,21 @@ final class LocalFrontendHistory extends AbstractFrontendHistory {
 
     private LocalFrontendHistory(final String persistenceId, final ShardDataTree tree,
             final ShardDataTreeTransactionChain chain, final Map<UnsignedLong, Boolean> closedTransactions,
-            final RangeSet<UnsignedLong> purgedTransactions) {
+            final MutableUnsignedLongSet purgedTransactions) {
         super(persistenceId, tree, closedTransactions, purgedTransactions);
-        this.chain = Preconditions.checkNotNull(chain);
+        this.chain = requireNonNull(chain);
     }
 
     static LocalFrontendHistory create(final String persistenceId, final ShardDataTree tree,
             final ShardDataTreeTransactionChain chain) {
-        return new LocalFrontendHistory(persistenceId, tree, chain, ImmutableMap.of(), TreeRangeSet.create());
+        return new LocalFrontendHistory(persistenceId, tree, chain, ImmutableMap.of(), MutableUnsignedLongSet.of());
     }
 
     static LocalFrontendHistory recreate(final String persistenceId, final ShardDataTree tree,
             final ShardDataTreeTransactionChain chain, final Map<UnsignedLong, Boolean> closedTransactions,
-            final RangeSet<UnsignedLong> purgedTransactions) {
+            final MutableUnsignedLongSet purgedTransactions) {
         return new LocalFrontendHistory(persistenceId, tree, chain, new HashMap<>(closedTransactions),
-            TreeRangeSet.create(purgedTransactions));
+            purgedTransactions.mutableCopy());
     }
 
     @Override
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalThreePhaseCommitCohort.java
deleted file mode 100644
index ac279b7..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Fake {@link DOMStoreThreePhaseCommitCohort} instantiated for local transactions to conform with the DOM
- * transaction APIs. It is only used to hold the data from a local DOM transaction ready operation and to
- * initiate direct or coordinated commits from the front-end by sending the ReadyLocalTransaction message.
- * It is not actually called by the front-end to perform 3PC thus the canCommit/preCommit/commit methods
- * are no-ops.
- */
-class LocalThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
-    private static final Logger LOG = LoggerFactory.getLogger(LocalThreePhaseCommitCohort.class);
-
-    private final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction;
-    private final DataTreeModification modification;
-    private final ActorUtils actorUtils;
-    private final ActorSelection leader;
-    private final Exception operationError;
-
-    protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-            final DataTreeModification modification,
-            final Exception operationError) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.leader = requireNonNull(leader);
-        this.transaction = requireNonNull(transaction);
-        this.modification = requireNonNull(modification);
-        this.operationError = operationError;
-    }
-
-    protected LocalThreePhaseCommitCohort(final ActorUtils actorUtils, final ActorSelection leader,
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction, final Exception operationError) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.leader = requireNonNull(leader);
-        this.transaction = requireNonNull(transaction);
-        this.operationError = requireNonNull(operationError);
-        this.modification = null;
-    }
-
-    private Future<Object> initiateCommit(final boolean immediate,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        if (operationError != null) {
-            return Futures.failed(operationError);
-        }
-
-        final ReadyLocalTransaction message = new ReadyLocalTransaction(transaction.getIdentifier(),
-                modification, immediate, participatingShardNames);
-        return actorUtils.executeOperationAsync(leader, message, actorUtils.getTransactionCommitOperationTimeout());
-    }
-
-    Future<ActorSelection> initiateCoordinatedCommit(final Optional<SortedSet<String>> participatingShardNames) {
-        final Future<Object> messageFuture = initiateCommit(false, participatingShardNames);
-        final Future<ActorSelection> ret = TransactionReadyReplyMapper.transform(messageFuture, actorUtils,
-                transaction.getIdentifier());
-        ret.onComplete(new OnComplete<ActorSelection>() {
-            @Override
-            public void onComplete(final Throwable failure, final ActorSelection success) {
-                if (failure != null) {
-                    LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure);
-                    transactionAborted(transaction);
-                    return;
-                }
-
-                LOG.debug("Transaction {} resolved to actor {}", transaction.getIdentifier(), success);
-            }
-        }, actorUtils.getClientDispatcher());
-
-        return ret;
-    }
-
-    Future<Object> initiateDirectCommit() {
-        final Future<Object> messageFuture = initiateCommit(true, Optional.empty());
-        messageFuture.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object message) {
-                if (failure != null) {
-                    LOG.warn("Failed to prepare transaction {} on backend", transaction.getIdentifier(), failure);
-                    transactionAborted(transaction);
-                } else if (CommitTransactionReply.isSerializedType(message)) {
-                    LOG.debug("Transaction {} committed successfully", transaction.getIdentifier());
-                    transactionCommitted(transaction);
-                } else {
-                    LOG.error("Transaction {} resulted in unhandled message type {}, aborting",
-                        transaction.getIdentifier(), message.getClass());
-                    transactionAborted(transaction);
-                }
-            }
-        }, actorUtils.getClientDispatcher());
-
-        return messageFuture;
-    }
-
-    @Override
-    public final ListenableFuture<Boolean> canCommit() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public final ListenableFuture<Void> preCommit() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public final ListenableFuture<Void> abort() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public final ListenableFuture<Void> commit() {
-        // Intended no-op
-        throw new UnsupportedOperationException();
-    }
-
-    protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> aborted) {
-    }
-
-    protected void transactionCommitted(final SnapshotBackedWriteTransaction<TransactionIdentifier> comitted) {
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java
deleted file mode 100644
index 6610983..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.AbstractSnapshotBackedTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-
-/**
- * Transaction chain instantiated on top of a locally-available DataTree. It does not instantiate
- * a transaction in the leader and rather chains transactions on top of themselves.
- */
-final class LocalTransactionChain extends AbstractSnapshotBackedTransactionChain<TransactionIdentifier>
-        implements LocalTransactionFactory {
-    private static final Throwable ABORTED = new Throwable("Transaction aborted");
-    private final TransactionChainProxy parent;
-    private final ActorSelection leader;
-    private final DataTree tree;
-
-    LocalTransactionChain(final TransactionChainProxy parent, final ActorSelection leader, final DataTree tree) {
-        this.parent = Preconditions.checkNotNull(parent);
-        this.leader = Preconditions.checkNotNull(leader);
-        this.tree = Preconditions.checkNotNull(tree);
-    }
-
-    DataTree getDataTree() {
-        return tree;
-    }
-
-    @Override
-    protected TransactionIdentifier nextTransactionIdentifier() {
-        throw new UnsupportedOperationException();
-    }
-
-    @Override
-    protected boolean getDebugTransactions() {
-        return false;
-    }
-
-    @Override
-    protected DataTreeSnapshot takeSnapshot() {
-        return tree.takeSnapshot();
-    }
-
-    @Override
-    protected DOMStoreThreePhaseCommitCohort createCohort(
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-            final DataTreeModification modification,
-            final Exception operationError) {
-        return new LocalChainThreePhaseCommitCohort(transaction, modification, operationError);
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) {
-        return super.newReadOnlyTransaction(identifier);
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) {
-        return super.newReadWriteTransaction(identifier);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) {
-        return super.newWriteOnlyTransaction(identifier);
-    }
-
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
-    @Override
-    public LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
-            @Nullable Exception operationError) {
-        Preconditions.checkArgument(tx instanceof SnapshotBackedWriteTransaction);
-        if (operationError != null) {
-            return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx,
-                    operationError);
-        }
-
-        try {
-            return (LocalThreePhaseCommitCohort) tx.ready();
-        } catch (Exception e) {
-            // Unfortunately we need to cast to SnapshotBackedWriteTransaction here as it's required by
-            // LocalThreePhaseCommitCohort and the base class.
-            return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, e);
-        }
-    }
-
-    private class LocalChainThreePhaseCommitCohort extends LocalThreePhaseCommitCohort {
-
-        protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-                DataTreeModification modification, Exception operationError) {
-            super(parent.getActorUtils(), leader, transaction, modification, operationError);
-        }
-
-        protected LocalChainThreePhaseCommitCohort(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction,
-                Exception operationError) {
-            super(parent.getActorUtils(), leader, transaction, operationError);
-        }
-
-        @Override
-        protected void transactionAborted(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction) {
-            onTransactionFailed(transaction, ABORTED);
-        }
-
-        @Override
-        protected void transactionCommitted(SnapshotBackedWriteTransaction<TransactionIdentifier> transaction) {
-            onTransactionCommited(transaction);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContext.java
deleted file mode 100644
index da6d6d1..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import scala.concurrent.Future;
-
-/**
- * Processes front-end transaction operations locally before being committed to the destination shard.
- * Instances of this class are used when the destination shard is local to the caller.
- *
- * @author Thomas Pantelis
- */
-abstract class LocalTransactionContext extends AbstractTransactionContext {
-    private final DOMStoreTransaction txDelegate;
-    private final LocalTransactionReadySupport readySupport;
-    private Exception operationError;
-
-    LocalTransactionContext(final DOMStoreTransaction txDelegate, final TransactionIdentifier identifier,
-            final LocalTransactionReadySupport readySupport) {
-        super(identifier);
-        this.txDelegate = Preconditions.checkNotNull(txDelegate);
-        this.readySupport = readySupport;
-    }
-
-    protected abstract DOMStoreWriteTransaction getWriteDelegate();
-
-    protected abstract DOMStoreReadTransaction getReadDelegate();
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void executeModification(final AbstractModification modification, final Boolean havePermit) {
-        incrementModificationCount();
-        if (operationError == null) {
-            try {
-                modification.apply(getWriteDelegate());
-            } catch (Exception e) {
-                operationError = e;
-            }
-        }
-    }
-
-    @Override
-    public <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> proxyFuture,
-            final Boolean havePermit) {
-        Futures.addCallback(readCmd.apply(getReadDelegate()), new FutureCallback<T>() {
-            @Override
-            public void onSuccess(final T result) {
-                proxyFuture.set(result);
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                proxyFuture.setException(failure instanceof Exception
-                        ? ReadFailedException.MAPPER.apply((Exception) failure) : failure);
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    private LocalThreePhaseCommitCohort ready() {
-        logModificationCount();
-        return readySupport.onTransactionReady(getWriteDelegate(), operationError);
-    }
-
-    @Override
-    public Future<ActorSelection> readyTransaction(final Boolean havePermit,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        final LocalThreePhaseCommitCohort cohort = ready();
-        return cohort.initiateCoordinatedCommit(participatingShardNames);
-    }
-
-    @Override
-    public Future<Object> directCommit(final Boolean havePermit) {
-        final LocalThreePhaseCommitCohort cohort = ready();
-        return cohort.initiateDirectCommit();
-    }
-
-    @Override
-    public void closeTransaction() {
-        txDelegate.close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactory.java
deleted file mode 100644
index e6be3a0..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * A factory for creating local transactions used by {@link AbstractTransactionContextFactory} to instantiate
- * transactions on shards which are co-located with the shard leader.
- *
- * @author Thomas Pantelis
- */
-interface LocalTransactionFactory extends LocalTransactionReadySupport {
-    DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier);
-
-    DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier);
-
-    DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier);
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java
deleted file mode 100644 (file)
index 4834590..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedTransactions;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-
-/**
- * {@link LocalTransactionFactory} for instantiating backing transactions which are
- * disconnected from each other, ie not chained. These are used by {@link AbstractTransactionContextFactory}
- * to instantiate transactions on shards which are co-located with the shard leader.
- */
-final class LocalTransactionFactoryImpl extends TransactionReadyPrototype<TransactionIdentifier>
-        implements LocalTransactionFactory {
-
-    private final ActorSelection leader;
-    private final DataTree dataTree;
-    private final ActorUtils actorUtils;
-
-    LocalTransactionFactoryImpl(final ActorUtils actorUtils, final ActorSelection leader, final DataTree dataTree) {
-        this.leader = requireNonNull(leader);
-        this.dataTree = requireNonNull(dataTree);
-        this.actorUtils = actorUtils;
-    }
-
-    DataTree getDataTree() {
-        return dataTree;
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction(TransactionIdentifier identifier) {
-        return SnapshotBackedTransactions.newReadTransaction(identifier, false, dataTree.takeSnapshot());
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction(TransactionIdentifier identifier) {
-        return SnapshotBackedTransactions.newReadWriteTransaction(identifier, false, dataTree.takeSnapshot(), this);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction(TransactionIdentifier identifier) {
-        return SnapshotBackedTransactions.newWriteTransaction(identifier, false, dataTree.takeSnapshot(), this);
-    }
-
-    @Override
-    protected void transactionAborted(final SnapshotBackedWriteTransaction<TransactionIdentifier> tx) {
-        // No-op
-    }
-
-    @Override
-    protected DOMStoreThreePhaseCommitCohort transactionReady(
-            final SnapshotBackedWriteTransaction<TransactionIdentifier> tx,
-            final DataTreeModification tree,
-            final Exception readyError) {
-        return new LocalThreePhaseCommitCohort(actorUtils, leader, tx, tree, readyError);
-    }
-
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
-    @Override
-    public LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
-            @Nullable Exception operationError) {
-        checkArgument(tx instanceof SnapshotBackedWriteTransaction);
-        if (operationError != null) {
-            return new LocalThreePhaseCommitCohort(actorUtils, leader,
-                    (SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, operationError);
-        }
-
-        return (LocalThreePhaseCommitCohort) tx.ready();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionReadySupport.java
deleted file mode 100644 (file)
index 9fc53a8..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-/**
- * Interface for a class that can "ready" a transaction.
- *
- * @author Thomas Pantelis
- */
-interface LocalTransactionReadySupport {
-    LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
-            @Nullable Exception operationError);
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index 1f5f5bc..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import java.util.List;
-import scala.concurrent.Future;
-
-/**
- * A {@link org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort}
- * instance given out for empty transactions.
- */
-final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort<Object> {
-    static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
-
-    private NoOpDOMStoreThreePhaseCommitCohort() {
-        // Hidden to prevent instantiation
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return IMMEDIATE_BOOLEAN_SUCCESS;
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return IMMEDIATE_VOID_SUCCESS;
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return IMMEDIATE_VOID_SUCCESS;
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return IMMEDIATE_VOID_SUCCESS;
-    }
-
-    @Override
-    List<Future<Object>> getCohortFutures() {
-        return Collections.emptyList();
-    }
-}
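
The IMMEDIATE_BOOLEAN_SUCCESS and IMMEDIATE_VOID_SUCCESS constants come from the AbstractThreePhaseCommitCohort base class, which is not shown in this diff. A plausible way to build such pre-completed futures with Guava, offered here as a hedged sketch rather than the actual base-class code:

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;

    final class ImmediateCohortFutures {
        // Pre-completed futures for a no-op cohort: canCommit() always succeeds with TRUE,
        // the remaining phases complete immediately with no result.
        static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
        static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);

        private ImmediateCohortFutures() {
            // constant holder
        }
    }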
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java
deleted file mode 100644 (file)
index 89a8c03..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-final class NoOpTransactionContext extends AbstractTransactionContext {
-    private static final Logger LOG = LoggerFactory.getLogger(NoOpTransactionContext.class);
-
-    private final Throwable failure;
-
-    NoOpTransactionContext(final Throwable failure, final TransactionIdentifier identifier) {
-        super(identifier);
-        this.failure = failure;
-    }
-
-    @Override
-    public void closeTransaction() {
-        LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier());
-    }
-
-    @Override
-    public Future<Object> directCommit(final Boolean havePermit) {
-        LOG.debug("Tx {} directCommit called, failure", getIdentifier(), failure);
-        return akka.dispatch.Futures.failed(failure);
-    }
-
-    @Override
-    public Future<ActorSelection> readyTransaction(final Boolean havePermit,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        LOG.debug("Tx {} readyTransaction called, failure", getIdentifier(), failure);
-        return akka.dispatch.Futures.failed(failure);
-    }
-
-    @Override
-    public void executeModification(final AbstractModification modification, final Boolean havePermit) {
-        LOG.debug("Tx {} executeModification {} called path = {}", getIdentifier(),
-                modification.getClass().getSimpleName(), modification.getPath());
-    }
-
-    @Override
-    public <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> proxyFuture,
-            final Boolean havePermit) {
-        LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(),
-                readCmd.getPath());
-
-        final Throwable t;
-        if (failure instanceof NoShardLeaderException) {
-            t = new DataStoreUnavailableException(failure.getMessage(), failure);
-        } else {
-            t = failure;
-        }
-        proxyFuture.setException(new ReadFailedException("Error executeRead " + readCmd.getClass().getSimpleName()
-                + " for path " + readCmd.getPath(), t));
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDOMStore.java
new file mode 100644 (file)
index 0000000..43e9c3e
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static com.google.common.base.Verify.verifyNotNull;
+
+import com.google.common.annotations.Beta;
+import java.util.Map;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.mdsal.dom.spi.store.DOMStore;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * OSGi manifestation of the distributed datastore, as represented by {@link AbstractDataStore}. This component's
+ * configuration is managed by {@link OSGiDistributedDataStore}.
+ */
+@Beta
+@Component(factory = OSGiDOMStore.FACTORY_NAME, service = { DOMStore.class, DistributedDataStoreInterface.class })
+public final class OSGiDOMStore
+        implements DistributedDataStoreInterface, DOMStoreTreeChangePublisher, CommitCohortExtension {
+    // OSGi DS Component Factory name
+    static final String FACTORY_NAME = "org.opendaylight.controller.cluster.datastore.OSGiDOMStore";
+    static final String DATASTORE_INST_PROP = ".datastore.instance";
+    static final String DATASTORE_TYPE_PROP = ".datastore.type";
+
+    private static final Logger LOG = LoggerFactory.getLogger(OSGiDOMStore.class);
+
+    private final LogicalDatastoreType datastoreType;
+    private AbstractDataStore datastore;
+
+    @Activate
+    public OSGiDOMStore(final Map<String, ?> properties) {
+        datastoreType = (LogicalDatastoreType) verifyNotNull(properties.get(DATASTORE_TYPE_PROP));
+        datastore = (AbstractDataStore) verifyNotNull(properties.get(DATASTORE_INST_PROP));
+        LOG.info("Datastore service type {} activated", datastoreType);
+    }
+
+    @Deactivate
+    void deactivate() {
+        datastore = null;
+        LOG.info("Datastore service type {} deactivated", datastoreType);
+    }
+
+    @Override
+    public ActorUtils getActorUtils() {
+        return datastore.getActorUtils();
+    }
+
+    @Override
+    public Registration registerProxyListener(final YangInstanceIdentifier shardLookup,
+            final YangInstanceIdentifier insideShard, final DOMDataTreeChangeListener delegate) {
+        return datastore.registerProxyListener(shardLookup, insideShard, delegate);
+    }
+
+    @Override
+    public Registration registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
+        return datastore.registerTreeChangeListener(treeId, listener);
+    }
+
+    @Override
+    public Registration registerCommitCohort(final DOMDataTreeIdentifier path, final DOMDataTreeCommitCohort cohort) {
+        return datastore.registerCommitCohort(path, cohort);
+    }
+
+    @Override
+    public DOMStoreTransactionChain createTransactionChain() {
+        return datastore.createTransactionChain();
+    }
+
+    @Override
+    public DOMStoreReadTransaction newReadOnlyTransaction() {
+        return datastore.newReadOnlyTransaction();
+    }
+
+    @Override
+    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
+        return datastore.newWriteOnlyTransaction();
+    }
+
+    @Override
+    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
+        return datastore.newReadWriteTransaction();
+    }
+
+    @Override
+    public Registration registerLegacyTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener) {
+        return datastore.registerLegacyTreeChangeListener(treeId, listener);
+    }
+}
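
The OSGiDOMStore constructor pulls its collaborators out of the component property map that OSGiDistributedDataStore populates. A small sketch of that "typed lookup from an untyped property map" pattern, with a hypothetical helper and assuming Guava's Verify:

    import static com.google.common.base.Verify.verifyNotNull;

    import java.util.Map;

    final class ComponentProperties {
        private ComponentProperties() {
            // utility class
        }

        // Fetches a mandatory, typed value from an untyped component property map,
        // failing fast when the key is missing or the value has an unexpected type.
        static <T> T require(final Map<String, ?> properties, final String key, final Class<T> type) {
            return type.cast(verifyNotNull(properties.get(key), "Missing property %s", key));
        }
    }

The leading dot in the property names above follows the Declarative Services convention for private component properties, which keeps them out of the published service properties.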
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDatastoreContextIntrospectorFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDatastoreContextIntrospectorFactory.java
new file mode 100644 (file)
index 0000000..8332bcf
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static com.google.common.base.Verify.verifyNotNull;
+
+import com.google.common.annotations.Beta;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Beta
+@Component(immediate = true, service = DatastoreContextIntrospectorFactory.class)
+public final class OSGiDatastoreContextIntrospectorFactory extends AbstractDatastoreContextIntrospectorFactory {
+    private static final Logger LOG = LoggerFactory.getLogger(OSGiDatastoreContextIntrospectorFactory.class);
+
+    @Reference
+    volatile BindingNormalizedNodeSerializer serializer = null;
+
+    @Override
+    BindingNormalizedNodeSerializer serializer() {
+        return verifyNotNull(serializer);
+    }
+
+    @Activate
+    @SuppressWarnings("static-method")
+    void activate() {
+        LOG.info("Datastore Context Introspector activated");
+    }
+
+    @Deactivate
+    @SuppressWarnings("static-method")
+    void deactivate() {
+        LOG.info("Datastore Context Introspector deactivated");
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OSGiDistributedDataStore.java
new file mode 100644 (file)
index 0000000..3e2db7d
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import java.util.Map;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.opendaylight.controller.cluster.ActorSystemProvider;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
+import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
+import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfigProvider;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMSchemaService;
+import org.osgi.framework.FrameworkUtil;
+import org.osgi.service.component.ComponentFactory;
+import org.osgi.service.component.ComponentInstance;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Modified;
+import org.osgi.service.component.annotations.Reference;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Global bootstrap component. It is responsible for starting all distributed datastore instances and activating
+ * {@link OSGiDOMStore} as appropriate. It also routes datastore properties towards AbstractDataStore.
+ */
+@Beta
+@Component(immediate = true, configurationPid = "org.opendaylight.controller.cluster.datastore")
+public final class OSGiDistributedDataStore {
+    /**
+     * Internal state associated with a particular datastore. An instance is created for each datastore and once the
+     * datastore settles, we create a new component configuration of {@link OSGiDOMStore}. This indirection is needed
+     * to not block Service Component Runtime from activating other components while we are waiting for the datastore
+     * to settle (which can take a long time).
+     */
+    private final class DatastoreState implements FutureCallback<Object> {
+        private final DatastoreContextIntrospector introspector;
+        private final LogicalDatastoreType datastoreType;
+        private final AbstractDataStore datastore;
+        private final String serviceType;
+
+        @GuardedBy("this")
+        private ComponentInstance<OSGiDOMStore> component;
+        @GuardedBy("this")
+        private boolean stopped;
+
+        DatastoreState(final DatastoreContextIntrospector introspector, final LogicalDatastoreType datastoreType,
+                final AbstractDataStore datastore, final String serviceType) {
+            this.introspector = requireNonNull(introspector);
+            this.datastoreType = requireNonNull(datastoreType);
+            this.datastore = requireNonNull(datastore);
+            this.serviceType = requireNonNull(serviceType);
+        }
+
+        synchronized void updateProperties(final Map<String, Object> properties) {
+            if (introspector.update(properties)) {
+                LOG.info("Distributed Datastore type {} updating context", datastoreType);
+                datastore.onDatastoreContextUpdated(introspector.newContextFactory());
+            }
+        }
+
+        void stop() {
+            LOG.info("Distributed Datastore type {} stopping", datastoreType);
+
+            synchronized (this) {
+                stopped = true;
+                if (component != null) {
+                    component.dispose();
+                    component = null;
+                }
+                datastore.close();
+                LOG.info("Distributed Datastore type {} stopped", datastoreType);
+            }
+        }
+
+        @Override
+        public void onSuccess(final Object result) {
+            LOG.debug("Distributed Datastore type {} reached initial settle", datastoreType);
+
+            synchronized (this) {
+                if (!stopped) {
+                    component = datastoreFactory.newInstance(FrameworkUtil.asDictionary(Map.of(
+                        OSGiDOMStore.DATASTORE_TYPE_PROP, datastoreType,
+                        OSGiDOMStore.DATASTORE_INST_PROP, datastore,
+                        "type", serviceType)));
+                    LOG.info("Distributed Datastore type {} started", datastoreType);
+                }
+            }
+        }
+
+        @Override
+        public synchronized void onFailure(final Throwable cause) {
+            LOG.error("Distributed Datastore type {} failed to settle", datastoreType, cause);
+        }
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(OSGiDistributedDataStore.class);
+
+    private final ComponentFactory<OSGiDOMStore> datastoreFactory;
+    private DatastoreState configDatastore;
+    private DatastoreState operDatastore;
+
+    @Activate
+    public OSGiDistributedDataStore(@Reference final DOMSchemaService schemaService,
+            @Reference final ActorSystemProvider actorSystemProvider,
+            @Reference final DatastoreContextIntrospectorFactory introspectorFactory,
+            @Reference final DatastoreSnapshotRestore snapshotRestore,
+            @Reference final ModuleShardConfigProvider configProvider,
+            @Reference(target = "(component.factory=" + OSGiDOMStore.FACTORY_NAME + ")")
+            final ComponentFactory<OSGiDOMStore> datastoreFactory, final Map<String, Object> properties) {
+        this.datastoreFactory = requireNonNull(datastoreFactory);
+        configDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory,
+            LogicalDatastoreType.CONFIGURATION, "distributed-config", properties, null);
+        operDatastore = createDatastore(schemaService, actorSystemProvider, snapshotRestore, introspectorFactory,
+            LogicalDatastoreType.OPERATIONAL, "distributed-operational", properties,
+            new ConfigurationImpl(configProvider));
+    }
+
+    @Modified
+    void modified(final Map<String, Object> properties) {
+        LOG.debug("Overlaying settings: {}", properties);
+        configDatastore.updateProperties(properties);
+        operDatastore.updateProperties(properties);
+    }
+
+    @Deactivate
+    void deactivate() {
+        operDatastore.stop();
+        operDatastore = null;
+        configDatastore.stop();
+        configDatastore = null;
+    }
+
+    private DatastoreState createDatastore(final DOMSchemaService schemaService,
+            final ActorSystemProvider actorSystemProvider, final DatastoreSnapshotRestore snapshotRestore,
+            final DatastoreContextIntrospectorFactory introspectorFactory, final LogicalDatastoreType datastoreType,
+            final String serviceType, final Map<String, Object> properties, final Configuration config) {
+        LOG.info("Distributed Datastore type {} starting", datastoreType);
+        final var introspector = introspectorFactory.newInstance(datastoreType, properties);
+        final var datastore = DistributedDataStoreFactory.createInstance(actorSystemProvider,
+            introspector.getContext(), introspector, snapshotRestore, config);
+        datastore.setCloseable(schemaService.registerSchemaContextListener(datastore::onModelContextUpdated));
+        final var state = new DatastoreState(introspector, datastoreType, datastore, serviceType);
+
+        Futures.addCallback(datastore.initialSettleFuture(), state,
+            // Note we are invoked from shard manager and therefore could block it, hence the round-trip to executor
+            datastore.getActorUtils().getClientDispatcher()::execute);
+        return state;
+    }
+}
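
DatastoreState defers creating the OSGiDOMStore component until the datastore's initial-settle future completes, with a stopped flag guarding against activation racing shutdown. A stripped-down sketch of that gating pattern, using hypothetical names and assuming Guava futures:

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.MoreExecutors;

    final class DeferredActivation<T> implements FutureCallback<T> {
        private final Runnable activation;

        private boolean stopped;
        private boolean activated;

        private DeferredActivation(final Runnable activation) {
            this.activation = activation;
        }

        // Arm the activation so it runs only once the future settles successfully
        // and only if stop() has not already been called.
        static <T> DeferredActivation<T> arm(final ListenableFuture<T> settleFuture, final Runnable activation) {
            final DeferredActivation<T> state = new DeferredActivation<>(activation);
            Futures.addCallback(settleFuture, state, MoreExecutors.directExecutor());
            return state;
        }

        @Override
        public synchronized void onSuccess(final T result) {
            if (!stopped) {
                activation.run();
                activated = true;
            }
        }

        @Override
        public synchronized void onFailure(final Throwable cause) {
            // Nothing to activate: the backing service failed to settle.
        }

        synchronized void stop(final Runnable deactivation) {
            stopped = true;
            if (activated) {
                deactivation.run();
                activated = false;
            }
        }
    }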
@@ -5,12 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
+
+import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
@@ -24,7 +25,7 @@ import scala.concurrent.Await;
  *
  * @author Thomas Pantelis
  */
-class OnDemandShardStateCache {
+final class OnDemandShardStateCache {
     private static final Cache<String, OnDemandShardState> ONDEMAND_SHARD_STATE_CACHE =
             CacheBuilder.newBuilder().expireAfterWrite(2, TimeUnit.SECONDS).build();
 
@@ -33,7 +34,7 @@ class OnDemandShardStateCache {
     private volatile String stateRetrievalTime;
 
     OnDemandShardStateCache(final String shardName, final ActorRef shardActor) {
-        this.shardName = Preconditions.checkNotNull(shardName);
+        this.shardName = requireNonNull(shardName);
         this.shardActor = shardActor;
     }
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/OperationLimiter.java
deleted file mode 100644 (file)
index e1fee6d..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class for limiting operations.
- */
-public class OperationLimiter {
-    private static final Logger LOG = LoggerFactory.getLogger(OperationLimiter.class);
-    private final TransactionIdentifier identifier;
-    private final long acquireTimeout;
-    private final Semaphore semaphore;
-    private final int maxPermits;
-
-    OperationLimiter(final TransactionIdentifier identifier, final int maxPermits, final long acquireTimeoutSeconds) {
-        this.identifier = Preconditions.checkNotNull(identifier);
-
-        Preconditions.checkArgument(acquireTimeoutSeconds >= 0);
-        this.acquireTimeout = TimeUnit.SECONDS.toNanos(acquireTimeoutSeconds);
-
-        Preconditions.checkArgument(maxPermits >= 0);
-        this.maxPermits = maxPermits;
-        this.semaphore = new Semaphore(maxPermits);
-    }
-
-    boolean acquire() {
-        return acquire(1);
-    }
-
-    boolean acquire(final int acquirePermits) {
-        try {
-            if (semaphore.tryAcquire(acquirePermits, acquireTimeout, TimeUnit.NANOSECONDS)) {
-                return true;
-            }
-        } catch (InterruptedException e) {
-            if (LOG.isDebugEnabled()) {
-                LOG.debug("Interrupted when trying to acquire operation permit for transaction {}", identifier, e);
-            } else {
-                LOG.warn("Interrupted when trying to acquire operation permit for transaction {}", identifier);
-            }
-        }
-
-        return false;
-    }
-
-    void release() {
-        release(1);
-    }
-
-    void release(int permits) {
-        this.semaphore.release(permits);
-    }
-
-    @VisibleForTesting
-    TransactionIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    @VisibleForTesting
-    int availablePermits() {
-        return semaphore.availablePermits();
-    }
-
-    /**
-     * Release all the permits.
-     */
-    public void releaseAll() {
-        this.semaphore.release(maxPermits - availablePermits());
-    }
-}
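
OperationLimiter throttles front-end operations with a Semaphore and a bounded tryAcquire. A compact, JDK-only sketch of the same back-pressure idea follows; the class is hypothetical and, unlike the deleted code above, it restores the interrupt flag instead of logging:

    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;

    final class PermitThrottle {
        private final Semaphore permits;
        private final long acquireTimeoutNanos;

        PermitThrottle(final int maxPermits, final long acquireTimeoutSeconds) {
            permits = new Semaphore(maxPermits);
            acquireTimeoutNanos = TimeUnit.SECONDS.toNanos(acquireTimeoutSeconds);
        }

        // Try to take a permit, giving up after the configured timeout or on interrupt.
        boolean acquire() {
            try {
                return permits.tryAcquire(1, acquireTimeoutNanos, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return false;
            }
        }

        void release() {
            permits.release();
        }
    }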
index 4df1352b1904467f15aa9d4df846884f7cb1745a..28042ecc3dc28746bb7bb0d4babfff3ce297fa2e 100644 (file)
@@ -8,11 +8,11 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 final class ReadOnlyShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeSnapshot> {
     ReadOnlyShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id,
-        final DataTreeSnapshot snapshot) {
+            final DataTreeSnapshot snapshot) {
         super(parent, id, snapshot);
     }
 }
index f28d0d08b3e0a9e610bda2973da3a3dbc8b6682c..b55d24ac8b20f90bdee32f62811ff46ee1a5227a 100644 (file)
@@ -7,21 +7,21 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkState;
+
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 public final class ReadWriteShardDataTreeTransaction extends AbstractShardDataTreeTransaction<DataTreeModification> {
-
     ReadWriteShardDataTreeTransaction(final ShardDataTreeTransactionParent parent, final TransactionIdentifier id,
-        final DataTreeModification modification) {
+            final DataTreeModification modification) {
         super(parent, id, modification);
     }
 
-    ShardDataTreeCohort ready(Optional<SortedSet<String>> participatingShardNames) {
-        Preconditions.checkState(close(), "Transaction is already closed");
+    ShardDataTreeCohort ready(final Optional<SortedSet<String>> participatingShardNames) {
+        checkState(close(), "Transaction is already closed");
         return getParent().finishTransaction(this, participatingShardNames);
     }
 }
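
The two hunks above follow the checkstyle-driven pattern running through this change: statically import the JDK and Guava precondition helpers instead of qualifying them through the Preconditions class. A tiny illustration of the preferred style, with hypothetical arguments:

    import static com.google.common.base.Preconditions.checkState;
    import static java.util.Objects.requireNonNull;

    final class PreconditionStyle {
        private final String name;
        private boolean open = true;

        PreconditionStyle(final String name) {
            // Preferred: requireNonNull(name) rather than Preconditions.checkNotNull(name)
            this.name = requireNonNull(name);
        }

        void close() {
            // Preferred: statically imported checkState(...) rather than Preconditions.checkState(...)
            checkState(open, "%s is already closed", name);
            open = false;
        }
    }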
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContext.java
deleted file mode 100644 (file)
index 6714815..0000000
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * Redirects front-end transaction operations to a shard for processing. Instances of this class are used
- * when the destination shard is remote to the caller.
- *
- * @author Thomas Pantelis
- */
-public class RemoteTransactionContext extends AbstractTransactionContext {
-    private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContext.class);
-
-    private final ActorUtils actorUtils;
-    private final ActorSelection actor;
-    private final OperationLimiter limiter;
-
-    private BatchedModifications batchedModifications;
-    private int totalBatchedModificationsSent;
-    private int batchPermits;
-
-    /**
-     * We have observed a failed modification batch. This transaction context is effectively doomed, as the backend
-     * does not have a correct view of the world. If this happens, we do not limit operations but rather short-cut them
- * to either a no-op (modifications) or a failure (reads). Once the transaction is ready, though, we send the
-     * message to resynchronize with the backend, sharing a 'lost message' failure path.
-     */
-    private volatile Throwable failedModification;
-
-    protected RemoteTransactionContext(final TransactionIdentifier identifier, final ActorSelection actor,
-            final ActorUtils actorUtils, final short remoteTransactionVersion, final OperationLimiter limiter) {
-        super(identifier, remoteTransactionVersion);
-        this.limiter = requireNonNull(limiter);
-        this.actor = actor;
-        this.actorUtils = actorUtils;
-    }
-
-    private ActorSelection getActor() {
-        return actor;
-    }
-
-    protected ActorUtils getActorUtils() {
-        return actorUtils;
-    }
-
-    @Override
-    public void closeTransaction() {
-        LOG.debug("Tx {} closeTransaction called", getIdentifier());
-        TransactionContextCleanup.untrack(this);
-
-        actorUtils.sendOperationAsync(getActor(), new CloseTransaction(getTransactionVersion()).toSerializable());
-    }
-
-    @Override
-    public Future<Object> directCommit(final Boolean havePermit) {
-        LOG.debug("Tx {} directCommit called", getIdentifier());
-
-        // Send the remaining batched modifications, if any, with the ready flag set.
-        bumpPermits(havePermit);
-        return sendBatchedModifications(true, true, Optional.empty());
-    }
-
-    @Override
-    public Future<ActorSelection> readyTransaction(final Boolean havePermit,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        logModificationCount();
-
-        LOG.debug("Tx {} readyTransaction called", getIdentifier());
-
-        // Send the remaining batched modifications, if any, with the ready flag set.
-
-        bumpPermits(havePermit);
-        Future<Object> lastModificationsFuture = sendBatchedModifications(true, false, participatingShardNames);
-
-        return transformReadyReply(lastModificationsFuture);
-    }
-
-    private void bumpPermits(final Boolean havePermit) {
-        if (Boolean.TRUE.equals(havePermit)) {
-            ++batchPermits;
-        }
-    }
-
-    protected Future<ActorSelection> transformReadyReply(final Future<Object> readyReplyFuture) {
-        // Transform the last reply Future into a Future that returns the cohort actor path from
-        // the last reply message. That's the end result of the ready operation.
-
-        return TransactionReadyReplyMapper.transform(readyReplyFuture, actorUtils, getIdentifier());
-    }
-
-    private BatchedModifications newBatchedModifications() {
-        return new BatchedModifications(getIdentifier(), getTransactionVersion());
-    }
-
-    private void batchModification(final Modification modification, final boolean havePermit) {
-        incrementModificationCount();
-        if (havePermit) {
-            ++batchPermits;
-        }
-
-        if (batchedModifications == null) {
-            batchedModifications = newBatchedModifications();
-        }
-
-        batchedModifications.addModification(modification);
-
-        if (batchedModifications.getModifications().size()
-                >= actorUtils.getDatastoreContext().getShardBatchedModificationCount()) {
-            sendBatchedModifications();
-        }
-    }
-
-    protected Future<Object> sendBatchedModifications() {
-        return sendBatchedModifications(false, false, Optional.empty());
-    }
-
-    protected Future<Object> sendBatchedModifications(final boolean ready, final boolean doCommitOnReady,
-            final Optional<SortedSet<String>> participatingShardNames) {
-        Future<Object> sent = null;
-        if (ready || batchedModifications != null && !batchedModifications.getModifications().isEmpty()) {
-            if (batchedModifications == null) {
-                batchedModifications = newBatchedModifications();
-            }
-
-            LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
-                    batchedModifications.getModifications().size(), ready);
-
-            batchedModifications.setDoCommitOnReady(doCommitOnReady);
-            batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);
-
-            final BatchedModifications toSend = batchedModifications;
-            final int permitsToRelease = batchPermits;
-            batchPermits = 0;
-
-            if (ready) {
-                batchedModifications.setReady(participatingShardNames);
-                batchedModifications.setDoCommitOnReady(doCommitOnReady);
-                batchedModifications = null;
-            } else {
-                batchedModifications = newBatchedModifications();
-
-                final Throwable failure = failedModification;
-                if (failure != null) {
-                    // We have observed a modification failure, it does not make sense to send this batch. This speeds
-                    // up the time when the application could be blocked due to messages timing out and operation
-                    // limiter kicking in.
-                    LOG.debug("Tx {} modifications previously failed, not sending a non-ready batch", getIdentifier());
-                    limiter.release(permitsToRelease);
-                    return Futures.failed(failure);
-                }
-            }
-
-            sent = actorUtils.executeOperationAsync(getActor(), toSend.toSerializable(),
-                actorUtils.getTransactionCommitOperationTimeout());
-            sent.onComplete(new OnComplete<Object>() {
-                @Override
-                public void onComplete(final Throwable failure, final Object success) {
-                    if (failure != null) {
-                        LOG.debug("Tx {} modifications failed", getIdentifier(), failure);
-                        failedModification = failure;
-                    } else {
-                        LOG.debug("Tx {} modifications completed with {}", getIdentifier(), success);
-                    }
-                    limiter.release(permitsToRelease);
-                }
-            }, actorUtils.getClientDispatcher());
-        }
-
-        return sent;
-    }
-
-    @Override
-    public void executeModification(final AbstractModification modification, final Boolean havePermit) {
-        LOG.debug("Tx {} executeModification {} called path = {}", getIdentifier(),
-                modification.getClass().getSimpleName(), modification.getPath());
-
-        final boolean permitToRelease;
-        if (havePermit == null) {
-            permitToRelease = failedModification == null && acquireOperation();
-        } else {
-            permitToRelease = havePermit.booleanValue();
-        }
-
-        batchModification(modification, permitToRelease);
-    }
-
-    @Override
-    public <T> void executeRead(final AbstractRead<T> readCmd, final SettableFuture<T> returnFuture,
-            final Boolean havePermit) {
-        LOG.debug("Tx {} executeRead {} called path = {}", getIdentifier(), readCmd.getClass().getSimpleName(),
-                readCmd.getPath());
-
-        final Throwable failure = failedModification;
-        if (failure != null) {
-            // If we know there was a previous modification failure, we must not send a read request, as it risks
-            // returning incorrect data. We check this before acquiring an operation simply because we want the app
-            // to complete this transaction as soon as possible.
-            returnFuture.setException(new ReadFailedException("Previous modification failed, cannot "
-                    + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
-            return;
-        }
-
-        // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
-        // public API contract.
-
-        final boolean permitToRelease = havePermit == null ? acquireOperation() : havePermit.booleanValue();
-        sendBatchedModifications();
-
-        OnComplete<Object> onComplete = new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object response) {
-                // We have previously acquired an operation, now release it, no matter what happened
-                if (permitToRelease) {
-                    limiter.release();
-                }
-
-                if (failure != null) {
-                    LOG.debug("Tx {} {} operation failed", getIdentifier(), readCmd.getClass().getSimpleName(),
-                        failure);
-
-                    returnFuture.setException(new ReadFailedException("Error checking "
-                        + readCmd.getClass().getSimpleName() + " for path " + readCmd.getPath(), failure));
-                } else {
-                    LOG.debug("Tx {} {} operation succeeded", getIdentifier(), readCmd.getClass().getSimpleName());
-                    readCmd.processResponse(response, returnFuture);
-                }
-            }
-        };
-
-        final Future<Object> future = actorUtils.executeOperationAsync(getActor(),
-            readCmd.asVersion(getTransactionVersion()).toSerializable(), actorUtils.getOperationTimeout());
-        future.onComplete(onComplete, actorUtils.getClientDispatcher());
-    }
-
-    /**
-     * Acquire operation from the limiter if the hand-off has completed. If the hand-off is still ongoing, this method
-     * does nothing.
-     *
-     * @return True if a permit was successfully acquired, false otherwise
-     */
-    private boolean acquireOperation() {
-        checkState(isOperationHandOffComplete(),
-            "Attempted to acquire execute operation permit for transaction %s on actor %s during handoff",
-            getIdentifier(), actor);
-
-        if (limiter.acquire()) {
-            return true;
-        }
-
-        LOG.warn("Failed to acquire execute operation permit for transaction {} on actor {}", getIdentifier(), actor);
-        return false;
-    }
-
-    @Override
-    public boolean usesOperationLimiting() {
-        return true;
-    }
-}
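
sendBatchedModifications() accumulates modifications and flushes the batch once the configured shard batch size is reached, releasing any held permits when the send completes. A simplified, JDK-only sketch of that threshold-based batching follows; the type is hypothetical and omits the Akka futures and operation-limiter wiring of the real class:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    final class BatchingSender<T> {
        private final Consumer<List<T>> transport;
        private final int batchSize;

        private List<T> batch = new ArrayList<>();

        BatchingSender(final Consumer<List<T>> transport, final int batchSize) {
            this.transport = transport;
            this.batchSize = batchSize;
        }

        // Queue one item; flush automatically once the batch reaches the configured size.
        void add(final T item) {
            batch.add(item);
            if (batch.size() >= batchSize) {
                flush();
            }
        }

        // Send whatever has accumulated, starting a fresh batch for subsequent items.
        void flush() {
            if (!batch.isEmpty()) {
                final List<T> toSend = batch;
                batch = new ArrayList<>();
                transport.accept(toSend);
            }
        }
    }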
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextSupport.java
deleted file mode 100644 (file)
index a93d46f..0000000
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
-import akka.pattern.AskTimeoutException;
-import akka.util.Timeout;
-import com.google.common.base.Preconditions;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Handles creation of TransactionContext instances for remote transactions. This class creates
- * remote transactions, if necessary, by sending CreateTransaction messages with retries, up to a limit,
- * if the shard doesn't have a leader yet. This is done by scheduling a retry task after a short delay.
- * <p/>
- * The end result from a completed CreateTransaction message is a TransactionContext that is
- * used to perform transaction operations. Transaction operations that occur before the
- * CreateTransaction completes are cached via a TransactionContextWrapper and executed once the
- * CreateTransaction completes, successfully or not.
- */
-final class RemoteTransactionContextSupport {
-    private static final Logger LOG = LoggerFactory.getLogger(RemoteTransactionContextSupport.class);
-
-    private static final long CREATE_TX_TRY_INTERVAL_IN_MS = 1000;
-    private static final long MAX_CREATE_TX_MSG_TIMEOUT_IN_MS = 5000;
-
-    private final TransactionProxy parent;
-    private final String shardName;
-
-    /**
-     * The target primary shard.
-     */
-    private volatile PrimaryShardInfo primaryShardInfo;
-
-    /**
-     * The total timeout for creating a tx on the primary shard.
-     */
-    private volatile long totalCreateTxTimeout;
-
-    private final Timeout createTxMessageTimeout;
-
-    private final TransactionContextWrapper transactionContextWrapper;
-
-    RemoteTransactionContextSupport(final TransactionContextWrapper transactionContextWrapper,
-            final TransactionProxy parent, final String shardName) {
-        this.parent = Preconditions.checkNotNull(parent);
-        this.shardName = shardName;
-        this.transactionContextWrapper = transactionContextWrapper;
-
-        // For the total create tx timeout, use 2 times the election timeout. This should be enough time for
-        // a leader re-election to occur if we happen to hit it in transition.
-        totalCreateTxTimeout = parent.getActorUtils().getDatastoreContext().getShardRaftConfig()
-                .getElectionTimeOutInterval().toMillis() * 2;
-
-        // We'll use the operationTimeout for the create Tx message timeout so it can be set appropriately
-        // for unit tests but cap it at MAX_CREATE_TX_MSG_TIMEOUT_IN_MS. The operationTimeout could be set
-        // larger than the totalCreateTxTimeout in production which we don't want.
-        long operationTimeout = parent.getActorUtils().getOperationTimeout().duration().toMillis();
-        createTxMessageTimeout = new Timeout(Math.min(operationTimeout, MAX_CREATE_TX_MSG_TIMEOUT_IN_MS),
-                TimeUnit.MILLISECONDS);
-    }
-
-    String getShardName() {
-        return shardName;
-    }
-
-    private TransactionType getTransactionType() {
-        return parent.getType();
-    }
-
-    private ActorUtils getActorUtils() {
-        return parent.getActorUtils();
-    }
-
-    private TransactionIdentifier getIdentifier() {
-        return parent.getIdentifier();
-    }
-
-    /**
-     * Sets the target primary shard and initiates a CreateTransaction try.
-     */
-    void setPrimaryShard(final PrimaryShardInfo newPrimaryShardInfo) {
-        this.primaryShardInfo = newPrimaryShardInfo;
-
-        if (getTransactionType() == TransactionType.WRITE_ONLY
-                && getActorUtils().getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
-            ActorSelection primaryShard = newPrimaryShardInfo.getPrimaryShardActor();
-
-            LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
-                getIdentifier(), primaryShard);
-
-            // For write-only Tx's we prepare the transaction modifications directly on the shard actor
-            // to avoid the overhead of creating a separate transaction actor.
-            transactionContextWrapper.executePriorTransactionOperations(createValidTransactionContext(
-                    primaryShard, String.valueOf(primaryShard.path()), newPrimaryShardInfo.getPrimaryShardVersion()));
-        } else {
-            tryCreateTransaction();
-        }
-    }
-
-    /**
-     * Performs an asynchronous CreateTransaction attempt.
-     */
-    private void tryCreateTransaction() {
-        LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(),
-                primaryShardInfo.getPrimaryShardActor());
-
-        Object serializedCreateMessage = new CreateTransaction(getIdentifier(), getTransactionType().ordinal(),
-                    primaryShardInfo.getPrimaryShardVersion()).toSerializable();
-
-        Future<Object> createTxFuture = getActorUtils().executeOperationAsync(
-                primaryShardInfo.getPrimaryShardActor(), serializedCreateMessage, createTxMessageTimeout);
-
-        createTxFuture.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object response) {
-                onCreateTransactionComplete(failure, response);
-            }
-        }, getActorUtils().getClientDispatcher());
-    }
-
-    private void tryFindPrimaryShard() {
-        LOG.debug("Tx {} Retrying findPrimaryShardAsync for shard {}", getIdentifier(), shardName);
-
-        this.primaryShardInfo = null;
-        Future<PrimaryShardInfo> findPrimaryFuture = getActorUtils().findPrimaryShardAsync(shardName);
-        findPrimaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
-            @Override
-            public void onComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
-                onFindPrimaryShardComplete(failure, newPrimaryShardInfo);
-            }
-        }, getActorUtils().getClientDispatcher());
-    }
-
-    private void onFindPrimaryShardComplete(final Throwable failure, final PrimaryShardInfo newPrimaryShardInfo) {
-        if (failure == null) {
-            this.primaryShardInfo = newPrimaryShardInfo;
-            tryCreateTransaction();
-        } else {
-            LOG.debug("Tx {}: Find primary for shard {} failed", getIdentifier(), shardName, failure);
-
-            onCreateTransactionComplete(failure, null);
-        }
-    }
-
-    private void onCreateTransactionComplete(final Throwable failure, final Object response) {
-        // An AskTimeoutException will occur if the local shard forwards to an unavailable remote leader or
-        // the cached remote leader actor is no longer available.
-        boolean retryCreateTransaction = primaryShardInfo != null
-                && (failure instanceof NoShardLeaderException || failure instanceof AskTimeoutException);
-
-        // Schedule a retry unless we're out of retries. Note: totalCreateTxTimeout is volatile as it may
-        // be written by different threads however not concurrently, therefore decrementing it
-        // non-atomically here is ok.
-        if (retryCreateTransaction && totalCreateTxTimeout > 0) {
-            long scheduleInterval = CREATE_TX_TRY_INTERVAL_IN_MS;
-            if (failure instanceof AskTimeoutException) {
-                // Since we use the createTxMessageTimeout for the CreateTransaction request and it timed
-                // out, subtract it from the total timeout. Also since the createTxMessageTimeout period
-                // has already elapsed, we can immediately schedule the retry (10 ms is virtually immediate).
-                totalCreateTxTimeout -= createTxMessageTimeout.duration().toMillis();
-                scheduleInterval = 10;
-            }
-
-            totalCreateTxTimeout -= scheduleInterval;
-
-            LOG.debug("Tx {}: create tx on shard {} failed with exception \"{}\" - scheduling retry in {} ms",
-                    getIdentifier(), shardName, failure, scheduleInterval);
-
-            getActorUtils().getActorSystem().scheduler().scheduleOnce(
-                    FiniteDuration.create(scheduleInterval, TimeUnit.MILLISECONDS),
-                    this::tryFindPrimaryShard, getActorUtils().getClientDispatcher());
-            return;
-        }
-
-        createTransactionContext(failure, response);
-    }
-
-    private void createTransactionContext(final Throwable failure, final Object response) {
-        // Create the TransactionContext from the response or failure. Store the new
-        // TransactionContext locally until we've completed invoking the
-        // TransactionOperations. This avoids thread timing issues which could cause
-        // out-of-order TransactionOperations. Eg, on a modification operation, if the
-        // TransactionContext is non-null, then we directly call the TransactionContext.
-        // However, at the same time, the code may be executing the cached
-        // TransactionOperations. So to avoid this timing issue, we don't publish the
-        // TransactionContext until after we've executed all cached TransactionOperations.
-        TransactionContext localTransactionContext;
-        if (failure != null) {
-            LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
-
-            Throwable resultingEx = failure;
-            if (failure instanceof AskTimeoutException) {
-                resultingEx = new ShardLeaderNotRespondingException(String.format(
-                        "Could not create a %s transaction on shard %s. The shard leader isn't responding.",
-                        parent.getType(), shardName), failure);
-            } else if (!(failure instanceof NoShardLeaderException)) {
-                resultingEx = new Exception(String.format(
-                    "Error creating %s transaction on shard %s", parent.getType(), shardName), failure);
-            }
-
-            localTransactionContext = new NoOpTransactionContext(resultingEx, getIdentifier());
-        } else if (CreateTransactionReply.isSerializedType(response)) {
-            localTransactionContext = createValidTransactionContext(
-                    CreateTransactionReply.fromSerializable(response));
-        } else {
-            IllegalArgumentException exception = new IllegalArgumentException(String.format(
-                    "Invalid reply type %s for CreateTransaction", response.getClass()));
-
-            localTransactionContext = new NoOpTransactionContext(exception, getIdentifier());
-        }
-
-        transactionContextWrapper.executePriorTransactionOperations(localTransactionContext);
-    }
-
-    private TransactionContext createValidTransactionContext(final CreateTransactionReply reply) {
-        LOG.debug("Tx {} Received {}", getIdentifier(), reply);
-
-        return createValidTransactionContext(getActorUtils().actorSelection(reply.getTransactionPath()),
-                reply.getTransactionPath(), primaryShardInfo.getPrimaryShardVersion());
-    }
-
-    private TransactionContext createValidTransactionContext(final ActorSelection transactionActor,
-            final String transactionPath, final short remoteTransactionVersion) {
-        final TransactionContext ret = new RemoteTransactionContext(transactionContextWrapper.getIdentifier(),
-                transactionActor, getActorUtils(), remoteTransactionVersion, transactionContextWrapper.getLimiter());
-
-        if (parent.getType() == TransactionType.READ_ONLY) {
-            TransactionContextCleanup.track(parent, ret);
-        }
-
-        return ret;
-    }
-}
-
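For reference, the retry handling being removed above amounts to a small time-budget calculation: an AskTimeoutException charges the already-elapsed ask timeout against the budget and retries almost immediately, while other retryable failures wait a fixed interval. The standalone sketch below restates that arithmetic only as an illustration; the class, constants and method names are invented and are not part of the controller code.

// Illustrative sketch of the removed retry-budget arithmetic; all names here are assumptions.
final class CreateTxRetryBudget {
    private static final long RETRY_INTERVAL_MS = 2000;   // assumed regular retry interval
    private static final long IMMEDIATE_RETRY_MS = 10;    // "virtually immediate" retry

    private final long askTimeoutMs;                       // per-request ask timeout
    private long remainingMs;                              // budget left for further attempts

    CreateTxRetryBudget(final long totalMs, final long askTimeoutMs) {
        this.remainingMs = totalMs;
        this.askTimeoutMs = askTimeoutMs;
    }

    // Returns the delay before the next attempt, or -1 if the budget is exhausted.
    long nextDelayMs(final boolean requestTimedOut) {
        if (remainingMs <= 0) {
            return -1;
        }
        long delay = RETRY_INTERVAL_MS;
        if (requestTimedOut) {
            // The ask timeout has already elapsed, so charge it and retry right away
            remainingMs -= askTimeoutMs;
            delay = IMMEDIATE_RETRY_MS;
        }
        remainingMs -= delay;
        return delay;
    }
}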
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerActor.java
new file mode 100644 (file)
index 0000000..857c284
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import com.google.common.collect.Iterables;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.messages.OnInitialData;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.builder.DataContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+final class RootDataTreeChangeListenerActor extends DataTreeChangeListenerActor {
+    private final int shardCount;
+
+    // Initial messages, retaining order in which we have received them
+    private Map<ActorRef, Object> initialMessages = new LinkedHashMap<>();
+    private Deque<DataTreeChanged> otherMessages = new ArrayDeque<>();
+
+    private RootDataTreeChangeListenerActor(final DOMDataTreeChangeListener listener, final int shardCount) {
+        super(listener, YangInstanceIdentifier.of());
+        this.shardCount = shardCount;
+    }
+
+    @Override
+    void onInitialData(final OnInitialData message) {
+        final ActorRef sender = getSender();
+        verifyNotNull(initialMessages, "Received OnInitialData from %s after initial convergence", sender);
+
+        final Object prev = initialMessages.put(sender, message);
+        verify(prev == null, "Received OnInitialData from %s after %s", sender, prev);
+        checkInitialConvergence();
+    }
+
+    @Override
+    void dataTreeChanged(final DataTreeChanged message) {
+        if (initialMessages == null) {
+            super.dataTreeChanged(message);
+        } else {
+            processMessage(message);
+        }
+    }
+
+    private void processMessage(final DataTreeChanged message) {
+        // Put the message into initial messages if we do not have a message from that actor yet. If we do, just stash
+        // it to other messages for later processing.
+        if (initialMessages.putIfAbsent(getSender(), message) == null) {
+            checkInitialConvergence();
+        } else {
+            otherMessages.addLast(message);
+        }
+    }
+
+    private void checkInitialConvergence() {
+        if (initialMessages.size() != shardCount) {
+            // We do not have initial state from all shards yet
+            return;
+        }
+
+        /*
+         * We need to make-pretend that the data coming into the listener is coming from a single logical entity, where
+         * ordering is partially guaranteed (on shard boundaries). The data layout in shards is such that each DataTree
+         * is rooted at YangInstanceIdentifier.of(), but their contents vary:
+         *
+         * 1) non-default shards contain immediate children of root from one module
+         * 2) default shard contains everything else
+         * 3) there is no overlap between shards
+         *
+         * When we subscribe to each of the shards, each of them will report root as being written, which is an accurate
+         * view from each shard's perspective, but it does not reflect the aggregate reality.
+         *
+         * Construct an overall NormalizedNode view of the entire datastore by combining first-level children from all
+         * reported initial state reports, report that node as written and then report any additional deltas.
+         */
+        final List<DataTreeCandidate> initialChanges = new ArrayList<>();
+        // Reserve first item
+        initialChanges.add(null);
+
+        final DataContainerNodeBuilder<NodeIdentifier, ContainerNode> rootBuilder = Builders.containerBuilder()
+                .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME));
+        for (Object message : initialMessages.values()) {
+            if (message instanceof DataTreeChanged) {
+                final Collection<DataTreeCandidate> changes = ((DataTreeChanged) message).getChanges();
+                final DataTreeCandidate initial;
+                if (changes.size() != 1) {
+                    final Iterator<DataTreeCandidate> it = changes.iterator();
+                    initial = it.next();
+                    // Append the remaining candidates to the initial report; this should rarely, if ever, happen.
+                    it.forEachRemaining(initialChanges::add);
+                } else {
+                    initial = Iterables.get(changes, 0);
+                }
+
+                final NormalizedNode root = initial.getRootNode().getDataAfter();
+                verify(root instanceof ContainerNode, "Unexpected root node %s", root);
+                ((ContainerNode) root).body().forEach(rootBuilder::withChild);
+            }
+        }
+        // We will not be intercepting any other messages, allow initial state to be reclaimed as soon as possible
+        initialMessages = null;
+
+        // Replace first element with the combined initial change, report initial changes and clear the map
+        initialChanges.set(0, DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.of(),
+            DataTreeCandidateNodes.written(rootBuilder.build())));
+        super.dataTreeChanged(new DataTreeChanged(initialChanges));
+
+        // Now go through all messages we have held back and report them. Note we are removing them from the queue
+        // to allow them to be reclaimed as soon as possible.
+        for (DataTreeChanged message = otherMessages.poll(); message != null; message = otherMessages.poll()) {
+            super.dataTreeChanged(message);
+        }
+        otherMessages = null;
+    }
+
+    static Props props(final DOMDataTreeChangeListener instance, final int shardCount) {
+        return Props.create(RootDataTreeChangeListenerActor.class, instance, shardCount);
+    }
+}
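As a companion to the aggregation described in the checkInitialConvergence() comment block above, the merge step can be reduced to the following hedged sketch. It uses only the yangtools calls already visible in the actor; the helper class and method names are invented for illustration.

import java.util.Collection;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes;
import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;

// Minimal sketch (assumed helper, not part of the patch): merge non-overlapping per-shard
// roots into one logical root and present it as a single "written" candidate.
final class RootMerge {
    private RootMerge() {
        // utility holder for the example
    }

    static DataTreeCandidate mergeInitialRoots(final Collection<ContainerNode> shardRoots) {
        final var builder = Builders.containerBuilder()
            .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME));
        // Shards never overlap, so their top-level children can simply be concatenated
        for (ContainerNode shardRoot : shardRoots) {
            shardRoot.body().forEach(builder::withChild);
        }
        // Report the merged container as one root written at the empty instance identifier
        return DataTreeCandidates.newDataTreeCandidate(YangInstanceIdentifier.of(),
            DataTreeCandidateNodes.written(builder.build()));
    }
}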
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxy.java
new file mode 100644 (file)
index 0000000..43cbb7e
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.PoisonPill;
+import akka.dispatch.OnComplete;
+import com.google.common.collect.Maps;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import org.checkerframework.checker.lock.qual.GuardedBy;
+import org.checkerframework.checker.lock.qual.Holding;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class RootDataTreeChangeListenerProxy<L extends DOMDataTreeChangeListener> extends AbstractObjectRegistration<L> {
+    private abstract static class State {
+
+    }
+
+    private static final class ResolveShards extends State {
+        final Map<String, Object> localShards = new HashMap<>();
+        final int shardCount;
+
+        ResolveShards(final int shardCount) {
+            this.shardCount = shardCount;
+        }
+    }
+
+    private static final class Subscribed extends State {
+        final List<ActorSelection> subscriptions;
+        final ActorRef dtclActor;
+
+        Subscribed(final ActorRef dtclActor, final int shardCount) {
+            this.dtclActor = requireNonNull(dtclActor);
+            subscriptions = new ArrayList<>(shardCount);
+        }
+    }
+
+    private static final class Terminated extends State {
+
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(RootDataTreeChangeListenerProxy.class);
+
+    private final ActorUtils actorUtils;
+
+    @GuardedBy("this")
+    private State state;
+
+    RootDataTreeChangeListenerProxy(final ActorUtils actorUtils, final @NonNull L listener,
+            final Set<String> shardNames) {
+        super(listener);
+        this.actorUtils = requireNonNull(actorUtils);
+        state = new ResolveShards(shardNames.size());
+
+        for (String shardName : shardNames) {
+            actorUtils.findLocalShardAsync(shardName).onComplete(new OnComplete<ActorRef>() {
+                @Override
+                public void onComplete(final Throwable failure, final ActorRef success) {
+                    onFindLocalShardComplete(shardName, failure, success);
+                }
+            }, actorUtils.getClientDispatcher());
+        }
+    }
+
+    @Override
+    protected synchronized void removeRegistration() {
+        if (state instanceof Terminated) {
+            // Trivial case: we have already terminated on a failure, so this is a no-op
+        } else if (state instanceof ResolveShards) {
+            // Simple case: just mark the fact we were closed, terminating when resolution finishes
+            state = new Terminated();
+        } else if (state instanceof Subscribed subscribed) {
+            terminate(subscribed);
+        } else {
+            throw new IllegalStateException("Unhandled close in state " + state);
+        }
+    }
+
+    private synchronized void onFindLocalShardComplete(final String shardName, final Throwable failure,
+            final ActorRef shard) {
+        if (state instanceof ResolveShards resolveShards) {
+            localShardsResolved(resolveShards, shardName, failure, shard);
+        } else {
+            LOG.debug("{}: lookup for shard {} turned into a noop on state {}", logContext(), shardName, state);
+        }
+    }
+
+    @Holding("this")
+    private void localShardsResolved(final ResolveShards current, final String shardName, final Throwable failure,
+            final ActorRef shard) {
+        final Object result = failure != null ? failure : verifyNotNull(shard);
+        LOG.debug("{}: lookup for shard {} resulted in {}", logContext(), shardName, result);
+        current.localShards.put(shardName, result);
+
+        if (current.localShards.size() == current.shardCount) {
+            // We have all the responses we need
+            if (current.localShards.values().stream().anyMatch(Throwable.class::isInstance)) {
+                reportFailure(current.localShards);
+            } else {
+                subscribeToShards(current.localShards);
+            }
+        }
+    }
+
+    @Holding("this")
+    private void reportFailure(final Map<String, Object> localShards) {
+        for (Entry<String, Object> entry : Maps.filterValues(localShards, Throwable.class::isInstance).entrySet()) {
+            final Throwable cause = (Throwable) entry.getValue();
+            LOG.error("{}: Failed to find local shard {}, cannot register {} at root", logContext(), entry.getKey(),
+                getInstance(), cause);
+        }
+        state = new Terminated();
+    }
+
+    @Holding("this")
+    private void subscribeToShards(final Map<String, Object> localShards) {
+        // Safety check before we start doing anything
+        for (Entry<String, Object> entry : localShards.entrySet()) {
+            final Object obj = entry.getValue();
+            verify(obj instanceof ActorRef, "Unhandled response %s for shard %s", obj, entry.getKey());
+        }
+
+        // Instantiate the DTCL actor and update state
+        final ActorRef dtclActor = actorUtils.getActorSystem().actorOf(
+            RootDataTreeChangeListenerActor.props(getInstance(), localShards.size())
+              .withDispatcher(actorUtils.getNotificationDispatcherPath()));
+        state = new Subscribed(dtclActor, localShards.size());
+
+        // Subscribe to all shards
+        final RegisterDataTreeChangeListener regMessage = new RegisterDataTreeChangeListener(
+            YangInstanceIdentifier.of(), dtclActor, true);
+        for (Entry<String, Object> entry : localShards.entrySet()) {
+            // Do not retain references to localShards
+            final String shardName = entry.getKey();
+            final ActorRef shard = (ActorRef) entry.getValue();
+
+            actorUtils.executeOperationAsync(shard, regMessage,
+                actorUtils.getDatastoreContext().getShardInitializationTimeout()).onComplete(new OnComplete<>() {
+                    @Override
+                    public void onComplete(final Throwable failure, final Object result) {
+                        onShardSubscribed(shardName, failure, result);
+                    }
+                }, actorUtils.getClientDispatcher());
+        }
+    }
+
+    private synchronized void onShardSubscribed(final String shardName, final Throwable failure, final Object result) {
+        if (state instanceof Subscribed current) {
+            if (failure != null) {
+                LOG.error("{}: Shard {} failed to subscribe, terminating listener {}", logContext(),
+                    shardName, getInstance(), failure);
+                terminate(current);
+            } else {
+                onSuccessfulSubscription(current, shardName, (RegisterDataTreeNotificationListenerReply) result);
+            }
+        } else {
+            terminateSubscription(shardName, failure, result);
+        }
+    }
+
+    @Holding("this")
+    private void onSuccessfulSubscription(final Subscribed current, final String shardName,
+            final RegisterDataTreeNotificationListenerReply reply) {
+        final ActorSelection regActor = actorUtils.actorSelection(reply.getListenerRegistrationPath());
+        LOG.debug("{}: Shard {} subscribed at {}", logContext(), shardName, regActor);
+        current.subscriptions.add(regActor);
+    }
+
+    @Holding("this")
+    private void terminate(final Subscribed current) {
+        // Terminate the listener
+        current.dtclActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        // Terminate all subscriptions
+        for (ActorSelection regActor : current.subscriptions) {
+            regActor.tell(CloseDataTreeNotificationListenerRegistration.getInstance(), ActorRef.noSender());
+        }
+        state = new Terminated();
+    }
+
+    // This method should not modify internal state
+    private void terminateSubscription(final String shardName, final Throwable failure, final Object result) {
+        if (failure == null) {
+            final ActorSelection regActor = actorUtils.actorSelection(
+                ((RegisterDataTreeNotificationListenerReply) result).getListenerRegistrationPath());
+            LOG.debug("{}: Shard {} registered late, terminating subscription at {}", logContext(), shardName,
+                regActor);
+            regActor.tell(CloseDataTreeNotificationListenerRegistration.getInstance(), ActorRef.noSender());
+        } else {
+            LOG.debug("{}: Shard {} reported late failure", logContext(), shardName, failure);
+        }
+    }
+
+    private String logContext() {
+        return actorUtils.getDatastoreContext().getLogicalStoreType().toString();
+    }
+}
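The proxy documents its locking protocol with Checker Framework annotations rather than prose. The toy class below is not part of the patch; it only illustrates what the two annotations express: @GuardedBy names the lock protecting a field, and @Holding records that callers must already hold that lock when invoking the method.

import org.checkerframework.checker.lock.qual.GuardedBy;
import org.checkerframework.checker.lock.qual.Holding;

// Toy illustration of the lock annotations used in the proxy above.
final class GuardedCounter {
    @GuardedBy("this")
    private int value;

    synchronized void increment() {
        // 'this' is held here, so the @Holding contract of bump() is satisfied
        bump();
    }

    @Holding("this")
    private void bump() {
        value++;
    }
}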
index 51ee4d7b80ef7c9396930984b22e44c18bbb392c..1fcaa9d64d6b4a9eaee3377aa4fbcaea5ab5ebc4 100644 (file)
@@ -7,31 +7,42 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
 import akka.actor.ExtendedActorSystem;
+import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.actor.Status;
 import akka.actor.Status.Failure;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.SnapshotOffer;
 import akka.serialization.JavaSerializer;
 import akka.serialization.Serialization;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Ticker;
-import com.google.common.base.Verify;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Range;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.concurrent.TimeUnit;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import java.util.function.Supplier;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
@@ -41,8 +52,6 @@ import org.opendaylight.controller.cluster.access.commands.OutOfSequenceEnvelope
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.Request;
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
@@ -55,13 +64,10 @@ import org.opendaylight.controller.cluster.common.actor.CommonConfig;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers.DispatcherType;
 import org.opendaylight.controller.cluster.common.actor.MessageTracker;
-import org.opendaylight.controller.cluster.common.actor.MessageTracker.Error;
 import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
+import org.opendaylight.controller.cluster.datastore.actors.JsonExportActor;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardDataTreeListenerInfoMXBeanImpl;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
@@ -70,19 +76,21 @@ import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionCh
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.GetKnownClients;
+import org.opendaylight.controller.cluster.datastore.messages.GetKnownClientsReply;
 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
 import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
 import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.PersistAbortTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
-import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.DisableTrackingPayload;
 import org.opendaylight.controller.cluster.messaging.MessageAssembler;
 import org.opendaylight.controller.cluster.messaging.MessageSlicer;
 import org.opendaylight.controller.cluster.messaging.SliceOptions;
@@ -94,18 +102,19 @@ import org.opendaylight.controller.cluster.raft.RaftActor;
 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
 import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
 import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RequestLeadership;
 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
 import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -114,6 +123,7 @@ import scala.concurrent.duration.FiniteDuration;
  * <p>
  * Our Shard uses InMemoryDataTree as its internal representation and delegates all requests it
  */
+// FIXME: non-final for testing?
 public class Shard extends RaftActor {
 
     @VisibleForTesting
@@ -144,6 +154,9 @@ public class Shard extends RaftActor {
 
     private static final Collection<ABIVersion> SUPPORTED_ABIVERSIONS;
 
+    // Make sure to keep this in sync with the journal configuration in factory-akka.conf
+    public static final String NON_PERSISTENT_JOURNAL_ID = "akka.persistence.non-persistent.journal";
+
     static {
         final ABIVersion[] values = ABIVersion.values();
         final ABIVersion[] real = Arrays.copyOfRange(values, 1, values.length - 1);
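The static initializer shown above advertises every ABIVersion except the first and last constants. The self-contained illustration of that copyOfRange idiom below uses an invented enum; it is not the actual ABIVersion type, and the boundary-marker role of the trimmed constants is an assumption for the example.

import java.util.Arrays;
import java.util.List;

// Invented enum: the first and last constants stand in for boundary markers and are excluded
// from the advertised range, mirroring Arrays.copyOfRange(values, 1, values.length - 1).
enum SampleVersion {
    LOWER_BOUNDARY, V1, V2, V3, UPPER_BOUNDARY;

    static List<SampleVersion> supported() {
        final SampleVersion[] values = values();
        // keeps V1, V2, V3
        return List.of(Arrays.copyOfRange(values, 1, values.length - 1));
    }
}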
@@ -167,6 +180,7 @@ public class Shard extends RaftActor {
 
     private DatastoreContext datastoreContext;
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private final ShardCommitCoordinator commitCoordinator;
 
     private long transactionCommitTimeout;
@@ -177,6 +191,7 @@ public class Shard extends RaftActor {
 
     private final MessageTracker appendEntriesReplyTracker;
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private final ShardTransactionActorFactory transactionActorFactory;
 
     private final ShardSnapshotCohort snapshotCohort;
@@ -185,9 +200,12 @@ public class Shard extends RaftActor {
 
     private ShardSnapshot restoreFromSnapshot;
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private final ShardTransactionMessageRetrySupport messageRetrySupport;
 
-    private final FrontendMetadata frontendMetadata;
+    @VisibleForTesting
+    final FrontendMetadata frontendMetadata;
+
     private Map<FrontendIdentifier, LeaderFrontendState> knownFrontends = ImmutableMap.of();
     private boolean paused;
 
@@ -196,15 +214,27 @@ public class Shard extends RaftActor {
 
     private final MessageAssembler requestMessageAssembler;
 
-    protected Shard(final AbstractBuilder<?, ?> builder) {
+    private final ExportOnRecovery exportOnRecovery;
+
+    private final ActorRef exportActor;
+
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
+    Shard(final AbstractBuilder<?, ?> builder) {
         super(builder.getId().toString(), builder.getPeerAddresses(),
                 Optional.of(builder.getDatastoreContext().getShardRaftConfig()), DataStoreVersions.CURRENT_VERSION);
 
-        this.name = builder.getId().toString();
-        this.shardName = builder.getId().getShardName();
-        this.datastoreContext = builder.getDatastoreContext();
-        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
-        this.frontendMetadata = new FrontendMetadata(name);
+        name = builder.getId().toString();
+        shardName = builder.getId().getShardName();
+        datastoreContext = builder.getDatastoreContext();
+        restoreFromSnapshot = builder.getRestoreFromSnapshot();
+        frontendMetadata = new FrontendMetadata(name);
+        exportOnRecovery = datastoreContext.getExportOnRecovery();
+
+        exportActor = switch (exportOnRecovery) {
+            case Json -> getContext().actorOf(JsonExportActor.props(builder.getSchemaContext(),
+                datastoreContext.getRecoveryExportBaseDir()));
+            case Off -> null;
+        };
 
         setPersistence(datastoreContext.isPersistent());
 
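The constructor now selects the export actor with an arrow-form switch expression over the ExportOnRecovery enum. Below is a minimal standalone sketch of that construct with invented names; note that the patched code yields a null actor reference for the Off case.

// Assumed names; shows the exhaustive arrow-form switch expression yielding a value per constant.
enum ExportMode { JSON, OFF }

final class ExportSelector {
    private ExportSelector() {
        // example holder
    }

    static String describe(final ExportMode mode) {
        return switch (mode) {
            case JSON -> "export recovered state as JSON";
            // the patched constructor yields a null actor reference for the Off case
            case OFF -> "no export";
        };
    }
}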
@@ -214,19 +244,21 @@ public class Shard extends RaftActor {
                 new ShardDataTreeChangeListenerPublisherActorProxy(getContext(), name + "-DTCL-publisher", name);
         if (builder.getDataTree() != null) {
             store = new ShardDataTree(this, builder.getSchemaContext(), builder.getDataTree(),
-                    treeChangeListenerPublisher, name, frontendMetadata);
+                    treeChangeListenerPublisher, name,
+                    frontendMetadata);
         } else {
             store = new ShardDataTree(this, builder.getSchemaContext(), builder.getTreeType(),
-                    builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher, name, frontendMetadata);
+                    builder.getDatastoreContext().getStoreRoot(), treeChangeListenerPublisher, name,
+                    frontendMetadata);
         }
 
-        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);
+        shardMBean = ShardStats.create(name, datastoreContext.getDataStoreMXBeanType(), this);
 
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
         }
 
-        commitCoordinator = new ShardCommitCoordinator(store, LOG, this.name);
+        commitCoordinator = new ShardCommitCoordinator(store, LOG, name);
 
         setTransactionCommitTimeout();
 
@@ -242,16 +274,16 @@ public class Shard extends RaftActor {
                 self(), getContext(), shardMBean, builder.getId().getShardName());
 
         snapshotCohort = ShardSnapshotCohort.create(getContext(), builder.getId().getMemberName(), store, LOG,
-            this.name);
+            name, datastoreContext);
 
         messageRetrySupport = new ShardTransactionMessageRetrySupport(this);
 
-        responseMessageSlicer = MessageSlicer.builder().logContext(this.name)
+        responseMessageSlicer = MessageSlicer.builder().logContext(name)
                 .messageSliceSize(datastoreContext.getMaximumMessageSliceSize())
                 .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                 .expireStateAfterInactivity(2, TimeUnit.MINUTES).build();
 
-        requestMessageAssembler = MessageAssembler.builder().logContext(this.name)
+        requestMessageAssembler = MessageAssembler.builder().logContext(name)
                 .fileBackedStreamFactory(getRaftActorContext().getFileBackedOutputStreamFactory())
                 .assembledMessageCallback((message, sender) -> self().tell(message, sender))
                 .expireStateAfterInactivity(datastoreContext.getRequestTimeout(), TimeUnit.NANOSECONDS).build();
@@ -267,13 +299,13 @@ public class Shard extends RaftActor {
     }
 
     private Optional<ActorRef> createRoleChangeNotifier(final String shardId) {
-        ActorRef shardRoleChangeNotifier = this.getContext().actorOf(
+        ActorRef shardRoleChangeNotifier = getContext().actorOf(
             RoleChangeNotifier.getProps(shardId), shardId + "-notifier");
         return Optional.of(shardRoleChangeNotifier);
     }
 
     @Override
-    public void postStop() {
+    public final void postStop() throws Exception {
         LOG.info("Stopping Shard {}", persistenceId());
 
         super.postStop();
@@ -291,64 +323,68 @@ public class Shard extends RaftActor {
     }
 
     @Override
-    protected void handleRecover(final Object message) {
+    protected final void handleRecover(final Object message) {
         LOG.debug("{}: onReceiveRecover: Received message {} from {}", persistenceId(), message.getClass(),
             getSender());
 
         super.handleRecover(message);
+
+        switch (exportOnRecovery) {
+            case Json:
+                if (message instanceof SnapshotOffer) {
+                    exportActor.tell(new JsonExportActor.ExportSnapshot(store.readCurrentData().orElseThrow(), name),
+                            ActorRef.noSender());
+                } else if (message instanceof ReplicatedLogEntry replicatedLogEntry) {
+                    exportActor.tell(new JsonExportActor.ExportJournal(replicatedLogEntry), ActorRef.noSender());
+                } else if (message instanceof RecoveryCompleted) {
+                    exportActor.tell(new JsonExportActor.FinishExport(name), ActorRef.noSender());
+                    exportActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+                }
+                break;
+            case Off:
+            default:
+                break;
+        }
+
         if (LOG.isTraceEnabled()) {
             appendEntriesReplyTracker.begin();
         }
     }
 
     @Override
+    // non-final for TestShard
     protected void handleNonRaftCommand(final Object message) {
-        try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
-            final Optional<Error> maybeError = context.error();
+        try (var context = appendEntriesReplyTracker.received(message)) {
+            final var maybeError = context.error();
             if (maybeError.isPresent()) {
                 LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
-                    maybeError.get());
+                    maybeError.orElseThrow());
             }
 
             store.resetTransactionBatch();
 
-            if (message instanceof RequestEnvelope) {
-                handleRequestEnvelope((RequestEnvelope)message);
+            if (message instanceof RequestEnvelope request) {
+                handleRequestEnvelope(request);
             } else if (MessageAssembler.isHandledMessage(message)) {
                 handleRequestAssemblerMessage(message);
-            } else if (message instanceof ConnectClientRequest) {
-                handleConnectClient((ConnectClientRequest)message);
-            } else if (CreateTransaction.isSerializedType(message)) {
-                handleCreateTransaction(message);
-            } else if (message instanceof BatchedModifications) {
-                handleBatchedModifications((BatchedModifications)message);
-            } else if (message instanceof ForwardedReadyTransaction) {
-                handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
-            } else if (message instanceof ReadyLocalTransaction) {
-                handleReadyLocalTransaction((ReadyLocalTransaction)message);
-            } else if (CanCommitTransaction.isSerializedType(message)) {
-                handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
-            } else if (CommitTransaction.isSerializedType(message)) {
-                handleCommitTransaction(CommitTransaction.fromSerializable(message));
-            } else if (AbortTransaction.isSerializedType(message)) {
-                handleAbortTransaction(AbortTransaction.fromSerializable(message));
-            } else if (CloseTransactionChain.isSerializedType(message)) {
-                closeTransactionChain(CloseTransactionChain.fromSerializable(message));
-            } else if (message instanceof RegisterDataTreeChangeListener) {
-                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
-            } else if (message instanceof UpdateSchemaContext) {
-                updateSchemaContext((UpdateSchemaContext) message);
-            } else if (message instanceof PeerAddressResolved) {
-                PeerAddressResolved resolved = (PeerAddressResolved) message;
+            } else if (message instanceof ConnectClientRequest request) {
+                handleConnectClient(request);
+            } else if (message instanceof DataTreeChangedReply) {
+                // Ignore reply
+            } else if (message instanceof RegisterDataTreeChangeListener request) {
+                treeChangeSupport.onMessage(request, isLeader(), hasLeader());
+            } else if (message instanceof UpdateSchemaContext request) {
+                updateSchemaContext(request);
+            } else if (message instanceof PeerAddressResolved resolved) {
                 setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
             } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                 commitTimeoutCheck();
-            } else if (message instanceof DatastoreContext) {
-                onDatastoreContext((DatastoreContext)message);
+            } else if (message instanceof DatastoreContext request) {
+                onDatastoreContext(request);
             } else if (message instanceof RegisterRoleChangeListener) {
-                roleChangeNotifier.get().forward(message, context());
-            } else if (message instanceof FollowerInitialSyncUpStatus) {
-                shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
+                roleChangeNotifier.orElseThrow().forward(message, context());
+            } else if (message instanceof FollowerInitialSyncUpStatus request) {
+                shardMBean.setFollowerInitialSyncStatus(request.isInitialSyncDone());
                 context().parent().tell(message, self());
             } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
                 sender().tell(getShardMBean(), self());
@@ -356,21 +392,37 @@ public class Shard extends RaftActor {
                 sender().tell(store.getDataTree(), self());
             } else if (message instanceof ServerRemoved) {
                 context().parent().forward(message, context());
-            } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
-                messageRetrySupport.onTimerMessage(message);
-            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
-                store.processCohortRegistryCommand(getSender(),
-                        (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
-            } else if (message instanceof PersistAbortTransactionPayload) {
-                final TransactionIdentifier txId = ((PersistAbortTransactionPayload) message).getTransactionId();
-                persistPayload(txId, AbortTransactionPayload.create(
-                        txId, datastoreContext.getInitialPayloadSerializedBufferCapacity()), true);
+            } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand request) {
+                store.processCohortRegistryCommand(getSender(), request);
             } else if (message instanceof MakeLeaderLocal) {
                 onMakeLeaderLocal();
             } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
                 store.resumeNextPendingTransaction();
+            } else if (GetKnownClients.INSTANCE.equals(message)) {
+                handleGetKnownClients();
             } else if (!responseMessageSlicer.handleMessage(message)) {
-                super.handleNonRaftCommand(message);
+                // Ask-based protocol messages
+                if (CreateTransaction.isSerializedType(message)) {
+                    handleCreateTransaction(message);
+                } else if (message instanceof BatchedModifications request) {
+                    handleBatchedModifications(request);
+                } else if (message instanceof ForwardedReadyTransaction request) {
+                    handleForwardedReadyTransaction(request);
+                } else if (message instanceof ReadyLocalTransaction request) {
+                    handleReadyLocalTransaction(request);
+                } else if (CanCommitTransaction.isSerializedType(message)) {
+                    handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
+                } else if (CommitTransaction.isSerializedType(message)) {
+                    handleCommitTransaction(CommitTransaction.fromSerializable(message));
+                } else if (AbortTransaction.isSerializedType(message)) {
+                    handleAbortTransaction(AbortTransaction.fromSerializable(message));
+                } else if (CloseTransactionChain.isSerializedType(message)) {
+                    closeTransactionChain(CloseTransactionChain.fromSerializable(message));
+                } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
+                    messageRetrySupport.onTimerMessage(message);
+                } else {
+                    super.handleNonRaftCommand(message);
+                }
             }
         }
     }
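Most of the dispatch changes in this hunk replace an instanceof check followed by a cast with Java's pattern matching for instanceof, which binds the narrowed value directly. A tiny standalone example of the binding form follows; the message types are arbitrary and unrelated to the Shard messages.

// Arbitrary message types; the pattern variable is bound only when the test succeeds,
// removing the explicit casts used by the old dispatch chain.
final class MessageClassifier {
    private MessageClassifier() {
        // example holder
    }

    static String classify(final Object message) {
        if (message instanceof Integer number) {
            return "number " + number;
        } else if (message instanceof String text) {
            return "text of length " + text.length();
        } else {
            return "unhandled " + message.getClass().getSimpleName();
        }
    }
}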
@@ -415,22 +467,50 @@ public class Shard extends RaftActor {
         requestMessageAssembler.checkExpiredAssembledMessageState();
     }
 
-    private Optional<Long> updateAccess(final SimpleShardDataTreeCohort cohort) {
-        final FrontendIdentifier frontend = cohort.getIdentifier().getHistoryId().getClientId().getFrontendId();
+    private OptionalLong updateAccess(final SimpleShardDataTreeCohort cohort) {
+        final FrontendIdentifier frontend = cohort.transactionId().getHistoryId().getClientId().getFrontendId();
         final LeaderFrontendState state = knownFrontends.get(frontend);
         if (state == null) {
             // Not tell-based protocol, do nothing
-            return Optional.absent();
+            return OptionalLong.empty();
         }
 
         if (isIsolatedLeader()) {
             // We are isolated and no new request can come through until we emerge from it. We are still updating
             // liveness of frontend when we see it attempting to communicate. Use the last access timer.
-            return Optional.of(state.getLastSeenTicks());
+            return OptionalLong.of(state.getLastSeenTicks());
         }
 
         // If this frontend has freshly connected, give it some time to catch up before killing its transactions.
-        return Optional.of(state.getLastConnectTicks());
+        return OptionalLong.of(state.getLastConnectTicks());
+    }
+
+    private void disableTracking(final DisableTrackingPayload payload) {
+        final ClientIdentifier clientId = payload.getIdentifier();
+        LOG.debug("{}: disabling tracking of {}", persistenceId(), clientId);
+        frontendMetadata.disableTracking(clientId);
+
+        if (isLeader()) {
+            final FrontendIdentifier frontendId = clientId.getFrontendId();
+            final LeaderFrontendState frontend = knownFrontends.get(frontendId);
+            if (frontend != null) {
+                if (clientId.equals(frontend.getIdentifier())) {
+                    if (!(frontend instanceof LeaderFrontendState.Disabled)) {
+                        verify(knownFrontends.replace(frontendId, frontend,
+                            new LeaderFrontendState.Disabled(persistenceId(), clientId, store)));
+                        LOG.debug("{}: leader state for {} disabled", persistenceId(), clientId);
+                    } else {
+                        LOG.debug("{}: leader state {} is already disabled", persistenceId(), frontend);
+                    }
+                } else {
+                    LOG.debug("{}: leader state {} does not match {}", persistenceId(), frontend, clientId);
+                }
+            } else {
+                LOG.debug("{}: leader state for {} not found", persistenceId(), clientId);
+                knownFrontends.put(frontendId, new LeaderFrontendState.Disabled(persistenceId(), clientId,
+                    getDataStore()));
+            }
+        }
     }
 
     private void onMakeLeaderLocal() {
@@ -460,8 +540,7 @@ public class Shard extends RaftActor {
     }
 
     // Acquire our frontend tracking handle and verify generation matches
-    @Nullable
-    private LeaderFrontendState findFrontend(final ClientIdentifier clientId) throws RequestException {
+    private @Nullable LeaderFrontendState findFrontend(final ClientIdentifier clientId) throws RequestException {
         final LeaderFrontendState existing = knownFrontends.get(clientId.getFrontendId());
         if (existing != null) {
             final int cmp = Long.compareUnsigned(existing.getIdentifier().getGeneration(), clientId.getGeneration());
@@ -495,8 +574,7 @@ public class Shard extends RaftActor {
         throw new OutOfSequenceEnvelopeException(0);
     }
 
-    @Nonnull
-    private static ABIVersion selectVersion(final ConnectClientRequest message) {
+    private static @NonNull ABIVersion selectVersion(final ConnectClientRequest message) {
         final Range<ABIVersion> clientRange = Range.closed(message.getMinVersion(), message.getMaxVersion());
         for (ABIVersion v : SUPPORTED_ABIVERSIONS) {
             if (clientRange.contains(v)) {
@@ -528,7 +606,7 @@ public class Shard extends RaftActor {
             final ABIVersion selectedVersion = selectVersion(message);
             final LeaderFrontendState frontend;
             if (existing == null) {
-                frontend = new LeaderFrontendState(persistenceId(), clientId, store);
+                frontend = new LeaderFrontendState.Enabled(persistenceId(), clientId, store);
                 knownFrontends.put(clientId.getFrontendId(), frontend);
                 LOG.debug("{}: created state {} for client {}", persistenceId(), frontend, clientId);
             } else {
@@ -544,8 +622,7 @@ public class Shard extends RaftActor {
         }
     }
 
-    @Nullable
-    private RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
+    private @Nullable RequestSuccess<?, ?> handleRequest(final RequestEnvelope envelope, final long now)
             throws RequestException {
         // We are not the leader, hence we want to fail-fast.
         if (!isLeader() || paused || !isLeaderActive()) {
@@ -555,14 +632,12 @@ public class Shard extends RaftActor {
             throw new NotLeaderException(getSelf());
         }
 
-        final Request<?, ?> request = envelope.getMessage();
-        if (request instanceof TransactionRequest) {
-            final TransactionRequest<?> txReq = (TransactionRequest<?>)request;
-            final ClientIdentifier clientId = txReq.getTarget().getHistoryId().getClientId();
+        final var request = envelope.getMessage();
+        if (request instanceof TransactionRequest<?> txReq) {
+            final var clientId = txReq.getTarget().getHistoryId().getClientId();
             return getFrontend(clientId).handleTransactionRequest(txReq, envelope, now);
-        } else if (request instanceof LocalHistoryRequest) {
-            final LocalHistoryRequest<?> lhReq = (LocalHistoryRequest<?>)request;
-            final ClientIdentifier clientId = lhReq.getTarget().getClientId();
+        } else if (request instanceof LocalHistoryRequest<?> lhReq) {
+            final var clientId = lhReq.getTarget().getClientId();
             return getFrontend(clientId).handleLocalHistoryRequest(lhReq, envelope, now);
         } else {
             LOG.warn("{}: rejecting unsupported request {}", persistenceId(), request);
@@ -570,36 +645,48 @@ public class Shard extends RaftActor {
         }
     }
 
+    private void handleGetKnownClients() {
+        final ImmutableSet<ClientIdentifier> clients;
+        if (isLeader()) {
+            clients = knownFrontends.values().stream()
+                    .map(LeaderFrontendState::getIdentifier)
+                    .collect(ImmutableSet.toImmutableSet());
+        } else {
+            clients = frontendMetadata.getClients();
+        }
+        sender().tell(new GetKnownClientsReply(clients), self());
+    }
+
     private boolean hasLeader() {
         return getLeaderId() != null;
     }
 
-    public int getPendingTxCommitQueueSize() {
+    final int getPendingTxCommitQueueSize() {
         return store.getQueueSize();
     }
 
-    public int getCohortCacheSize() {
+    final int getCohortCacheSize() {
         return commitCoordinator.getCohortCacheSize();
     }
 
     @Override
-    protected Optional<ActorRef> getRoleChangeNotifier() {
+    protected final Optional<ActorRef> getRoleChangeNotifier() {
         return roleChangeNotifier;
     }
 
-    String getShardName() {
+    final String getShardName() {
         return shardName;
     }
 
     @Override
-    protected LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
+    protected final LeaderStateChanged newLeaderStateChanged(final String memberId, final String leaderId,
             final short leaderPayloadVersion) {
         return isLeader() ? new ShardLeaderStateChanged(memberId, leaderId, store.getDataTree(), leaderPayloadVersion)
                 : new ShardLeaderStateChanged(memberId, leaderId, leaderPayloadVersion);
     }
 
-    protected void onDatastoreContext(final DatastoreContext context) {
-        datastoreContext = context;
+    private void onDatastoreContext(final DatastoreContext context) {
+        datastoreContext = verifyNotNull(context);
 
         setTransactionCommitTimeout();
 
@@ -609,8 +696,9 @@ public class Shard extends RaftActor {
     }
 
     // applyState() will be invoked once consensus is reached on the payload
+    // non-final for mocking
     void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
-        boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
+        final boolean canSkipPayload = !hasFollowers() && !persistence().isRecoveryApplicable();
         if (canSkipPayload) {
             applyState(self(), id, payload);
         } else {
@@ -619,14 +707,16 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleCommitTransaction(final CommitTransaction commit) {
+        final var txId = commit.getTransactionId();
         if (isLeader()) {
-            commitCoordinator.handleCommit(commit.getTransactionId(), getSender(), this);
+            askProtocolEncountered(txId);
+            commitCoordinator.handleCommit(txId, getSender(), this);
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (leader == null) {
-                messageRetrySupport.addMessageToRetry(commit, getSender(),
-                        "Could not commit transaction " + commit.getTransactionId());
+                messageRetrySupport.addMessageToRetry(commit, getSender(), "Could not commit transaction " + txId);
             } else {
                 LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                 leader.forward(commit, getContext());
@@ -634,16 +724,19 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
-        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionId());
+        final var txId = canCommit.getTransactionId();
+        LOG.debug("{}: Can committing transaction {}", persistenceId(), txId);
 
         if (isLeader()) {
-            commitCoordinator.handleCanCommit(canCommit.getTransactionId(), getSender(), this);
+            askProtocolEncountered(txId);
+            commitCoordinator.handleCanCommit(txId, getSender(), this);
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (leader == null) {
                 messageRetrySupport.addMessageToRetry(canCommit, getSender(),
-                        "Could not canCommit transaction " + canCommit.getTransactionId());
+                        "Could not canCommit transaction " + txId);
             } else {
                 LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                 leader.forward(canCommit, getContext());
@@ -652,7 +745,10 @@ public class Shard extends RaftActor {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    protected void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
+    @Deprecated(since = "9.0.0", forRemoval = true)
+    private void handleBatchedModificationsLocal(final BatchedModifications batched, final ActorRef sender) {
+        askProtocolEncountered(batched.getTransactionId());
+
         try {
             commitCoordinator.handleBatchedModifications(batched, sender, this);
         } catch (Exception e) {
@@ -662,6 +758,7 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleBatchedModifications(final BatchedModifications batched) {
         // This message is sent to prepare the modifications transaction directly on the Shard as an
         // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
@@ -679,7 +776,7 @@ public class Shard extends RaftActor {
         if (isLeader() && isLeaderActive) {
             handleBatchedModificationsLocal(batched, getSender());
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(batched, getSender(),
                         "Could not process BatchedModifications " + batched.getTransactionId());
@@ -688,9 +785,8 @@ public class Shard extends RaftActor {
                 // we need to reconstruct previous BatchedModifications from the transaction
                 // DataTreeModification, honoring the max batched modification count, and forward all the
                 // previous BatchedModifications to the new leader.
-                Collection<BatchedModifications> newModifications = commitCoordinator
-                        .createForwardedBatchedModifications(batched,
-                                datastoreContext.getShardBatchedModificationCount());
+                final var newModifications = commitCoordinator.createForwardedBatchedModifications(batched,
+                    datastoreContext.getShardBatchedModificationCount());
 
                 LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                         newModifications.size(), leader);
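
When leadership moves away from a shard that has already accumulated a readied transaction, the hunk above rebuilds BatchedModifications from the DataTreeModification while honoring the configured maximum batch size. The splitting step itself reduces to chunking a list of operations; the sketch below shows that chunking with a hypothetical Rebatcher helper (not part of the controller code):

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical helper: split accumulated operations into batches of at most
    // maxBatchSize entries, mirroring createForwardedBatchedModifications().
    final class Rebatcher {
        static <T> List<List<T>> rebatch(final List<T> operations, final int maxBatchSize) {
            final List<List<T>> batches = new ArrayList<>();
            for (int i = 0; i < operations.size(); i += maxBatchSize) {
                batches.add(new ArrayList<>(
                    operations.subList(i, Math.min(i + maxBatchSize, operations.size()))));
            }
            return batches;
        }
    }
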
@@ -719,23 +815,25 @@ public class Shard extends RaftActor {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
-        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());
+    @Deprecated(since = "9.0.0", forRemoval = true)
+    private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
+        final var txId = message.getTransactionId();
+        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), txId);
 
-        boolean isLeaderActive = isLeaderActive();
+        final var isLeaderActive = isLeaderActive();
         if (isLeader() && isLeaderActive) {
+            askProtocolEncountered(txId);
             try {
                 commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
             } catch (Exception e) {
-                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
-                        message.getTransactionId(), e);
+                LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(), txId, e);
                 getSender().tell(new Failure(e), getSelf());
             }
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(message, getSender(),
-                        "Could not process ready local transaction " + message.getTransactionId());
+                        "Could not process ready local transaction " + txId);
             } else {
                 LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                 message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
@@ -744,21 +842,23 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
         LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());
 
-        boolean isLeaderActive = isLeaderActive();
+        final var isLeaderActive = isLeaderActive();
         if (isLeader() && isLeaderActive) {
+            askProtocolEncountered(forwardedReady.getTransactionId());
             commitCoordinator.handleForwardedReadyTransaction(forwardedReady, getSender(), this);
         } else {
-            ActorSelection leader = getLeader();
+            final var leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
                         "Could not process forwarded ready transaction " + forwardedReady.getTransactionId());
             } else {
                 LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);
 
-                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
+                final var readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                         forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit(),
                         forwardedReady.getParticipatingShardNames());
                 readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
@@ -767,14 +867,18 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleAbortTransaction(final AbortTransaction abort) {
-        doAbortTransaction(abort.getTransactionId(), getSender());
+        final var transactionId = abort.getTransactionId();
+        askProtocolEncountered(transactionId);
+        doAbortTransaction(transactionId, getSender());
     }
 
-    void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
+    final void doAbortTransaction(final Identifier transactionID, final ActorRef sender) {
         commitCoordinator.handleAbort(transactionID, sender, this);
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void handleCreateTransaction(final Object message) {
         if (isLeader()) {
             createTransaction(CreateTransaction.fromSerializable(message));
@@ -786,12 +890,12 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private void closeTransactionChain(final CloseTransactionChain closeTransactionChain) {
         if (isLeader()) {
-            final LocalHistoryIdentifier id = closeTransactionChain.getIdentifier();
-            // FIXME: CONTROLLER-1628: stage purge once no transactions are present
-            store.closeTransactionChain(id, null);
-            store.purgeTransactionChain(id, null);
+            final var id = closeTransactionChain.getIdentifier();
+            askProtocolEncountered(id.getClientId());
+            store.closeTransactionChain(id);
         } else if (getLeader() != null) {
             getLeader().forward(closeTransactionChain, getContext());
         } else {
@@ -799,15 +903,18 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void createTransaction(final CreateTransaction createTransaction) {
+        askProtocolEncountered(createTransaction.getTransactionId());
+
         try {
             if (TransactionType.fromInt(createTransaction.getTransactionType()) != TransactionType.READ_ONLY
                     && failIfIsolatedLeader(getSender())) {
                 return;
             }
 
-            ActorRef transactionActor = createTransaction(createTransaction.getTransactionType(),
+            final var transactionActor = createTransaction(createTransaction.getTransactionType(),
                 createTransaction.getTransactionId());
 
             getSender().tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor),
@@ -817,18 +924,42 @@ public class Shard extends RaftActor {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     private ActorRef createTransaction(final int transactionType, final TransactionIdentifier transactionId) {
         LOG.debug("{}: Creating transaction : {} ", persistenceId(), transactionId);
         return transactionActorFactory.newShardTransaction(TransactionType.fromInt(transactionType),
             transactionId);
     }
 
+    // Called on leader only
+    @Deprecated(since = "9.0.0", forRemoval = true)
+    private void askProtocolEncountered(final TransactionIdentifier transactionId) {
+        askProtocolEncountered(transactionId.getHistoryId().getClientId());
+    }
+
+    // Called on leader only
+    @Deprecated(since = "9.0.0", forRemoval = true)
+    private void askProtocolEncountered(final ClientIdentifier clientId) {
+        final var frontend = clientId.getFrontendId();
+        final var state = knownFrontends.get(frontend);
+        if (!(state instanceof LeaderFrontendState.Disabled)) {
+            LOG.debug("{}: encountered ask-based client {}, disabling transaction tracking", persistenceId(), clientId);
+            if (knownFrontends.isEmpty()) {
+                knownFrontends = new HashMap<>();
+            }
+            knownFrontends.put(frontend, new LeaderFrontendState.Disabled(persistenceId(), clientId, getDataStore()));
+
+            persistPayload(clientId, DisableTrackingPayload.create(clientId,
+                datastoreContext.getInitialPayloadSerializedBufferCapacity()), false);
+        }
+    }
+
     private void updateSchemaContext(final UpdateSchemaContext message) {
-        updateSchemaContext(message.getSchemaContext());
+        updateSchemaContext(message.modelContext());
     }
 
     @VisibleForTesting
-    void updateSchemaContext(final SchemaContext schemaContext) {
+    void updateSchemaContext(final @NonNull EffectiveModelContext schemaContext) {
         store.updateSchemaContext(schemaContext);
     }
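
The askProtocolEncountered() methods added above downgrade a client to LeaderFrontendState.Disabled the first time an ask-based request is seen, and persist a DisableTrackingPayload so the decision survives leader restarts. A self-contained sketch of that bookkeeping, using FrontendTracker, ClientState and Disabled as hypothetical stand-ins:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch only: ClientState/Disabled mimic LeaderFrontendState, and
    // persistDisableTracking() mimics persisting a DisableTrackingPayload.
    final class FrontendTracker {
        interface ClientState {
        }

        static final class Disabled implements ClientState {
        }

        private final Map<String, ClientState> knownFrontends = new HashMap<>();

        void askProtocolEncountered(final String clientId) {
            if (!(knownFrontends.get(clientId) instanceof Disabled)) {
                // Downgrade once per client and remember the decision durably.
                knownFrontends.put(clientId, new Disabled());
                persistDisableTracking(clientId);
            }
        }

        private void persistDisableTracking(final String clientId) {
            System.out.println("persisting DisableTracking for " + clientId);
        }
    }
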
 
@@ -838,14 +969,12 @@ public class Shard extends RaftActor {
     }
 
     @Override
-    @VisibleForTesting
-    public RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
+    protected final RaftActorSnapshotCohort getRaftActorSnapshotCohort() {
         return snapshotCohort;
     }
 
     @Override
-    @Nonnull
-    protected RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
+    protected final RaftActorRecoveryCohort getRaftActorRecoveryCohort() {
         if (restoreFromSnapshot == null) {
             return ShardRecoveryCoordinator.create(store, persistenceId(), LOG);
         }
@@ -854,17 +983,18 @@ public class Shard extends RaftActor {
     }
 
     @Override
+    // non-final for testing
     protected void onRecoveryComplete() {
         restoreFromSnapshot = null;
 
         //notify shard manager
-        getContext().parent().tell(new ActorInitialized(), getSelf());
+        getContext().parent().tell(new ActorInitialized(getSelf()), ActorRef.noSender());
 
         // Being paranoid here - this method should only be called once but just in case...
         if (txCommitTimeoutCheckSchedule == null) {
             // Schedule a message to be periodically sent to check if the current in-progress
             // transaction should be expired and aborted.
-            FiniteDuration period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
+            final var period = FiniteDuration.create(transactionCommitTimeout / 3, TimeUnit.MILLISECONDS);
             txCommitTimeoutCheckSchedule = getContext().system().scheduler().schedule(
                     period, period, getSelf(),
                     TX_COMMIT_TIMEOUT_CHECK_MESSAGE, getContext().dispatcher(), ActorRef.noSender());
@@ -872,10 +1002,15 @@ public class Shard extends RaftActor {
     }
 
     @Override
-    protected void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
-        if (data instanceof Payload) {
+    protected final void applyState(final ActorRef clientActor, final Identifier identifier, final Object data) {
+        if (data instanceof Payload payload) {
+            if (payload instanceof DisableTrackingPayload disableTracking) {
+                disableTracking(disableTracking);
+                return;
+            }
+
             try {
-                store.applyReplicatedPayload(identifier, (Payload)data);
+                store.applyReplicatedPayload(identifier, payload);
             } catch (DataValidationFailedException | IOException e) {
                 LOG.error("{}: Error applying replica {}", persistenceId(), identifier, e);
             }
@@ -885,7 +1020,7 @@ public class Shard extends RaftActor {
     }
 
     @Override
-    protected void onStateChanged() {
+    protected final void onStateChanged() {
         boolean isLeader = isLeader();
         boolean hasLeader = hasLeader();
         treeChangeSupport.onLeadershipChange(isLeader, hasLeader);
@@ -908,7 +1043,7 @@ public class Shard extends RaftActor {
     }
 
     @Override
-    protected void onLeaderChanged(final String oldLeader, final String newLeader) {
+    protected final void onLeaderChanged(final String oldLeader, final String newLeader) {
         shardMBean.incrementLeadershipChangeCount();
         paused = false;
 
@@ -929,7 +1064,9 @@ public class Shard extends RaftActor {
             // them to transaction messages and send to the new leader.
             ActorSelection leader = getLeader();
             if (leader != null) {
-                Collection<?> messagesToForward = convertPendingTransactionsToMessages();
+                // Clears all pending transactions and converts them to messages to be forwarded to a new leader.
+                Collection<?> messagesToForward = commitCoordinator.convertPendingTransactionsToMessages(
+                    datastoreContext.getShardBatchedModificationCount());
 
                 if (!messagesToForward.isEmpty()) {
                     LOG.debug("{}: Forwarding {} pending transaction messages to leader {}", persistenceId(),
@@ -947,7 +1084,7 @@ public class Shard extends RaftActor {
             }
         } else {
             // We have become the leader, we need to reconstruct frontend state
-            knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
+            knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
             LOG.debug("{}: became leader with frontend state for {}", persistenceId(), knownFrontends.keySet());
         }
 
@@ -956,18 +1093,8 @@ public class Shard extends RaftActor {
         }
     }
 
-    /**
-     * Clears all pending transactions and converts them to messages to be forwarded to a new leader.
-     *
-     * @return the converted messages
-     */
-    public Collection<?> convertPendingTransactionsToMessages() {
-        return commitCoordinator.convertPendingTransactionsToMessages(
-                datastoreContext.getShardBatchedModificationCount());
-    }
-
     @Override
-    protected void pauseLeader(final Runnable operation) {
+    protected final void pauseLeader(final Runnable operation) {
         LOG.debug("{}: In pauseLeader, operation: {}", persistenceId(), operation);
         paused = true;
 
@@ -979,42 +1106,55 @@ public class Shard extends RaftActor {
     }
 
     @Override
-    protected void unpauseLeader() {
+    protected final void unpauseLeader() {
         LOG.debug("{}: In unpauseLeader", persistenceId());
         paused = false;
 
         store.setRunOnPendingTransactionsComplete(null);
 
         // Restore tell-based protocol state as if we were becoming the leader
-        knownFrontends = Verify.verifyNotNull(frontendMetadata.toLeaderState(this));
+        knownFrontends = verifyNotNull(frontendMetadata.toLeaderState(this));
+    }
+
+    @Override
+    protected final OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
+        return OnDemandShardState.newBuilder()
+            .treeChangeListenerActors(treeChangeSupport.getListenerActors())
+            .commitCohortActors(store.getCohortActors());
     }
 
     @Override
-    protected OnDemandRaftState.AbstractBuilder<?, ?> newOnDemandRaftStateBuilder() {
-        return OnDemandShardState.newBuilder().treeChangeListenerActors(treeChangeSupport.getListenerActors())
-                .commitCohortActors(store.getCohortActors());
+    public final String persistenceId() {
+        return name;
     }
 
     @Override
-    public String persistenceId() {
-        return this.name;
+    public final String journalPluginId() {
+        // This method may be invoked from super constructor (wonderful), hence we also need to handle the case of
+        // the field being uninitialized because our constructor is not finished.
+        if (datastoreContext != null && !datastoreContext.isPersistent()) {
+            return NON_PERSISTENT_JOURNAL_ID;
+        }
+        return super.journalPluginId();
     }
 
     @VisibleForTesting
-    ShardCommitCoordinator getCommitCoordinator() {
+    final ShardCommitCoordinator getCommitCoordinator() {
         return commitCoordinator;
     }
 
-    public DatastoreContext getDatastoreContext() {
+    // non-final for mocking
+    DatastoreContext getDatastoreContext() {
         return datastoreContext;
     }
 
     @VisibleForTesting
-    public ShardDataTree getDataStore() {
+    final ShardDataTree getDataStore() {
         return store;
     }
 
     @VisibleForTesting
+    // non-final for mocking
     ShardStats getShardMBean() {
         return shardMBean;
     }
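
The journalPluginId() override above has to cope with being invoked from the superclass constructor, before datastoreContext is assigned. The hazard is generic Java behavior: a virtual call made from a super constructor runs before the subclass fields are initialized. A small illustration, with Base and Derived as hypothetical classes unrelated to the controller code:

    // A virtual call from a super constructor reaches the override before the
    // subclass field is assigned, so the override must tolerate null.
    class Base {
        Base() {
            System.out.println("plugin id during construction: " + pluginId());
        }

        String pluginId() {
            return "default";
        }
    }

    class Derived extends Base {
        private final String configured;

        Derived(final String configured) {
            super();
            this.configured = configured;
        }

        @Override
        String pluginId() {
            // Still null when called from Base(): fall back to the default.
            return configured != null ? configured : super.pluginId();
        }
    }
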
@@ -1024,21 +1164,22 @@ public class Shard extends RaftActor {
     }
 
     public abstract static class AbstractBuilder<T extends AbstractBuilder<T, S>, S extends Shard> {
-        private final Class<S> shardClass;
+        private final Class<? extends S> shardClass;
         private ShardIdentifier id;
         private Map<String, String> peerAddresses = Collections.emptyMap();
         private DatastoreContext datastoreContext;
-        private SchemaContextProvider schemaContextProvider;
+        private Supplier<@NonNull EffectiveModelContext> schemaContextProvider;
         private DatastoreSnapshot.ShardSnapshot restoreFromSnapshot;
         private DataTree dataTree;
+
         private volatile boolean sealed;
 
-        protected AbstractBuilder(final Class<S> shardClass) {
+        AbstractBuilder(final Class<? extends S> shardClass) {
             this.shardClass = shardClass;
         }
 
-        protected void checkSealed() {
-            Preconditions.checkState(!sealed, "Builder isalready sealed - further modifications are not allowed");
+        final void checkSealed() {
+            checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
         }
 
         @SuppressWarnings("unchecked")
@@ -1048,37 +1189,37 @@ public class Shard extends RaftActor {
 
         public T id(final ShardIdentifier newId) {
             checkSealed();
-            this.id = newId;
+            id = newId;
             return self();
         }
 
         public T peerAddresses(final Map<String, String> newPeerAddresses) {
             checkSealed();
-            this.peerAddresses = newPeerAddresses;
+            peerAddresses = newPeerAddresses;
             return self();
         }
 
         public T datastoreContext(final DatastoreContext newDatastoreContext) {
             checkSealed();
-            this.datastoreContext = newDatastoreContext;
+            datastoreContext = newDatastoreContext;
             return self();
         }
 
-        public T schemaContextProvider(final SchemaContextProvider newSchemaContextProvider) {
+        public T schemaContextProvider(final Supplier<@NonNull EffectiveModelContext> newSchemaContextProvider) {
             checkSealed();
-            this.schemaContextProvider = Preconditions.checkNotNull(newSchemaContextProvider);
+            schemaContextProvider = requireNonNull(newSchemaContextProvider);
             return self();
         }
 
         public T restoreFromSnapshot(final DatastoreSnapshot.ShardSnapshot newRestoreFromSnapshot) {
             checkSealed();
-            this.restoreFromSnapshot = newRestoreFromSnapshot;
+            restoreFromSnapshot = newRestoreFromSnapshot;
             return self();
         }
 
         public T dataTree(final DataTree newDataTree) {
             checkSealed();
-            this.dataTree = newDataTree;
+            dataTree = newDataTree;
             return self();
         }
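
The AbstractBuilder above uses the "seal on build" idiom: every setter calls checkSealed(), and once the builder is sealed (which the real code does when building the actor Props), further modification fails fast. A minimal, dependency-free sketch of the same idiom with a hypothetical Config/Builder pair:

    // Sketch of the seal-on-build idiom; Config and Builder are placeholders.
    final class Config {
        private final String id;

        private Config(final Builder builder) {
            id = builder.id;
        }

        String id() {
            return id;
        }

        static final class Builder {
            private String id;
            private volatile boolean sealed;

            Builder id(final String newId) {
                checkSealed();
                id = newId;
                return this;
            }

            Config build() {
                sealed = true;
                return new Config(this);
            }

            private void checkSealed() {
                if (sealed) {
                    throw new IllegalStateException("Builder is already sealed"
                        + " - further modifications are not allowed");
                }
            }
        }
    }
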
 
@@ -1094,8 +1235,8 @@ public class Shard extends RaftActor {
             return datastoreContext;
         }
 
-        public SchemaContext getSchemaContext() {
-            return Verify.verifyNotNull(schemaContextProvider.getSchemaContext());
+        public EffectiveModelContext getSchemaContext() {
+            return verifyNotNull(schemaContextProvider.get());
         }
 
         public DatastoreSnapshot.ShardSnapshot getRestoreFromSnapshot() {
@@ -1107,22 +1248,17 @@ public class Shard extends RaftActor {
         }
 
         public TreeType getTreeType() {
-            switch (datastoreContext.getLogicalStoreType()) {
-                case CONFIGURATION:
-                    return TreeType.CONFIGURATION;
-                case OPERATIONAL:
-                    return TreeType.OPERATIONAL;
-                default:
-                    throw new IllegalStateException("Unhandled logical store type "
-                            + datastoreContext.getLogicalStoreType());
-            }
+            return switch (datastoreContext.getLogicalStoreType()) {
+                case CONFIGURATION -> TreeType.CONFIGURATION;
+                case OPERATIONAL -> TreeType.OPERATIONAL;
+            };
         }
 
         protected void verify() {
-            Preconditions.checkNotNull(id, "id should not be null");
-            Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
-            Preconditions.checkNotNull(datastoreContext, "dataStoreContext should not be null");
-            Preconditions.checkNotNull(schemaContextProvider, "schemaContextProvider should not be null");
+            requireNonNull(id, "id should not be null");
+            requireNonNull(peerAddresses, "peerAddresses should not be null");
+            requireNonNull(datastoreContext, "dataStoreContext should not be null");
+            requireNonNull(schemaContextProvider, "schemaContextProvider should not be null");
         }
 
         public Props props() {
@@ -1134,7 +1270,11 @@ public class Shard extends RaftActor {
 
     public static class Builder extends AbstractBuilder<Builder, Shard> {
         Builder() {
-            super(Shard.class);
+            this(Shard.class);
+        }
+
+        Builder(final Class<? extends Shard> shardClass) {
+            super(shardClass);
         }
     }
 
index 403a96819f392726d7b24ef715827e1187f29841..946203b6b76aa5e2c4b4f94a849a9430f2d3fa06 100644 (file)
@@ -7,22 +7,23 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Status.Failure;
 import akka.serialization.Serialization;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Deque;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.Map;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
@@ -37,7 +38,8 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionRe
 import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage;
 import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModificationsCursor;
 import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 
 /**
@@ -45,6 +47,7 @@ import org.slf4j.Logger;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 final class ShardCommitCoordinator {
 
     // Interface hook for unit tests to replace or decorate the ShardDataTreeCohorts.
@@ -70,7 +73,7 @@ final class ShardCommitCoordinator {
     ShardCommitCoordinator(final ShardDataTree dataTree, final Logger log, final String name) {
         this.log = log;
         this.name = name;
-        this.dataTree = Preconditions.checkNotNull(dataTree);
+        this.dataTree = requireNonNull(dataTree);
     }
 
     int getCohortCacheSize() {
@@ -127,9 +130,10 @@ final class ShardCommitCoordinator {
      * @param batched the BatchedModifications message to process
      * @param sender the sender of the message
      */
+    @SuppressFBWarnings(value = "THROWS_METHOD_THROWS_RUNTIMEEXCEPTION", justification = "Replay of captured failure")
     void handleBatchedModifications(final BatchedModifications batched, final ActorRef sender, final Shard shard) {
         CohortEntry cohortEntry = cohortCache.get(batched.getTransactionId());
-        if (cohortEntry == null) {
+        if (cohortEntry == null || cohortEntry.isSealed()) {
             cohortEntry = CohortEntry.createOpen(dataTree.newReadWriteTransaction(batched.getTransactionId()),
                 batched.getVersion());
             cohortCache.put(cohortEntry.getTransactionId(), cohortEntry);
@@ -156,8 +160,8 @@ final class ShardCommitCoordinator {
             }
 
             if (log.isDebugEnabled()) {
-                log.debug("{}: Readying Tx {}, client version {}", name,
-                        batched.getTransactionId(), batched.getVersion());
+                log.debug("{}: Readying Tx {} of {} operations, client version {}", name,
+                        batched.getTransactionId(), cohortEntry.getTotalOperationsProcessed(), batched.getVersion());
             }
 
             cohortEntry.setDoImmediateCommit(batched.isDoCommitOnReady());
@@ -203,6 +207,7 @@ final class ShardCommitCoordinator {
         }
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
             final int maxModificationsPerBatch) {
         CohortEntry cohortEntry = cohortCache.remove(from.getTransactionId());
@@ -235,9 +240,9 @@ final class ShardCommitCoordinator {
     }
 
     private void handleCanCommit(final CohortEntry cohortEntry) {
-        cohortEntry.canCommit(new FutureCallback<Void>() {
+        cohortEntry.canCommit(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
+            public void onSuccess(final Empty result) {
                 log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionId());
 
                 if (cohortEntry.isDoImmediateCommit()) {
@@ -311,7 +316,7 @@ final class ShardCommitCoordinator {
         });
     }
 
-    void finishCommit(@Nonnull final ActorRef sender, @Nonnull final CohortEntry cohortEntry) {
+    void finishCommit(final @NonNull ActorRef sender, final @NonNull CohortEntry cohortEntry) {
         log.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionId());
 
         cohortEntry.commit(new FutureCallback<UnsignedLong>() {
@@ -320,7 +325,6 @@ final class ShardCommitCoordinator {
                 final TransactionIdentifier txId = cohortEntry.getTransactionId();
                 log.debug("{}: Transaction {} committed as {}, sending response to {}", persistenceId(), txId, result,
                     sender);
-                cohortEntry.getShard().getDataStore().purgeTransaction(txId, null);
 
                 cohortCache.remove(cohortEntry.getTransactionId());
                 sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
@@ -331,7 +335,6 @@ final class ShardCommitCoordinator {
             public void onFailure(final Throwable failure) {
                 final TransactionIdentifier txId = cohortEntry.getTransactionId();
                 log.error("{}, An exception occurred while committing transaction {}", persistenceId(), txId, failure);
-                cohortEntry.getShard().getDataStore().purgeTransaction(txId, null);
 
                 cohortCache.remove(cohortEntry.getTransactionId());
                 sender.tell(new Failure(failure), cohortEntry.getShard().self());
@@ -372,11 +375,9 @@ final class ShardCommitCoordinator {
         log.debug("{}: Aborting transaction {}", name, transactionID);
 
         final ActorRef self = shard.getSelf();
-        cohortEntry.abort(new FutureCallback<Void>() {
+        cohortEntry.abort(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
-                shard.getDataStore().purgeTransaction(cohortEntry.getTransactionId(), null);
-
+            public void onSuccess(final Empty result) {
                 if (sender != null) {
                     sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
                 }
@@ -385,7 +386,6 @@ final class ShardCommitCoordinator {
             @Override
             public void onFailure(final Throwable failure) {
                 log.error("{}: An exception happened during abort", name, failure);
-                shard.getDataStore().purgeTransaction(cohortEntry.getTransactionId(), null);
 
                 if (sender != null) {
                     sender.tell(new Failure(failure), self);
@@ -401,19 +401,18 @@ final class ShardCommitCoordinator {
     }
 
     void abortPendingTransactions(final String reason, final Shard shard) {
-        final Failure failure = new Failure(new RuntimeException(reason));
-        Collection<ShardDataTreeCohort> pending = dataTree.getAndClearPendingTransactions();
+        final var failure = new Failure(new RuntimeException(reason));
+        final var pending = dataTree.getAndClearPendingTransactions();
 
         log.debug("{}: Aborting {} pending queued transactions", name, pending.size());
 
-        for (ShardDataTreeCohort cohort : pending) {
-            CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier());
-            if (cohortEntry == null) {
-                continue;
-            }
-
-            if (cohortEntry.getReplySender() != null) {
-                cohortEntry.getReplySender().tell(failure, shard.self());
+        for (var cohort : pending) {
+            final var cohortEntry = cohortCache.remove(cohort.transactionId());
+            if (cohortEntry != null) {
+                final var replySender = cohortEntry.getReplySender();
+                if (replySender != null) {
+                    replySender.tell(failure, shard.self());
+                }
             }
         }
 
@@ -421,32 +420,31 @@ final class ShardCommitCoordinator {
     }
 
     Collection<?> convertPendingTransactionsToMessages(final int maxModificationsPerBatch) {
-        final Collection<VersionedExternalizableMessage> messages = new ArrayList<>();
-        for (ShardDataTreeCohort cohort : dataTree.getAndClearPendingTransactions()) {
-            CohortEntry cohortEntry = cohortCache.remove(cohort.getIdentifier());
+        final var messages = new ArrayList<VersionedExternalizableMessage>();
+        for (var cohort : dataTree.getAndClearPendingTransactions()) {
+            final var cohortEntry = cohortCache.remove(cohort.transactionId());
             if (cohortEntry == null) {
                 continue;
             }
 
-            final Deque<BatchedModifications> newMessages = new ArrayDeque<>();
+            final var newMessages = new ArrayDeque<BatchedModifications>();
             cohortEntry.getDataTreeModification().applyToCursor(new AbstractBatchedModificationsCursor() {
                 @Override
                 protected BatchedModifications getModifications() {
-                    final BatchedModifications lastBatch = newMessages.peekLast();
-
+                    final var lastBatch = newMessages.peekLast();
                     if (lastBatch != null && lastBatch.getModifications().size() >= maxModificationsPerBatch) {
                         return lastBatch;
                     }
 
                     // Allocate a new message
-                    final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionId(),
+                    final var ret = new BatchedModifications(cohortEntry.getTransactionId(),
                         cohortEntry.getClientVersion());
                     newMessages.add(ret);
                     return ret;
                 }
             });
 
-            final BatchedModifications last = newMessages.peekLast();
+            final var last = newMessages.peekLast();
             if (last != null) {
                 final boolean immediate = cohortEntry.isDoImmediateCommit();
                 last.setDoCommitOnReady(immediate);
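
convertPendingTransactionsToMessages() above walks each pending transaction's DataTreeModification through a cursor that appends to the newest BatchedModifications until it reaches maxModificationsPerBatch, then allocates a fresh one; the final batch then carries the ready/immediate-commit flags. The accumulation policy in isolation looks like the sketch below (BatchAccumulator is a hypothetical helper):

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.List;

    // Sketch of "append to the newest batch until it is full, then start a new one".
    final class BatchAccumulator<T> {
        private final Deque<List<T>> batches = new ArrayDeque<>();
        private final int maxPerBatch;

        BatchAccumulator(final int maxPerBatch) {
            this.maxPerBatch = maxPerBatch;
        }

        void add(final T modification) {
            List<T> last = batches.peekLast();
            if (last == null || last.size() >= maxPerBatch) {
                // Current batch is full (or none exists yet): allocate a new one.
                last = new ArrayList<>();
                batches.add(last);
            }
            last.add(modification);
        }

        Deque<List<T>> batches() {
            return batches;
        }
    }
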
index 8b826015c4eaf7df45572dd747b5da90ec3dce6e..72e7a545a7e36a41ea28f79e2b10e5dbfbb6c8e7 100644 (file)
@@ -7,14 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static akka.actor.ActorRef.noSender;
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+import static java.util.Objects.requireNonNullElse;
+
 import akka.actor.ActorRef;
 import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
-import com.google.common.base.Verify;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
@@ -32,7 +35,8 @@ import java.util.Deque;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Map.Entry;
+import java.util.Optional;
+import java.util.OptionalLong;
 import java.util.Queue;
 import java.util.SortedSet;
 import java.util.concurrent.TimeUnit;
@@ -40,73 +44,81 @@ import java.util.concurrent.TimeoutException;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.UnaryOperator;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.DataTreeCohortActorRegistry.CohortRegistryCommand;
 import org.opendaylight.controller.cluster.datastore.ShardDataTreeCohort.State;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.node.utils.transformer.ReusableNormalizedNodePruner;
 import org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CloseLocalHistoryPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.CreateLocalHistoryPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
 import org.opendaylight.controller.cluster.datastore.persisted.PurgeLocalHistoryPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.PurgeTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
+import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.datastore.persisted.SkipTransactionsPayload;
 import org.opendaylight.controller.cluster.datastore.utils.DataTreeModificationOutput;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 import org.opendaylight.controller.cluster.datastore.utils.PruningDataTreeModification;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.yangtools.concepts.Identifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
- * Internal shard state, similar to a DOMStore, but optimized for use in the actor system,
- * e.g. it does not expose public interfaces and assumes it is only ever called from a
- * single thread.
+ * Internal shard state, similar to a DOMStore, but optimized for use in the actor system, e.g. it does not expose
+ * public interfaces and assumes it is only ever called from a single thread.
  *
  * <p>
- * This class is not part of the API contract and is subject to change at any time.
+ * This class is not part of the API contract and is subject to change at any time. It is NOT thread-safe.
  */
-@NotThreadSafe
+@VisibleForTesting
+// non-final for mocking
 public class ShardDataTree extends ShardDataTreeTransactionParent {
     private static final class CommitEntry {
         final SimpleShardDataTreeCohort cohort;
         long lastAccess;
 
         CommitEntry(final SimpleShardDataTreeCohort cohort, final long now) {
-            this.cohort = Preconditions.checkNotNull(cohort);
+            this.cohort = requireNonNull(cohort);
             lastAccess = now;
         }
 
         @Override
         public String toString() {
-            return "CommitEntry [tx=" + cohort.getIdentifier() + ", state=" + cohort.getState() + "]";
+            return "CommitEntry [tx=" + cohort.transactionId() + ", state=" + cohort.getState() + "]";
         }
     }
 
@@ -147,25 +159,26 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      */
     private DataTreeTip tip;
 
-    private SchemaContext schemaContext;
+    private EffectiveModelContext schemaContext;
+    private DataSchemaContextTree dataSchemaContext;
 
     private int currentTransactionBatch;
 
-    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final DataTree dataTree,
+    ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final DataTree dataTree,
             final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
             final String logContext,
             final ShardDataTreeMetadata<?>... metadata) {
-        this.dataTree = Preconditions.checkNotNull(dataTree);
+        this.dataTree = requireNonNull(dataTree);
         updateSchemaContext(schemaContext);
 
-        this.shard = Preconditions.checkNotNull(shard);
-        this.treeChangeListenerPublisher = Preconditions.checkNotNull(treeChangeListenerPublisher);
-        this.logContext = Preconditions.checkNotNull(logContext);
+        this.shard = requireNonNull(shard);
+        this.treeChangeListenerPublisher = requireNonNull(treeChangeListenerPublisher);
+        this.logContext = requireNonNull(logContext);
         this.metadata = ImmutableList.copyOf(metadata);
         tip = dataTree;
     }
 
-    ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType,
+    ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType,
             final YangInstanceIdentifier root,
             final ShardDataTreeChangeListenerPublisher treeChangeListenerPublisher,
             final String logContext,
@@ -183,8 +196,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @VisibleForTesting
-    public ShardDataTree(final Shard shard, final SchemaContext schemaContext, final TreeType treeType) {
-        this(shard, schemaContext, treeType, YangInstanceIdentifier.EMPTY,
+    public ShardDataTree(final Shard shard, final EffectiveModelContext schemaContext, final TreeType treeType) {
+        this(shard, schemaContext, treeType, YangInstanceIdentifier.of(),
                 new DefaultShardDataTreeChangeListenerPublisher(""), "");
     }
 
@@ -196,20 +209,22 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return shard.ticker().read();
     }
 
-    public DataTree getDataTree() {
+    final DataTree getDataTree() {
         return dataTree;
     }
 
-    SchemaContext getSchemaContext() {
+    @VisibleForTesting
+    final EffectiveModelContext getSchemaContext() {
         return schemaContext;
     }
 
-    void updateSchemaContext(final SchemaContext newSchemaContext) {
-        dataTree.setSchemaContext(newSchemaContext);
-        this.schemaContext = Preconditions.checkNotNull(newSchemaContext);
+    final void updateSchemaContext(final @NonNull EffectiveModelContext newSchemaContext) {
+        dataTree.setEffectiveModelContext(newSchemaContext);
+        schemaContext = newSchemaContext;
+        dataSchemaContext = DataSchemaContextTree.from(newSchemaContext);
     }
 
-    void resetTransactionBatch() {
+    final void resetTransactionBatch() {
         currentTransactionBatch = 0;
     }
 
@@ -218,8 +233,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      *
      * @return A state snapshot
      */
-    @Nonnull ShardDataTreeSnapshot takeStateSnapshot() {
-        final NormalizedNode<?, ?> rootNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY).get();
+    @NonNull ShardDataTreeSnapshot takeStateSnapshot() {
+        final NormalizedNode rootNode = takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow();
         final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metaBuilder =
                 ImmutableMap.builder();
 
@@ -237,7 +252,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return !pendingTransactions.isEmpty() || !pendingCommits.isEmpty() || !pendingFinishCommits.isEmpty();
     }
 
-    private void applySnapshot(@Nonnull final ShardDataTreeSnapshot snapshot,
+    private void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot,
             final UnaryOperator<DataTreeModification> wrapper) throws DataValidationFailedException {
         final Stopwatch elapsed = Stopwatch.createStarted();
 
@@ -246,14 +261,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
 
         final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> snapshotMeta;
-        if (snapshot instanceof MetadataShardDataTreeSnapshot) {
-            snapshotMeta = ((MetadataShardDataTreeSnapshot) snapshot).getMetadata();
+        if (snapshot instanceof MetadataShardDataTreeSnapshot metaSnapshot) {
+            snapshotMeta = metaSnapshot.getMetadata();
         } else {
             snapshotMeta = ImmutableMap.of();
         }
 
-        for (ShardDataTreeMetadata<?> m : metadata) {
-            final ShardDataTreeSnapshotMetadata<?> s = snapshotMeta.get(m.getSupportedType());
+        for (var m : metadata) {
+            final var s = snapshotMeta.get(m.getSupportedType());
             if (s != null) {
                 m.applySnapshot(s);
             } else {
@@ -261,18 +276,18 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             }
         }
 
-        final DataTreeModification mod = wrapper.apply(dataTree.takeSnapshot().newModification());
+        final DataTreeModification unwrapped = newModification();
+        final DataTreeModification mod = wrapper.apply(unwrapped);
         // delete everything first
-        mod.delete(YangInstanceIdentifier.EMPTY);
+        mod.delete(YangInstanceIdentifier.of());
 
-        final java.util.Optional<NormalizedNode<?, ?>> maybeNode = snapshot.getRootNode();
-        if (maybeNode.isPresent()) {
+        snapshot.getRootNode().ifPresent(rootNode -> {
             // Add everything from the remote node back
-            mod.write(YangInstanceIdentifier.EMPTY, maybeNode.get());
-        }
+            mod.write(YangInstanceIdentifier.of(), rootNode);
+        });
+
         mod.ready();
 
-        final DataTreeModification unwrapped = unwrap(mod);
         dataTree.validate(unwrapped);
         DataTreeCandidateTip candidate = dataTree.prepare(unwrapped);
         dataTree.commit(candidate);
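
applySnapshot() above replaces the entire tree in one shot: stage a modification that deletes everything, write the snapshot's root node back if present, then validate, prepare and commit. A toy, map-based sketch of the same replace-everything flow; SnapshotApplier is a hypothetical stand-in for the DataTree, not the yangtools API:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    // Toy stand-in: stage a full replacement and swap it in as a single commit.
    final class SnapshotApplier {
        private Map<String, String> committed = new HashMap<>();

        void applySnapshot(final Optional<Map<String, String>> snapshotRoot) {
            // "Delete everything first" means starting from an empty modification ...
            final Map<String, String> modification = new HashMap<>();
            // ... then add everything from the snapshot back, if it has a root node.
            snapshotRoot.ifPresent(modification::putAll);
            // Commit atomically, mirroring validate/prepare/commit on the DataTree.
            committed = modification;
        }

        Map<String, String> current() {
            return committed;
        }
    }
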
@@ -288,21 +303,11 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param snapshot Snapshot that needs to be applied
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
-    void applySnapshot(@Nonnull final ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
+    final void applySnapshot(final @NonNull ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
+        // TODO: we should be taking ShardSnapshotState here and performing forward-compatibility translation
         applySnapshot(snapshot, UnaryOperator.identity());
     }
 
-    private PruningDataTreeModification wrapWithPruning(final DataTreeModification delegate) {
-        return new PruningDataTreeModification(delegate, dataTree, schemaContext);
-    }
-
-    private static DataTreeModification unwrap(final DataTreeModification modification) {
-        if (modification instanceof PruningDataTreeModification) {
-            return ((PruningDataTreeModification)modification).delegate();
-        }
-        return modification;
-    }
-
     /**
      * Apply a snapshot coming from recovery. This method does not assume the SchemaContexts match and performs data
      * pruning in an attempt to adjust the state to our current SchemaContext.
@@ -310,30 +315,53 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param snapshot Snapshot that needs to be applied
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
-    void applyRecoverySnapshot(@Nonnull final ShardDataTreeSnapshot snapshot) throws DataValidationFailedException {
-        applySnapshot(snapshot, this::wrapWithPruning);
+    final void applyRecoverySnapshot(final @NonNull ShardSnapshotState snapshot) throws DataValidationFailedException {
+        // TODO: we should be able to reuse the pruner, provided we are not reentrant
+        final ReusableNormalizedNodePruner pruner = ReusableNormalizedNodePruner.forDataSchemaContext(
+            dataSchemaContext);
+        if (snapshot.needsMigration()) {
+            final ReusableNormalizedNodePruner uintPruner = pruner.withUintAdaption();
+            applySnapshot(snapshot.getSnapshot(),
+                delegate -> new PruningDataTreeModification.Proactive(delegate, dataTree, uintPruner));
+        } else {
+            applySnapshot(snapshot.getSnapshot(),
+                delegate -> new PruningDataTreeModification.Reactive(delegate, dataTree, pruner));
+        }
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void applyRecoveryCandidate(final DataTreeCandidate candidate) {
-        final PruningDataTreeModification mod = wrapWithPruning(dataTree.takeSnapshot().newModification());
-        DataTreeCandidates.applyToModification(mod, candidate);
-        mod.ready();
-
-        final DataTreeModification unwrapped = mod.delegate();
+    private void applyRecoveryCandidate(final CommitTransactionPayload payload) throws IOException {
+        final var entry = payload.acquireCandidate();
+        final var unwrapped = newModification();
+        final var pruningMod = createPruningModification(unwrapped,
+            NormalizedNodeStreamVersion.MAGNESIUM.compareTo(entry.streamVersion()) > 0);
+
+        DataTreeCandidates.applyToModification(pruningMod, entry.candidate());
+        pruningMod.ready();
         LOG.trace("{}: Applying recovery modification {}", logContext, unwrapped);
 
         try {
             dataTree.validate(unwrapped);
             dataTree.commit(dataTree.prepare(unwrapped));
         } catch (Exception e) {
-            File file = new File(System.getProperty("karaf.data", "."),
+            final var file = new File(System.getProperty("karaf.data", "."),
                     "failed-recovery-payload-" + logContext + ".out");
             DataTreeModificationOutput.toFile(file, unwrapped);
-            throw new IllegalStateException(String.format(
-                    "%s: Failed to apply recovery payload. Modification data was written to file %s",
-                    logContext, file), e);
+            throw new IllegalStateException(
+                "%s: Failed to apply recovery payload. Modification data was written to file %s".formatted(
+                    logContext, file),
+                e);
         }
+
+        allMetadataCommittedTransaction(entry.transactionId());
+    }
+
+    private PruningDataTreeModification createPruningModification(final DataTreeModification unwrapped,
+            final boolean uintAdapting) {
+        // TODO: we should be able to reuse the pruner, provided we are not reentrant
+        final var pruner = ReusableNormalizedNodePruner.forDataSchemaContext(dataSchemaContext);
+        return uintAdapting ? new PruningDataTreeModification.Proactive(unwrapped, dataTree, pruner.withUintAdaption())
+                : new PruningDataTreeModification.Reactive(unwrapped, dataTree, pruner);
     }
 
     /**
@@ -344,41 +372,43 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @throws IOException when the snapshot fails to deserialize
      * @throws DataValidationFailedException when the snapshot fails to apply
      */
-    void applyRecoveryPayload(@Nonnull final Payload payload) throws IOException {
-        if (payload instanceof CommitTransactionPayload) {
-            final Entry<TransactionIdentifier, DataTreeCandidate> e =
-                    ((CommitTransactionPayload) payload).getCandidate();
-            applyRecoveryCandidate(e.getValue());
-            allMetadataCommittedTransaction(e.getKey());
-        } else if (payload instanceof AbortTransactionPayload) {
-            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeTransactionPayload) {
-            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof CreateLocalHistoryPayload) {
-            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof CloseLocalHistoryPayload) {
-            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeLocalHistoryPayload) {
-            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
+    final void applyRecoveryPayload(final @NonNull Payload payload) throws IOException {
+        if (payload instanceof CommitTransactionPayload commit) {
+            applyRecoveryCandidate(commit);
+        } else if (payload instanceof AbortTransactionPayload abort) {
+            allMetadataAbortedTransaction(abort.getIdentifier());
+        } else if (payload instanceof PurgeTransactionPayload purge) {
+            allMetadataPurgedTransaction(purge.getIdentifier());
+        } else if (payload instanceof CreateLocalHistoryPayload create) {
+            allMetadataCreatedLocalHistory(create.getIdentifier());
+        } else if (payload instanceof CloseLocalHistoryPayload close) {
+            allMetadataClosedLocalHistory(close.getIdentifier());
+        } else if (payload instanceof PurgeLocalHistoryPayload purge) {
+            allMetadataPurgedLocalHistory(purge.getIdentifier());
+        } else if (payload instanceof SkipTransactionsPayload skip) {
+            allMetadataSkipTransactions(skip);
         } else {
             LOG.debug("{}: ignoring unhandled payload {}", logContext, payload);
         }
     }
 
-    private void applyReplicatedCandidate(final TransactionIdentifier identifier, final DataTreeCandidate foreign)
-            throws DataValidationFailedException {
-        LOG.debug("{}: Applying foreign transaction {}", logContext, identifier);
+    private void applyReplicatedCandidate(final CommitTransactionPayload payload)
+            throws DataValidationFailedException, IOException {
+        final var payloadCandidate = payload.acquireCandidate();
+        final var transactionId = payloadCandidate.transactionId();
+        LOG.debug("{}: Applying foreign transaction {}", logContext, transactionId);
 
-        final DataTreeModification mod = dataTree.takeSnapshot().newModification();
-        DataTreeCandidates.applyToModification(mod, foreign);
+        final var mod = newModification();
+        // TODO: check version here, which will enable us to perform forward-compatibility transformations
+        DataTreeCandidates.applyToModification(mod, payloadCandidate.candidate());
         mod.ready();
 
         LOG.trace("{}: Applying foreign modification {}", logContext, mod);
         dataTree.validate(mod);
-        final DataTreeCandidate candidate = dataTree.prepare(mod);
+        final var candidate = dataTree.prepare(mod);
         dataTree.commit(candidate);
 
-        allMetadataCommittedTransaction(identifier);
+        allMetadataCommittedTransaction(transactionId);
         notifyListeners(candidate);
     }
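
applyReplicatedCandidate() follows the usual data-tree lifecycle: ready the modification, validate it, prepare a candidate, then commit it. A rough sketch of that sequencing, using hypothetical Tree/Modification interfaces rather than the yangtools API:

    final class ThreePhaseApplyExample {
        // Hypothetical stand-ins for the data tree API, for illustration only.
        interface Modification { void ready(); }
        interface Candidate { }
        interface Tree {
            void validate(Modification mod);
            Candidate prepare(Modification mod);
            void commit(Candidate candidate);
        }

        static Candidate apply(final Tree tree, final Modification mod) {
            mod.ready();                                   // seal the modification
            tree.validate(mod);                            // reject conflicting changes up front
            final Candidate candidate = tree.prepare(mod); // compute the resulting candidate
            tree.commit(candidate);                        // make it visible to readers
            return candidate;                              // callers can notify listeners with this
        }
    }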
 
@@ -391,7 +421,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @throws IOException when the payload fails to deserialize
      * @throws DataValidationFailedException when the payload fails to apply
      */
-    void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
+    final void applyReplicatedPayload(final Identifier identifier, final Payload payload) throws IOException,
             DataValidationFailedException {
         /*
          * This is a bit more involved than it needs to be due to the fact we do not want to be touching the payload
@@ -404,46 +434,75 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
          * In any case, we know that this is an entry coming from replication, hence we can be sure we will not observe
          * pre-Boron state -- which limits the number of options here.
          */
-        if (payload instanceof CommitTransactionPayload) {
+        if (payload instanceof CommitTransactionPayload commit) {
             if (identifier == null) {
-                final Entry<TransactionIdentifier, DataTreeCandidate> e =
-                        ((CommitTransactionPayload) payload).getCandidate();
-                applyReplicatedCandidate(e.getKey(), e.getValue());
+                applyReplicatedCandidate(commit);
             } else {
-                Verify.verify(identifier instanceof TransactionIdentifier);
-                payloadReplicationComplete((TransactionIdentifier) identifier);
+                verify(identifier instanceof TransactionIdentifier);
+                // if we did not track this transaction before, it means that it came from another leader and we are in
+                // the process of committing it while in PreLeader state. That means that it hasn't yet been committed
+                // to the local DataTree and would be lost if it was only applied via payloadReplicationComplete().
+                if (!payloadReplicationComplete((TransactionIdentifier) identifier)) {
+                    applyReplicatedCandidate(commit);
+                }
             }
-        } else if (payload instanceof AbortTransactionPayload) {
+
+            // make sure acquireCandidate() is the last call touching the payload data as we want it to be GC-ed.
+            checkRootOverwrite(commit.acquireCandidate().candidate());
+        } else if (payload instanceof AbortTransactionPayload abort) {
+            if (identifier != null) {
+                payloadReplicationComplete(abort);
+            }
+            allMetadataAbortedTransaction(abort.getIdentifier());
+        } else if (payload instanceof PurgeTransactionPayload purge) {
             if (identifier != null) {
-                payloadReplicationComplete((AbortTransactionPayload) payload);
+                payloadReplicationComplete(purge);
             }
-            allMetadataAbortedTransaction(((AbortTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeTransactionPayload) {
+            allMetadataPurgedTransaction(purge.getIdentifier());
+        } else if (payload instanceof CloseLocalHistoryPayload close) {
             if (identifier != null) {
-                payloadReplicationComplete((PurgeTransactionPayload) payload);
+                payloadReplicationComplete(close);
             }
-            allMetadataPurgedTransaction(((PurgeTransactionPayload) payload).getIdentifier());
-        } else if (payload instanceof CloseLocalHistoryPayload) {
+            allMetadataClosedLocalHistory(close.getIdentifier());
+        } else if (payload instanceof CreateLocalHistoryPayload create) {
             if (identifier != null) {
-                payloadReplicationComplete((CloseLocalHistoryPayload) payload);
+                payloadReplicationComplete(create);
             }
-            allMetadataClosedLocalHistory(((CloseLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof CreateLocalHistoryPayload) {
+            allMetadataCreatedLocalHistory(create.getIdentifier());
+        } else if (payload instanceof PurgeLocalHistoryPayload purge) {
             if (identifier != null) {
-                payloadReplicationComplete((CreateLocalHistoryPayload)payload);
+                payloadReplicationComplete(purge);
             }
-            allMetadataCreatedLocalHistory(((CreateLocalHistoryPayload) payload).getIdentifier());
-        } else if (payload instanceof PurgeLocalHistoryPayload) {
+            allMetadataPurgedLocalHistory(purge.getIdentifier());
+        } else if (payload instanceof SkipTransactionsPayload skip) {
             if (identifier != null) {
-                payloadReplicationComplete((PurgeLocalHistoryPayload)payload);
+                payloadReplicationComplete(skip);
             }
-            allMetadataPurgedLocalHistory(((PurgeLocalHistoryPayload) payload).getIdentifier());
+            allMetadataSkipTransactions(skip);
         } else {
             LOG.warn("{}: ignoring unhandled identifier {} payload {}", logContext, identifier, payload);
         }
     }
 
-    private void replicatePayload(final Identifier id, final Payload payload, @Nullable final Runnable callback) {
+    private void checkRootOverwrite(final DataTreeCandidate candidate) {
+        final DatastoreContext datastoreContext = shard.getDatastoreContext();
+        if (!datastoreContext.isSnapshotOnRootOverwrite()) {
+            return;
+        }
+
+        if (!datastoreContext.isPersistent()) {
+            // FIXME: why don't we want a snapshot in non-persistent state?
+            return;
+        }
+
+        // top-level container, i.e. "/"
+        if (candidate.getRootPath().isEmpty() && candidate.getRootNode().modificationType() == ModificationType.WRITE) {
+            LOG.debug("{}: shard root overwritten, enqueuing snapshot", logContext);
+            shard.self().tell(new InitiateCaptureSnapshot(), noSender());
+        }
+    }
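
checkRootOverwrite() only reacts when the candidate replaces the shard root, i.e. an empty root path combined with a WRITE modification. A small sketch of that predicate, with hypothetical candidate types instead of the yangtools DataTreeCandidate API:

    final class RootOverwriteExample {
        // Hypothetical candidate model, for illustration only.
        enum ModType { WRITE, SUBTREE_MODIFIED, DELETE }
        record Node(ModType modificationType) { }
        record Candidate(String rootPath, Node rootNode) { }

        static boolean isRootOverwrite(final Candidate candidate) {
            // an empty root path addresses the top-level container "/"
            return candidate.rootPath().isEmpty()
                && candidate.rootNode().modificationType() == ModType.WRITE;
        }

        public static void main(final String[] args) {
            System.out.println(isRootOverwrite(new Candidate("", new Node(ModType.WRITE))));        // true
            System.out.println(isRootOverwrite(new Candidate("/nodes", new Node(ModType.DELETE)))); // false
        }
    }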
+
+    private void replicatePayload(final Identifier id, final Payload payload, final @Nullable Runnable callback) {
         if (callback != null) {
             replicationCallbacks.put(payload, callback);
         }
@@ -460,22 +519,24 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
-    private void payloadReplicationComplete(final TransactionIdentifier txId) {
-        final CommitEntry current = pendingFinishCommits.peek();
+    private boolean payloadReplicationComplete(final TransactionIdentifier txId) {
+        final var current = pendingFinishCommits.peek();
         if (current == null) {
             LOG.warn("{}: No outstanding transactions, ignoring consensus on transaction {}", logContext, txId);
             allMetadataCommittedTransaction(txId);
-            return;
+            return false;
         }
 
-        if (!current.cohort.getIdentifier().equals(txId)) {
+        final var cohortTxId = current.cohort.transactionId();
+        if (!cohortTxId.equals(txId)) {
             LOG.debug("{}: Head of pendingFinishCommits queue is {}, ignoring consensus on transaction {}", logContext,
-                current.cohort.getIdentifier(), txId);
+                cohortTxId, txId);
             allMetadataCommittedTransaction(txId);
-            return;
+            return false;
         }
 
         finishCommit(current.cohort);
+        return true;
     }
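
payloadReplicationComplete() now reports whether it actually finished the transaction at the head of the queue, and applyReplicatedPayload() falls back to applying the candidate directly when it returns false (the PreLeader case noted above). A minimal sketch of that return-and-fallback shape:

    import java.util.ArrayDeque;
    import java.util.Queue;

    final class FinishHeadExample {
        // Returns true only when txId matched the head entry and was finished here.
        static boolean finishIfHead(final Queue<String> pendingFinishCommits, final String txId) {
            final String head = pendingFinishCommits.peek();
            if (head == null || !head.equals(txId)) {
                return false;                 // not tracked locally, the caller must apply it itself
            }
            pendingFinishCommits.remove();    // stand-in for finishCommit()
            return true;
        }

        public static void main(final String[] args) {
            final Queue<String> pending = new ArrayDeque<>();
            pending.add("tx-1");
            System.out.println(finishIfHead(pending, "tx-1")); // true
            System.out.println(finishIfHead(pending, "tx-2")); // false, nothing pending
        }
    }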
 
     private void allMetadataAbortedTransaction(final TransactionIdentifier txId) {
@@ -514,6 +575,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
+    private void allMetadataSkipTransactions(final SkipTransactionsPayload payload) {
+        final var historyId = payload.getIdentifier();
+        final var txIds = payload.getTransactionIds();
+        for (ShardDataTreeMetadata<?> m : metadata) {
+            m.onTransactionsSkipped(historyId, txIds);
+        }
+    }
+
     /**
      * Create a transaction chain for specified history. Unlike {@link #ensureTransactionChain(LocalHistoryIdentifier)},
      * this method is used for re-establishing state when we are taking over
@@ -522,17 +591,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param closed True if the chain should be created in closed state (i.e. pending purge)
      * @return Transaction chain handle
      */
-    ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
+    final ShardDataTreeTransactionChain recreateTransactionChain(final LocalHistoryIdentifier historyId,
             final boolean closed) {
         final ShardDataTreeTransactionChain ret = new ShardDataTreeTransactionChain(historyId, this);
         final ShardDataTreeTransactionChain existing = transactionChains.putIfAbsent(historyId, ret);
-        Preconditions.checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId,
-                existing);
+        checkState(existing == null, "Attempted to recreate chain %s, but %s already exists", historyId, existing);
         return ret;
     }
 
-    ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
-            @Nullable final Runnable callback) {
+    final ShardDataTreeTransactionChain ensureTransactionChain(final LocalHistoryIdentifier historyId,
+            final @Nullable Runnable callback) {
         ShardDataTreeTransactionChain chain = transactionChains.get(historyId);
         if (chain == null) {
             chain = new ShardDataTreeTransactionChain(historyId, this);
@@ -546,25 +614,33 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return chain;
     }
 
-    ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
-        if (txId.getHistoryId().getHistoryId() == 0) {
-            return new ReadOnlyShardDataTreeTransaction(this, txId, dataTree.takeSnapshot());
-        }
+    final @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+        shard.getShardMBean().incrementReadOnlyTransactionCount();
 
-        return ensureTransactionChain(txId.getHistoryId(), null).newReadOnlyTransaction(txId);
+        final var historyId = txId.getHistoryId();
+        return historyId.getHistoryId() == 0 ? newStandaloneReadOnlyTransaction(txId)
+            : ensureTransactionChain(historyId, null).newReadOnlyTransaction(txId);
     }
 
-    ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
-        if (txId.getHistoryId().getHistoryId() == 0) {
-            return new ReadWriteShardDataTreeTransaction(ShardDataTree.this, txId, dataTree.takeSnapshot()
-                    .newModification());
-        }
+    final @NonNull ReadOnlyShardDataTreeTransaction newStandaloneReadOnlyTransaction(final TransactionIdentifier txId) {
+        return new ReadOnlyShardDataTreeTransaction(this, txId, takeSnapshot());
+    }
 
-        return ensureTransactionChain(txId.getHistoryId(), null).newReadWriteTransaction(txId);
+    final @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+        shard.getShardMBean().incrementReadWriteTransactionCount();
+
+        final var historyId = txId.getHistoryId();
+        return historyId.getHistoryId() == 0 ? newStandaloneReadWriteTransaction(txId)
+            : ensureTransactionChain(historyId, null).newReadWriteTransaction(txId);
+    }
+
+    final @NonNull ReadWriteShardDataTreeTransaction newStandaloneReadWriteTransaction(
+            final TransactionIdentifier txId) {
+        return new ReadWriteShardDataTreeTransaction(this, txId, newModification());
     }
 
     @VisibleForTesting
-    public void notifyListeners(final DataTreeCandidate candidate) {
+    final void notifyListeners(final DataTreeCandidate candidate) {
         treeChangeListenerPublisher.publishChanges(candidate);
     }
 
@@ -572,7 +648,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * Immediately purge all state relevant to the leader. This includes all transaction chains and any scheduled
      * replication callbacks.
      */
-    void purgeLeaderState() {
+    final void purgeLeaderState() {
         for (ShardDataTreeTransactionChain chain : transactionChains.values()) {
             chain.close();
         }
@@ -587,19 +663,34 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param id History identifier
      * @param callback Callback to invoke upon completion, may be null
      */
-    void closeTransactionChain(final LocalHistoryIdentifier id, @Nullable final Runnable callback) {
+    final void closeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
+        if (commonCloseTransactionChain(id, callback)) {
+            replicatePayload(id, CloseLocalHistoryPayload.create(id,
+                shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
+        }
+    }
+
+    /**
+     * Close a single transaction chain which is received via the ask-based protocol. It does not keep a commit record.
+     *
+     * @param id History identifier
+     */
+    final void closeTransactionChain(final LocalHistoryIdentifier id) {
+        commonCloseTransactionChain(id, null);
+    }
+
+    private boolean commonCloseTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
         final ShardDataTreeTransactionChain chain = transactionChains.get(id);
         if (chain == null) {
             LOG.debug("{}: Closing non-existent transaction chain {}", logContext, id);
             if (callback != null) {
                 callback.run();
             }
-            return;
+            return false;
         }
 
         chain.close();
-        replicatePayload(id, CloseLocalHistoryPayload.create(
-                id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
+        return true;
     }
 
     /**
@@ -608,7 +699,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
      * @param id History identifier
      * @param callback Callback to invoke upon completion, may be null
      */
-    void purgeTransactionChain(final LocalHistoryIdentifier id, @Nullable final Runnable callback) {
+    final void purgeTransactionChain(final LocalHistoryIdentifier id, final @Nullable Runnable callback) {
         final ShardDataTreeTransactionChain chain = transactionChains.remove(id);
         if (chain == null) {
             LOG.debug("{}: Purging non-existent transaction chain {}", logContext, id);
@@ -622,25 +713,37 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
     }
 
-    Optional<DataTreeCandidate> readCurrentData() {
-        final java.util.Optional<NormalizedNode<?, ?>> currentState =
-                dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY);
-        return currentState.isPresent() ? Optional.of(DataTreeCandidates.fromNormalizedNode(
-            YangInstanceIdentifier.EMPTY, currentState.get())) : Optional.<DataTreeCandidate>absent();
+    final void skipTransactions(final LocalHistoryIdentifier id, final ImmutableUnsignedLongSet transactionIds,
+            final Runnable callback) {
+        final ShardDataTreeTransactionChain chain = transactionChains.get(id);
+        if (chain == null) {
+            LOG.debug("{}: Skipping on non-existent transaction chain {}", logContext, id);
+            if (callback != null) {
+                callback.run();
+            }
+            return;
+        }
+
+        replicatePayload(id, SkipTransactionsPayload.create(id, transactionIds,
+            shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
+    }
+
+    final Optional<DataTreeCandidate> readCurrentData() {
+        return readNode(YangInstanceIdentifier.of())
+            .map(state -> DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), state));
     }
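
readCurrentData() now chains java.util.Optional.map() instead of the old isPresent()/get() handling on Guava's Optional. The same idiom in isolation:

    import java.util.Optional;

    final class OptionalMapExample {
        static Optional<Integer> lengthOf(final Optional<String> maybeText) {
            // the mapping function only runs when a value is present
            return maybeText.map(String::length);
        }

        public static void main(final String[] args) {
            System.out.println(lengthOf(Optional.of("shard"))); // Optional[5]
            System.out.println(lengthOf(Optional.empty()));     // Optional.empty
        }
    }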
 
-    public void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
-            final Optional<DataTreeCandidate> initialState,
-            final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+    final void registerTreeChangeListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
+            final Optional<DataTreeCandidate> initialState, final Consumer<Registration> onRegistration) {
         treeChangeListenerPublisher.registerTreeChangeListener(path, listener, initialState, onRegistration);
     }
 
-    int getQueueSize() {
+    final int getQueueSize() {
         return pendingTransactions.size() + pendingCommits.size() + pendingFinishCommits.size();
     }
 
     @Override
-    void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
+    final void abortTransaction(final AbstractShardDataTreeTransaction<?> transaction, final Runnable callback) {
         final TransactionIdentifier id = transaction.getIdentifier();
         LOG.debug("{}: aborting transaction {}", logContext, id);
         replicatePayload(id, AbortTransactionPayload.create(
@@ -648,40 +751,43 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @Override
-    void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
+    final void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
         // No-op for free-standing transactions
-
     }
 
     @Override
-    ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
+    final ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
+            final Optional<SortedSet<String>> participatingShardNames) {
         final DataTreeModification snapshot = transaction.getSnapshot();
+        final TransactionIdentifier id = transaction.getIdentifier();
+        LOG.debug("{}: readying transaction {}", logContext, id);
         snapshot.ready();
+        LOG.debug("{}: transaction {} ready", logContext, id);
 
         return createReadyCohort(transaction.getIdentifier(), snapshot, participatingShardNames);
     }
 
-    void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
+    final void purgeTransaction(final TransactionIdentifier id, final Runnable callback) {
         LOG.debug("{}: purging transaction {}", logContext, id);
         replicatePayload(id, PurgeTransactionPayload.create(
                 id, shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity()), callback);
     }
 
-    public Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier path) {
-        return Optional.fromJavaUtil(dataTree.takeSnapshot().readNode(path));
+    @VisibleForTesting
+    public final Optional<NormalizedNode> readNode(final YangInstanceIdentifier path) {
+        return takeSnapshot().readNode(path);
     }
 
-    DataTreeSnapshot takeSnapshot() {
+    final DataTreeSnapshot takeSnapshot() {
         return dataTree.takeSnapshot();
     }
 
     @VisibleForTesting
-    public DataTreeModification newModification() {
-        return dataTree.takeSnapshot().newModification();
+    final DataTreeModification newModification() {
+        return takeSnapshot().newModification();
     }
 
-    public Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
+    final Collection<ShardDataTreeCohort> getAndClearPendingTransactions() {
         Collection<ShardDataTreeCohort> ret = new ArrayList<>(getQueueSize());
 
         for (CommitEntry entry: pendingFinishCommits) {
@@ -706,7 +812,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     /**
      * Called some time after {@link #processNextPendingTransaction()} decides to stop processing.
      */
-    void resumeNextPendingTransaction() {
+    final void resumeNextPendingTransaction() {
         LOG.debug("{}: attempting to resume transaction processing", logContext);
         processNextPending();
     }
@@ -724,26 +830,26 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             final SimpleShardDataTreeCohort cohort = entry.cohort;
             final DataTreeModification modification = cohort.getDataTreeModification();
 
-            LOG.debug("{}: Validating transaction {}", logContext, cohort.getIdentifier());
+            LOG.debug("{}: Validating transaction {}", logContext, cohort.transactionId());
             Exception cause;
             try {
                 tip.validate(modification);
-                LOG.debug("{}: Transaction {} validated", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Transaction {} validated", logContext, cohort.transactionId());
                 cohort.successfulCanCommit();
                 entry.lastAccess = readTime();
                 return;
             } catch (ConflictingModificationAppliedException e) {
-                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.getIdentifier(),
+                LOG.warn("{}: Store Tx {}: Conflicting modification for path {}.", logContext, cohort.transactionId(),
                     e.getPath());
                 cause = new OptimisticLockFailedException("Optimistic lock failed for path " + e.getPath(), e);
             } catch (DataValidationFailedException e) {
-                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.getIdentifier(),
+                LOG.warn("{}: Store Tx {}: Data validation failed for path {}.", logContext, cohort.transactionId(),
                     e.getPath(), e);
 
                 // For debugging purposes, allow dumping of the modification. Coupled with the above
                 // precondition log, it should allow us to understand what went on.
-                LOG.debug("{}: Store Tx {}: modifications: {} tree: {}", logContext, cohort.getIdentifier(),
-                    modification, dataTree);
+                LOG.debug("{}: Store Tx {}: modifications: {}", logContext, cohort.transactionId(), modification);
+                LOG.trace("{}: Current tree: {}", logContext, dataTree);
                 cause = new TransactionCommitFailedException("Data did not pass validation for path " + e.getPath(), e);
             } catch (Exception e) {
                 LOG.warn("{}: Unexpected failure in validation phase", logContext, e);
@@ -767,7 +873,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             final SimpleShardDataTreeCohort cohort = entry.cohort;
 
             if (cohort.isFailed()) {
-                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Removing failed transaction {}", logContext, cohort.transactionId());
                 queue.remove();
                 continue;
             }
@@ -792,6 +898,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return first != null && first.cohort.getState() == State.COMMIT_PENDING;
     }
 
+    // non-final for mocking
     void startCanCommit(final SimpleShardDataTreeCohort cohort) {
         final CommitEntry head = pendingTransactions.peek();
         if (head == null) {
@@ -812,12 +919,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
             Collection<String> precedingShardNames = extractPrecedingShardNames(cohort.getParticipatingShardNames());
             if (precedingShardNames.isEmpty()) {
-                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Tx {} is scheduled for canCommit step", logContext, cohort.transactionId());
                 return;
             }
 
             LOG.debug("{}: Evaluating tx {} for canCommit -  preceding participating shard names {}",
-                    logContext, cohort.getIdentifier(), precedingShardNames);
+                    logContext, cohort.transactionId(), precedingShardNames);
             final Iterator<CommitEntry> iter = pendingTransactions.iterator();
             int index = -1;
             int moveToIndex = -1;
@@ -828,29 +935,29 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 if (cohort.equals(entry.cohort)) {
                     if (moveToIndex < 0) {
                         LOG.debug("{}: Not moving tx {} - cannot proceed with canCommit",
-                                logContext, cohort.getIdentifier());
+                                logContext, cohort.transactionId());
                         return;
                     }
 
                     LOG.debug("{}: Moving {} to index {} in the pendingTransactions queue",
-                            logContext, cohort.getIdentifier(), moveToIndex);
+                            logContext, cohort.transactionId(), moveToIndex);
                     iter.remove();
                     insertEntry(pendingTransactions, entry, moveToIndex);
 
                     if (!cohort.equals(pendingTransactions.peek().cohort)) {
                         LOG.debug("{}: Tx {} is not at the head of the queue - cannot proceed with canCommit",
-                                logContext, cohort.getIdentifier());
+                                logContext, cohort.transactionId());
                         return;
                     }
 
                     LOG.debug("{}: Tx {} is now at the head of the queue - proceeding with canCommit",
-                            logContext, cohort.getIdentifier());
+                            logContext, cohort.transactionId());
                     break;
                 }
 
                 if (entry.cohort.getState() != State.READY) {
                     LOG.debug("{}: Skipping pending transaction {} in state {}",
-                            logContext, entry.cohort.getIdentifier(), entry.cohort.getState());
+                            logContext, entry.cohort.transactionId(), entry.cohort.getState());
                     continue;
                 }
 
@@ -860,16 +967,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 if (precedingShardNames.equals(pendingPrecedingShardNames)) {
                     if (moveToIndex < 0) {
                         LOG.debug("{}: Preceding shard names {} for pending tx {} match - saving moveToIndex {}",
-                                logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), index);
+                                logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), index);
                         moveToIndex = index;
                     } else {
                         LOG.debug(
                             "{}: Preceding shard names {} for pending tx {} match but moveToIndex already set to {}",
-                            logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier(), moveToIndex);
+                            logContext, pendingPrecedingShardNames, entry.cohort.transactionId(), moveToIndex);
                     }
                 } else {
                     LOG.debug("{}: Preceding shard names {} for pending tx {} differ - skipping",
-                        logContext, pendingPrecedingShardNames, entry.cohort.getIdentifier());
+                        logContext, pendingPrecedingShardNames, entry.cohort.transactionId());
                 }
             }
         }
@@ -895,8 +1002,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         tempStack.forEach(queue::addFirst);
     }
 
-    private Collection<String> extractPrecedingShardNames(
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
+    private Collection<String> extractPrecedingShardNames(final Optional<SortedSet<String>> participatingShardNames) {
         return participatingShardNames.map((Function<SortedSet<String>, Collection<String>>)
             set -> set.headSet(shard.getShardName())).orElse(Collections.<String>emptyList());
     }
@@ -907,36 +1013,39 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         processNextPendingTransaction();
     }
 
+    // non-final for mocking
     @SuppressWarnings("checkstyle:IllegalCatch")
     void startPreCommit(final SimpleShardDataTreeCohort cohort) {
         final CommitEntry entry = pendingTransactions.peek();
-        Preconditions.checkState(entry != null, "Attempted to pre-commit of %s when no transactions pending", cohort);
+        checkState(entry != null, "Attempted to pre-commit of %s when no transactions pending", cohort);
 
         final SimpleShardDataTreeCohort current = entry.cohort;
-        Verify.verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);
+        verify(cohort.equals(current), "Attempted to pre-commit %s while %s is pending", cohort, current);
 
-        LOG.debug("{}: Preparing transaction {}", logContext, current.getIdentifier());
+        final TransactionIdentifier currentId = current.transactionId();
+        LOG.debug("{}: Preparing transaction {}", logContext, currentId);
 
         final DataTreeCandidateTip candidate;
         try {
             candidate = tip.prepare(cohort.getDataTreeModification());
-        } catch (RuntimeException e) {
+            LOG.debug("{}: Transaction {} candidate ready", logContext, currentId);
+        } catch (DataValidationFailedException | RuntimeException e) {
             failPreCommit(e);
             return;
         }
 
-        cohort.userPreCommit(candidate, new FutureCallback<Void>() {
+        cohort.userPreCommit(candidate, new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void noop) {
+            public void onSuccess(final Empty result) {
                 // Set the tip of the data tree.
-                tip = Verify.verifyNotNull(candidate);
+                tip = verifyNotNull(candidate);
 
                 entry.lastAccess = readTime();
 
                 pendingTransactions.remove();
                 pendingCommits.add(entry);
 
-                LOG.debug("{}: Transaction {} prepared", logContext, current.getIdentifier());
+                LOG.debug("{}: Transaction {} prepared", logContext, currentId);
 
                 cohort.successfulPreCommit(candidate);
 
@@ -958,7 +1067,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void finishCommit(final SimpleShardDataTreeCohort cohort) {
-        final TransactionIdentifier txId = cohort.getIdentifier();
+        final TransactionIdentifier txId = cohort.transactionId();
         final DataTreeCandidate candidate = cohort.getCandidate();
 
         LOG.debug("{}: Resuming commit of transaction {}", logContext, txId);
@@ -989,22 +1098,23 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         });
     }
 
+    // non-final for mocking
     void startCommit(final SimpleShardDataTreeCohort cohort, final DataTreeCandidate candidate) {
         final CommitEntry entry = pendingCommits.peek();
-        Preconditions.checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);
+        checkState(entry != null, "Attempted to start commit of %s when no transactions pending", cohort);
 
         final SimpleShardDataTreeCohort current = entry.cohort;
         if (!cohort.equals(current)) {
-            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.getIdentifier());
+            LOG.debug("{}: Transaction {} scheduled for commit step", logContext, cohort.transactionId());
             return;
         }
 
-        LOG.debug("{}: Starting commit for transaction {}", logContext, current.getIdentifier());
+        LOG.debug("{}: Starting commit for transaction {}", logContext, current.transactionId());
 
-        final TransactionIdentifier txId = cohort.getIdentifier();
+        final TransactionIdentifier txId = cohort.transactionId();
         final Payload payload;
         try {
-            payload = CommitTransactionPayload.create(txId, candidate,
+            payload = CommitTransactionPayload.create(txId, candidate, PayloadVersion.current(),
                     shard.getDatastoreContext().getInitialPayloadSerializedBufferCapacity());
         } catch (IOException e) {
             LOG.error("{}: Failed to encode transaction {} candidate {}", logContext, txId, candidate, e);
@@ -1043,16 +1153,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         processNextPendingCommit();
     }
 
-    Collection<ActorRef> getCohortActors() {
+    final Collection<ActorRef> getCohortActors() {
         return cohortRegistry.getCohortActors();
     }
 
-    void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
+    final void processCohortRegistryCommand(final ActorRef sender, final CohortRegistryCommand message) {
         cohortRegistry.process(sender, message);
     }
 
     @Override
-    ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+    final ShardDataTreeCohort createFailedCohort(final TransactionIdentifier txId, final DataTreeModification mod,
             final Exception failure) {
         final SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId, failure);
         pendingTransactions.add(new CommitEntry(cohort, readTime()));
@@ -1060,8 +1170,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
     }
 
     @Override
-    ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
+    final ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+            final Optional<SortedSet<String>> participatingShardNames) {
         SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
                 cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
                         COMMIT_STEP_TIMEOUT), participatingShardNames);
@@ -1071,18 +1181,18 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
 
     // Exposed for ShardCommitCoordinator so it does not have to deal with local histories (it does not care); this
     // mimics the newReadWriteTransaction()
-    ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
-            final java.util.Optional<SortedSet<String>> participatingShardNames) {
-        if (txId.getHistoryId().getHistoryId() == 0) {
+    final ShardDataTreeCohort newReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
+            final Optional<SortedSet<String>> participatingShardNames) {
+        final var historyId = txId.getHistoryId();
+        if (historyId.getHistoryId() == 0) {
             return createReadyCohort(txId, mod, participatingShardNames);
         }
-
-        return ensureTransactionChain(txId.getHistoryId(), null).createReadyCohort(txId, mod, participatingShardNames);
+        return ensureTransactionChain(historyId, null).createReadyCohort(txId, mod, participatingShardNames);
     }
 
     @SuppressFBWarnings(value = "DB_DUPLICATE_SWITCH_CLAUSES", justification = "See inline comments below.")
-    void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
-            final Function<SimpleShardDataTreeCohort, Optional<Long>> accessTimeUpdater) {
+    final void checkForExpiredTransactions(final long transactionCommitTimeoutMillis,
+            final Function<SimpleShardDataTreeCohort, OptionalLong> accessTimeUpdater) {
         final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
         final long now = readTime();
 
@@ -1100,13 +1210,13 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             return;
         }
 
-        final Optional<Long> updateOpt = accessTimeUpdater.apply(currentTx.cohort);
+        final OptionalLong updateOpt = accessTimeUpdater.apply(currentTx.cohort);
         if (updateOpt.isPresent()) {
-            final long newAccess =  updateOpt.get().longValue();
+            final long newAccess =  updateOpt.orElseThrow();
             final long newDelta = now - newAccess;
             if (newDelta < delta) {
                 LOG.debug("{}: Updated current transaction {} access time", logContext,
-                    currentTx.cohort.getIdentifier());
+                    currentTx.cohort.transactionId());
                 currentTx.lastAccess = newAccess;
                 delta = newDelta;
             }
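
The access-time update above moves from Optional<Long> to OptionalLong, avoiding boxing and using orElseThrow() for the must-be-present access. A minimal example of the same pattern:

    import java.util.OptionalLong;

    final class AccessTimeExample {
        static long refreshOrKeep(final OptionalLong update, final long previous) {
            // orElseThrow() is only reached after isPresent(), so it cannot throw here
            return update.isPresent() ? update.orElseThrow() : previous;
        }

        public static void main(final String[] args) {
            System.out.println(refreshOrKeep(OptionalLong.of(42L), 7L)); // 42
            System.out.println(refreshOrKeep(OptionalLong.empty(), 7L)); // 7
        }
    }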
@@ -1121,7 +1231,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final State state = currentTx.cohort.getState();
 
         LOG.warn("{}: Current transaction {} has timed out after {} ms in state {}", logContext,
-            currentTx.cohort.getIdentifier(), deltaMillis, state);
+            currentTx.cohort.transactionId(), deltaMillis, state);
         boolean processNext = true;
         final TimeoutException cohortFailure = new TimeoutException("Backend timeout in state " + state + " after "
                 + deltaMillis + "ms");
@@ -1161,7 +1271,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 break;
             case COMMIT_PENDING:
                 LOG.warn("{}: Transaction {} is still committing, cannot abort", logContext,
-                    currentTx.cohort.getIdentifier());
+                    currentTx.cohort.transactionId());
                 currentTx.lastAccess = now;
                 processNext = false;
                 return;
@@ -1180,11 +1290,12 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
+    // non-final for mocking
     boolean startAbort(final SimpleShardDataTreeCohort cohort) {
         final Iterator<CommitEntry> it = Iterables.concat(pendingFinishCommits, pendingCommits,
                 pendingTransactions).iterator();
         if (!it.hasNext()) {
-            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.getIdentifier());
+            LOG.debug("{}: no open transaction while attempting to abort {}", logContext, cohort.transactionId());
             return true;
         }
 
@@ -1192,8 +1303,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         final CommitEntry first = it.next();
         if (cohort.equals(first.cohort)) {
             if (cohort.getState() != State.COMMIT_PENDING) {
-                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.getIdentifier(),
-                    cohort.getIdentifier());
+                LOG.debug("{}: aborting head of queue {} in state {}", logContext, cohort.transactionId(),
+                    cohort.transactionId());
 
                 it.remove();
                 if (cohort.getCandidate() != null) {
@@ -1204,15 +1315,15 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 return true;
             }
 
-            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.getIdentifier());
+            LOG.warn("{}: transaction {} is committing, skipping abort", logContext, cohort.transactionId());
             return false;
         }
 
-        DataTreeTip newTip = MoreObjects.firstNonNull(first.cohort.getCandidate(), dataTree);
+        DataTreeTip newTip = requireNonNullElse(first.cohort.getCandidate(), dataTree);
         while (it.hasNext()) {
             final CommitEntry e = it.next();
             if (cohort.equals(e.cohort)) {
-                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: aborting queued transaction {}", logContext, cohort.transactionId());
 
                 it.remove();
                 if (cohort.getCandidate() != null) {
@@ -1220,31 +1331,31 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 }
 
                 return true;
-            } else {
-                newTip = MoreObjects.firstNonNull(e.cohort.getCandidate(), newTip);
             }
+
+            newTip = requireNonNullElse(e.cohort.getCandidate(), newTip);
         }
 
-        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.getIdentifier());
+        LOG.debug("{}: aborted transaction {} not found in the queue", logContext, cohort.transactionId());
         return true;
     }
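
startAbort() swaps Guava's MoreObjects.firstNonNull() for the JDK's Objects.requireNonNullElse() (Java 9+), which keeps the first argument unless it is null. For example:

    import static java.util.Objects.requireNonNullElse;

    final class FirstNonNullExample {
        public static void main(final String[] args) {
            final String candidate = null;
            // falls back to the second argument when the first is null
            System.out.println(requireNonNullElse(candidate, "dataTree")); // dataTree
            System.out.println(requireNonNullElse("tip", "dataTree"));     // tip
        }
    }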
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void rebaseTransactions(final Iterator<CommitEntry> iter, @Nonnull final DataTreeTip newTip) {
-        tip = Preconditions.checkNotNull(newTip);
+    private void rebaseTransactions(final Iterator<CommitEntry> iter, final @NonNull DataTreeTip newTip) {
+        tip = requireNonNull(newTip);
         while (iter.hasNext()) {
             final SimpleShardDataTreeCohort cohort = iter.next().cohort;
             if (cohort.getState() == State.CAN_COMMIT_COMPLETE) {
-                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Revalidating queued transaction {}", logContext, cohort.transactionId());
 
                 try {
                     tip.validate(cohort.getDataTreeModification());
                 } catch (DataValidationFailedException | RuntimeException e) {
-                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.getIdentifier(), e);
+                    LOG.debug("{}: Failed to revalidate queued transaction {}", logContext, cohort.transactionId(), e);
                     cohort.reportFailure(e);
                 }
             } else if (cohort.getState() == State.PRE_COMMIT_COMPLETE) {
-                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.getIdentifier());
+                LOG.debug("{}: Repreparing queued transaction {}", logContext, cohort.transactionId());
 
                 try {
                     tip.validate(cohort.getDataTreeModification());
@@ -1253,14 +1364,14 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                     cohort.setNewCandidate(candidate);
                     tip = candidate;
                 } catch (RuntimeException | DataValidationFailedException e) {
-                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.getIdentifier(), e);
+                    LOG.debug("{}: Failed to reprepare queued transaction {}", logContext, cohort.transactionId(), e);
                     cohort.reportFailure(e);
                 }
             }
         }
     }
 
-    void setRunOnPendingTransactionsComplete(final Runnable operation) {
+    final void setRunOnPendingTransactionsComplete(final Runnable operation) {
         runOnPendingTransactionsComplete = operation;
         maybeRunOperationOnPendingTransactionsComplete();
     }
@@ -1275,16 +1386,16 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
-    ShardStats getStats() {
+    final ShardStats getStats() {
         return shard.getShardMBean();
     }
 
-    Iterator<SimpleShardDataTreeCohort> cohortIterator() {
+    final Iterator<SimpleShardDataTreeCohort> cohortIterator() {
         return Iterables.transform(Iterables.concat(pendingFinishCommits, pendingCommits, pendingTransactions),
             e -> e.cohort).iterator();
     }
 
-    void removeTransactionChain(final LocalHistoryIdentifier id) {
+    final void removeTransactionChain(final LocalHistoryIdentifier id) {
         if (transactionChains.remove(id) != null) {
             LOG.debug("{}: Removed transaction chain {}", logContext, id);
         }
index 3b4fdc9578c2e34d29a90a01a08cc4090fb7d839..4c67c3fc23b4bfa2036b207f5704eae400cfa1f5 100644 (file)
@@ -7,12 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Optional;
+import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Interface for a class that generates and publishes notifications for DataTreeChangeListeners.
@@ -21,6 +21,5 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
  */
 interface ShardDataTreeChangeListenerPublisher extends ShardDataTreeNotificationPublisher {
     void registerTreeChangeListener(YangInstanceIdentifier treeId, DOMDataTreeChangeListener listener,
-            Optional<DataTreeCandidate> initialState,
-            Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration);
+            Optional<DataTreeCandidate> initialState, Consumer<Registration> onRegistration);
 }
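
The registration callback now takes a plain Consumer<Registration> instead of the parameterized ListenerRegistration type. A rough sketch of that handoff pattern, with a hypothetical Registration stand-in for the yangtools interface:

    import java.util.function.Consumer;

    final class RegistrationCallbackExample {
        // Hypothetical stand-in for org.opendaylight.yangtools.concepts.Registration.
        interface Registration extends AutoCloseable {
            @Override
            void close();
        }

        static void registerListener(final Runnable listener, final Consumer<Registration> onRegistration) {
            // ... wire the listener into the publisher, then hand the live handle to the caller
            final Registration reg = () -> System.out.println("listener unregistered");
            onRegistration.accept(reg);
        }

        public static void main(final String[] args) {
            registerListener(() -> { }, Registration::close);
        }
    }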
index f43975832d9c74df723f620b963127e601b8555e..83209731c4abfd59b5b090eb4719a1c4894b9707 100644 (file)
@@ -10,32 +10,31 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorContext;
 import akka.actor.ActorRef;
 import akka.actor.Props;
-import com.google.common.base.Optional;
+import java.util.Optional;
 import java.util.function.Consumer;
-import javax.annotation.concurrent.NotThreadSafe;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
- * Implementation of ShardDataTreeChangeListenerPublisher that offloads the generation and publication
- * of data tree change notifications to an actor.
+ * Implementation of ShardDataTreeChangeListenerPublisher that offloads the generation and publication of data tree
+ * change notifications to an actor. This class is NOT thread-safe.
  *
  * @author Thomas Pantelis
  */
-@NotThreadSafe
 class ShardDataTreeChangeListenerPublisherActorProxy extends AbstractShardDataTreeNotificationPublisherActorProxy
         implements ShardDataTreeChangeListenerPublisher {
 
-    ShardDataTreeChangeListenerPublisherActorProxy(ActorContext actorContext, String actorName, String logContext) {
+    ShardDataTreeChangeListenerPublisherActorProxy(final ActorContext actorContext, final String actorName,
+        final String logContext) {
         super(actorContext, actorName, logContext);
     }
 
     @Override
-    public void registerTreeChangeListener(YangInstanceIdentifier treeId,
-            DOMDataTreeChangeListener listener, Optional<DataTreeCandidate> currentState,
-            Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
+    public void registerTreeChangeListener(final YangInstanceIdentifier treeId,
+            final DOMDataTreeChangeListener listener, final Optional<DataTreeCandidate> currentState,
+            final Consumer<Registration> onRegistration) {
         final ShardDataTreeChangePublisherActor.RegisterListener regMessage =
                 new ShardDataTreeChangePublisherActor.RegisterListener(treeId, listener, currentState, onRegistration);
         log.debug("{}: Sending {} to publisher actor {}", logContext(), regMessage, publisherActor());
index 927e39ff01bf8be284648abdeda81651b77c1aef..5e96133aaa116cda6d68a3a5f42bb0551d524026 100644 (file)
@@ -7,14 +7,15 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.Props;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
+import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Actor used to generate and publish DataTreeChange notifications.
@@ -30,12 +31,11 @@ public final class ShardDataTreeChangePublisherActor
 
     @Override
     protected void handleReceive(final Object message) {
-        if (message instanceof RegisterListener) {
-            RegisterListener reg = (RegisterListener)message;
+        if (message instanceof RegisterListener reg) {
             LOG.debug("{}: Received {}", logContext(), reg);
             if (reg.initialState.isPresent()) {
                 DefaultShardDataTreeChangeListenerPublisher.notifySingleListener(reg.path, reg.listener,
-                        reg.initialState.get(), logContext());
+                        reg.initialState.orElseThrow(), logContext());
             } else {
                 reg.listener.onInitialData();
             }
@@ -54,15 +54,14 @@ public final class ShardDataTreeChangePublisherActor
         private final YangInstanceIdentifier path;
         private final DOMDataTreeChangeListener listener;
         private final Optional<DataTreeCandidate> initialState;
-        private final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration;
+        private final Consumer<Registration> onRegistration;
 
         RegisterListener(final YangInstanceIdentifier path, final DOMDataTreeChangeListener listener,
-                final Optional<DataTreeCandidate> initialState,
-                final Consumer<ListenerRegistration<DOMDataTreeChangeListener>> onRegistration) {
-            this.path = Preconditions.checkNotNull(path);
-            this.listener = Preconditions.checkNotNull(listener);
-            this.initialState = Preconditions.checkNotNull(initialState);
-            this.onRegistration = Preconditions.checkNotNull(onRegistration);
+                final Optional<DataTreeCandidate> initialState, final Consumer<Registration> onRegistration) {
+            this.path = requireNonNull(path);
+            this.listener = requireNonNull(listener);
+            this.initialState = requireNonNull(initialState);
+            this.onRegistration = requireNonNull(onRegistration);
         }
 
         @Override
index 581768c0ed73352d5c1b80fc5e32033dfe266cf2..03cc77f0e064ff34880a5c1ae664572435588344 100644 (file)
@@ -14,14 +14,15 @@ import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
 import java.util.Optional;
 import java.util.SortedSet;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 @VisibleForTesting
-public abstract class ShardDataTreeCohort implements Identifiable<TransactionIdentifier> {
+public abstract class ShardDataTreeCohort {
     public enum State {
         READY,
         CAN_COMMIT_PENDING,
@@ -39,6 +40,8 @@ public abstract class ShardDataTreeCohort implements Identifiable<TransactionIde
         // Prevent foreign instantiation
     }
 
+    abstract @NonNull TransactionIdentifier transactionId();
+
     // FIXME: This leaks internal state generated in preCommit,
     // should be result of canCommit
     abstract DataTreeCandidateTip getCandidate();
@@ -49,13 +52,13 @@ public abstract class ShardDataTreeCohort implements Identifiable<TransactionIde
 
     // FIXME: Should return rebased DataTreeCandidateTip
     @VisibleForTesting
-    public abstract void canCommit(FutureCallback<Void> callback);
+    public abstract void canCommit(FutureCallback<Empty> callback);
 
     @VisibleForTesting
     public abstract void preCommit(FutureCallback<DataTreeCandidate> callback);
 
     @VisibleForTesting
-    public abstract void abort(FutureCallback<Void> callback);
+    public abstract void abort(FutureCallback<Empty> callback);
 
     @VisibleForTesting
     public abstract void commit(FutureCallback<UnsignedLong> callback);
@@ -70,6 +73,6 @@ public abstract class ShardDataTreeCohort implements Identifiable<TransactionIde
     }
 
     ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("id", getIdentifier()).add("state", getState());
+        return toStringHelper.add("id", transactionId()).add("state", getState());
     }
 }
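
The cohort callbacks change from FutureCallback<Void> to FutureCallback<Empty>, so success delivers a real non-null value instead of null. A small sketch of the idea, with a stand-in Empty type rather than yangtools' org.opendaylight.yangtools.yang.common.Empty:

    import com.google.common.util.concurrent.FutureCallback;

    final class EmptyCallbackExample {
        // Stand-in singleton; the real code uses yangtools' Empty.
        enum Empty { VALUE }

        static FutureCallback<Empty> loggingCallback(final String txId) {
            return new FutureCallback<>() {
                @Override
                public void onSuccess(final Empty result) {
                    System.out.println(txId + " completed: " + result); // result is never null
                }

                @Override
                public void onFailure(final Throwable cause) {
                    System.err.println(txId + " failed: " + cause);
                }
            };
        }
    }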
@@ -5,22 +5,28 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
+
+import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.dispatch.Futures;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
+import com.google.common.collect.Streams;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.datastore.messages.DataTreeListenerInfo;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardDataTreeListenerInfoMXBean;
 import org.opendaylight.controller.cluster.datastore.messages.GetInfo;
 import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import scala.concurrent.Await;
 import scala.concurrent.ExecutionContext;
@@ -31,15 +37,14 @@ import scala.concurrent.Future;
  *
  * @author Thomas Pantelis
  */
-public class ShardDataTreeListenerInfoMXBeanImpl extends AbstractMXBean implements ShardDataTreeListenerInfoMXBean {
+final class ShardDataTreeListenerInfoMXBeanImpl extends AbstractMXBean implements ShardDataTreeListenerInfoMXBean {
     private static final String JMX_CATEGORY = "ShardDataTreeListenerInfo";
 
     private final OnDemandShardStateCache stateCache;
 
-    public ShardDataTreeListenerInfoMXBeanImpl(final String shardName, final String mxBeanType,
-            final ActorRef shardActor) {
+    ShardDataTreeListenerInfoMXBeanImpl(final String shardName, final String mxBeanType, final ActorRef shardActor) {
         super(shardName, mxBeanType, JMX_CATEGORY);
-        stateCache = new OnDemandShardStateCache(shardName, Preconditions.checkNotNull(shardActor));
+        stateCache = new OnDemandShardStateCache(shardName, requireNonNull(shardActor));
     }
 
     @Override
@@ -53,25 +58,27 @@ public class ShardDataTreeListenerInfoMXBeanImpl extends AbstractMXBean implemen
             return stateCache.get();
         } catch (Exception e) {
             Throwables.throwIfUnchecked(e);
-            throw new RuntimeException(e);
+            throw new IllegalStateException(e);
         }
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private List<DataTreeListenerInfo> getListenerActorsInfo(Collection<ActorSelection> actors) {
+    @SuppressFBWarnings(value = "REC_CATCH_EXCEPTION", justification = "Akka's Await.result() API contract")
+    private static List<DataTreeListenerInfo> getListenerActorsInfo(final Collection<ActorSelection> actors) {
         final Timeout timeout = new Timeout(20, TimeUnit.SECONDS);
         final List<Future<Object>> futureList = new ArrayList<>(actors.size());
-        for (ActorSelection actor: actors) {
+        for (ActorSelection actor : actors) {
             futureList.add(Patterns.ask(actor, GetInfo.INSTANCE, timeout));
         }
 
+        final Iterable<Object> listenerInfos;
         try {
-            final List<DataTreeListenerInfo> listenerInfoList = new ArrayList<>();
-            Await.result(Futures.sequence(futureList, ExecutionContext.Implicits$.MODULE$.global()),
-                    timeout.duration()).forEach(obj -> listenerInfoList.add((DataTreeListenerInfo) obj));
-            return listenerInfoList;
-        } catch (Exception e) {
-            throw new RuntimeException(e);
+            listenerInfos = Await.result(Futures.sequence(futureList, ExecutionContext.Implicits$.MODULE$.global()),
+                timeout.duration());
+        } catch (TimeoutException | InterruptedException e) {
+            throw new IllegalStateException("Failed to acquire listeners", e);
         }
+
+        return Streams.stream(listenerInfos).map(DataTreeListenerInfo.class::cast).collect(Collectors.toList());
     }
 }
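
The listener-info change above collects untyped actor replies and narrows them with Guava's Streams; a minimal standalone sketch of that cast-and-collect step, with illustrative types only (String stands in for DataTreeListenerInfo, and the class name is hypothetical):

import com.google.common.collect.Streams;
import java.util.List;
import java.util.stream.Collectors;

final class CastCollect {
    private CastCollect() {
        // utility class
    }

    // Narrow an Iterable<Object> to a typed List, as getListenerActorsInfo() does above.
    static List<String> narrow(final Iterable<Object> results) {
        return Streams.stream(results).map(String.class::cast).collect(Collectors.toList());
    }
}
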
index e2c1b27db1406ad0e6d2f3e1b405d1f6db33f464..76719f94f839f13f127dc3ed180fa6c4fffdf733 100644 (file)
@@ -8,11 +8,12 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import com.google.common.base.Verify;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshotMetadata;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 
 abstract class ShardDataTreeMetadata<T extends ShardDataTreeSnapshotMetadata<T>> {
     /**
@@ -20,7 +21,7 @@ abstract class ShardDataTreeMetadata<T extends ShardDataTreeSnapshotMetadata<T>>
      *
      * @param snapshot Metadata snapshot
      */
-    final void applySnapshot(@Nonnull final ShardDataTreeSnapshotMetadata<?> snapshot) {
+    final void applySnapshot(final @NonNull ShardDataTreeSnapshotMetadata<?> snapshot) {
         Verify.verify(getSupportedType().isInstance(snapshot), "Snapshot %s misrouted to handler of %s", snapshot,
             getSupportedType());
         doApplySnapshot(getSupportedType().cast(snapshot));
@@ -37,23 +38,21 @@ abstract class ShardDataTreeMetadata<T extends ShardDataTreeSnapshotMetadata<T>>
      *
      * @param snapshot Metadata snapshot
      */
-    abstract void doApplySnapshot(@Nonnull T snapshot);
+    abstract void doApplySnapshot(@NonNull T snapshot);
 
     /**
      * Return the type of metadata snapshot this object supports.
      *
      * @return Metadata type
      */
-    @Nonnull
-    abstract Class<T> getSupportedType();
+    abstract @NonNull Class<T> getSupportedType();
 
     /**
      * Take a snapshot of current metadata state.
      *
      * @return Metadata snapshot, or null if the metadata is empty.
      */
-    @Nullable
-    abstract T toSnapshot();
+    abstract @Nullable T toSnapshot();
 
     // Lifecycle events
 
@@ -63,6 +62,8 @@ abstract class ShardDataTreeMetadata<T extends ShardDataTreeSnapshotMetadata<T>>
 
     abstract void onTransactionPurged(TransactionIdentifier txId);
 
+    abstract void onTransactionsSkipped(LocalHistoryIdentifier historyId, ImmutableUnsignedLongSet txIds);
+
     abstract void onHistoryCreated(LocalHistoryIdentifier historyId);
 
     abstract void onHistoryClosed(LocalHistoryIdentifier historyId);
index 4dbd818488c29243116f191469f959d6eb3a42ed..373a130004521d835430da24db5d3b1d875dd62e 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Interface for a class the publishes data tree notifications.
index c22bc3bd98930c2e561a7b4f9b06083da1dfb8d0..095a542f6c69417b03b9d17f78dc1e9db5d17e4c 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import com.google.common.base.Stopwatch;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * Actor used to generate and publish data tree notifications. This is used to offload the potentially
@@ -40,7 +40,7 @@ public class ShardDataTreeNotificationPublisherActor<T extends ShardDataTreeNoti
     }
 
     @Override
-    protected void handleReceive(Object message) {
+    protected void handleReceive(final Object message) {
         if (message instanceof PublishNotifications) {
             PublishNotifications toPublish = (PublishNotifications)message;
             timer.start();
@@ -65,7 +65,7 @@ public class ShardDataTreeNotificationPublisherActor<T extends ShardDataTreeNoti
     static class PublishNotifications {
         private final DataTreeCandidate candidate;
 
-        PublishNotifications(DataTreeCandidate candidate) {
+        PublishNotifications(final DataTreeCandidate candidate) {
             this.candidate = candidate;
         }
     }
index a774d647d3d6c6e8cbecea7eb1b91b5ccfe0e6df..6be13ae1295de42282ac34b0713e8bb1ed27e662 100644 (file)
@@ -7,23 +7,24 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import java.util.Optional;
 import java.util.SortedSet;
-import javax.annotation.concurrent.NotThreadSafe;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A transaction chain attached to a Shard.
+ * A transaction chain attached to a Shard. This class is NOT thread-safe.
  */
-@NotThreadSafe
 final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent
         implements Identifiable<LocalHistoryIdentifier> {
 
@@ -36,13 +37,13 @@ final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent
     private boolean closed;
 
     ShardDataTreeTransactionChain(final LocalHistoryIdentifier localHistoryIdentifier, final ShardDataTree dataTree) {
-        this.chainId = Preconditions.checkNotNull(localHistoryIdentifier);
-        this.dataTree = Preconditions.checkNotNull(dataTree);
+        chainId = requireNonNull(localHistoryIdentifier);
+        this.dataTree = requireNonNull(dataTree);
     }
 
     private DataTreeSnapshot getSnapshot() {
-        Preconditions.checkState(!closed, "TransactionChain %s has been closed", this);
-        Preconditions.checkState(openTransaction == null, "Transaction %s is open", openTransaction);
+        checkState(!closed, "TransactionChain %s has been closed", this);
+        checkState(openTransaction == null, "Transaction %s is open", openTransaction);
 
         if (previousTx == null) {
             LOG.debug("Opening an unchained snapshot in {}", chainId);
@@ -53,19 +54,20 @@ final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent
         return previousTx.getSnapshot();
     }
 
-    ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
+    @NonNull ReadOnlyShardDataTreeTransaction newReadOnlyTransaction(final TransactionIdentifier txId) {
         final DataTreeSnapshot snapshot = getSnapshot();
         LOG.debug("Allocated read-only transaction {} snapshot {}", txId, snapshot);
 
         return new ReadOnlyShardDataTreeTransaction(this, txId, snapshot);
     }
 
-    ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
+    @NonNull ReadWriteShardDataTreeTransaction newReadWriteTransaction(final TransactionIdentifier txId) {
         final DataTreeSnapshot snapshot = getSnapshot();
         LOG.debug("Allocated read-write transaction {} snapshot {}", txId, snapshot);
 
-        openTransaction = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
-        return openTransaction;
+        final var ret = new ReadWriteShardDataTreeTransaction(this, txId, snapshot.newModification());
+        openTransaction = ret;
+        return ret;
     }
 
     void close() {
@@ -76,8 +78,8 @@ final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent
     @Override
     void abortFromTransactionActor(final AbstractShardDataTreeTransaction<?> transaction) {
         if (transaction instanceof ReadWriteShardDataTreeTransaction) {
-            Preconditions.checkState(openTransaction != null,
-                    "Attempted to abort transaction %s while none is outstanding", transaction);
+            checkState(openTransaction != null, "Attempted to abort transaction %s while none is outstanding",
+                    transaction);
             LOG.debug("Aborted open transaction {}", transaction);
             openTransaction = null;
         }
@@ -92,8 +94,8 @@ final class ShardDataTreeTransactionChain extends ShardDataTreeTransactionParent
     @Override
     ShardDataTreeCohort finishTransaction(final ReadWriteShardDataTreeTransaction transaction,
             final Optional<SortedSet<String>> participatingShardNames) {
-        Preconditions.checkState(openTransaction != null,
-                "Attempted to finish transaction %s while none is outstanding", transaction);
+        checkState(openTransaction != null, "Attempted to finish transaction %s while none is outstanding",
+                transaction);
 
         // dataTree is finalizing ready the transaction, we just record it for the next
         // transaction in chain
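
The transaction-chain hunks above swap Guava's Preconditions.checkNotNull for the JDK's requireNonNull and statically import checkState; a small self-contained sketch of that style, assuming a purely illustrative ChainState class that is not part of this patch:

import static com.google.common.base.Preconditions.checkState;
import static java.util.Objects.requireNonNull;

final class ChainState {
    private final String chainId;
    private boolean closed;

    ChainState(final String chainId) {
        // JDK requireNonNull replaces Guava's Preconditions.checkNotNull
        this.chainId = requireNonNull(chainId);
    }

    void checkOpen() {
        // statically imported checkState keeps call sites short; %s templates as in Guava
        checkState(!closed, "TransactionChain %s has been closed", this);
    }

    void close() {
        closed = true;
    }

    @Override
    public String toString() {
        return chainId;
    }
}
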
index 0db6f083ac655908ae57de374e348d480d430d60..d7992596e0e37fa783290a5066f909de71f6be1d 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import java.util.Optional;
 import java.util.SortedSet;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 abstract class ShardDataTreeTransactionParent {
 
index fc3bfda1eb5582b06cf02f41c155fad5ab31681a..ac751fb33d3eb5f8ba22f22fcc86cdf6fb30b139 100644 (file)
@@ -5,12 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.DataExists;
 import org.opendaylight.controller.cluster.datastore.messages.ReadData;
 
@@ -19,17 +18,18 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadData;
  *
  * @author syedbahm
  */
-public class ShardReadTransaction extends ShardTransaction {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class ShardReadTransaction extends ShardTransaction {
     private final AbstractShardDataTreeTransaction<?> transaction;
 
-    public ShardReadTransaction(AbstractShardDataTreeTransaction<?> transaction, ActorRef shardActor,
-            ShardStats shardStats) {
+    public ShardReadTransaction(final AbstractShardDataTreeTransaction<?> transaction, final ActorRef shardActor,
+            final ShardStats shardStats) {
         super(shardActor, shardStats, transaction.getIdentifier());
-        this.transaction = Preconditions.checkNotNull(transaction);
+        this.transaction = requireNonNull(transaction);
     }
 
     @Override
-    public void handleReceive(Object message) {
+    public void handleReceive(final Object message) {
         if (ReadData.isSerializedType(message)) {
             readData(transaction, ReadData.fromSerializable(message));
         } else if (DataExists.isSerializedType(message)) {
index 5e5a2a42cf49fadf1874fb65c79e88c1acfd1fa7..84c346def864f3dc2627eb98e137472771e90939 100644 (file)
@@ -5,11 +5,9 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.DataExists;
 import org.opendaylight.controller.cluster.datastore.messages.ReadData;
 
@@ -18,14 +16,15 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadData;
  *
  * @author syedbahm
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardReadWriteTransaction extends ShardWriteTransaction {
-    public ShardReadWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
-            ShardStats shardStats) {
+    public ShardReadWriteTransaction(final ReadWriteShardDataTreeTransaction transaction, final ActorRef shardActor,
+            final ShardStats shardStats) {
         super(transaction, shardActor, shardStats);
     }
 
     @Override
-    public void handleReceive(Object message) {
+    public void handleReceive(final Object message) {
         if (ReadData.isSerializedType(message)) {
             readData(ReadData.fromSerializable(message));
         } else if (DataExists.isSerializedType(message)) {
index 7ece110d0143c4e1261ba511a9f73f0dbb8b3905..3baad570b7f4a83a6fc14ce2bfdcdfcd0e8d8669 100644 (file)
@@ -7,14 +7,16 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import java.io.File;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
 import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeXMLOutput;
 import org.opendaylight.controller.cluster.raft.RaftActorRecoveryCohort;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 
@@ -44,7 +46,7 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
 
         WithSnapshot(final ShardDataTree store, final String shardName, final Logger log, final Snapshot snapshot) {
             super(store, shardName, log);
-            this.restoreFromSnapshot = Preconditions.checkNotNull(snapshot);
+            restoreFromSnapshot = requireNonNull(snapshot);
         }
 
         @Override
@@ -60,9 +62,9 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
     private boolean open;
 
     ShardRecoveryCoordinator(final ShardDataTree store, final String shardName, final Logger log) {
-        this.store = Preconditions.checkNotNull(store);
-        this.shardName = Preconditions.checkNotNull(shardName);
-        this.log = Preconditions.checkNotNull(log);
+        this.store = requireNonNull(store);
+        this.shardName = requireNonNull(shardName);
+        this.log = requireNonNull(log);
     }
 
     static ShardRecoveryCoordinator create(final ShardDataTree store, final String shardName, final Logger log) {
@@ -83,7 +85,7 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
     @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
     public void appendRecoveredLogEntry(final Payload payload) {
-        Preconditions.checkState(open, "call startLogRecovery before calling appendRecoveredLogEntry");
+        checkState(open, "call startLogRecovery before calling appendRecoveredLogEntry");
 
         try {
             store.applyRecoveryPayload(payload);
@@ -99,11 +101,11 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
      */
     @Override
     public void applyCurrentLogRecoveryBatch() {
-        Preconditions.checkState(open, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
+        checkState(open, "call startLogRecovery before calling applyCurrentLogRecoveryBatch");
         open = false;
     }
 
-    private File writeRoot(final String kind, final NormalizedNode<?, ?> node) {
+    private File writeRoot(final String kind, final NormalizedNode node) {
         final File file = new File(System.getProperty("karaf.data", "."),
             "failed-recovery-" + kind + "-" + shardName + ".xml");
         NormalizedNodeXMLOutput.toFile(file, node);
@@ -120,14 +122,15 @@ abstract class ShardRecoveryCoordinator implements RaftActorRecoveryCohort {
     public void applyRecoverySnapshot(final Snapshot.State snapshotState) {
         if (!(snapshotState instanceof ShardSnapshotState)) {
             log.debug("{}: applyRecoverySnapshot ignoring snapshot: {}", shardName, snapshotState);
+            return;
         }
 
         log.debug("{}: Applying recovered snapshot", shardName);
-
-        ShardDataTreeSnapshot shardSnapshot = ((ShardSnapshotState)snapshotState).getSnapshot();
+        final ShardSnapshotState shardSnapshotState = (ShardSnapshotState)snapshotState;
         try {
-            store.applyRecoverySnapshot(shardSnapshot);
+            store.applyRecoverySnapshot(shardSnapshotState);
         } catch (Exception e) {
+            final ShardDataTreeSnapshot shardSnapshot = shardSnapshotState.getSnapshot();
             final File f = writeRoot("snapshot", shardSnapshot.getRootNode().orElse(null));
             throw new IllegalStateException(String.format(
                     "%s: Failed to apply recovery snapshot %s. Node data was written to file %s",
index 3ed3a48770054dc709d7076b9c79bd8c30de1bf1..c7bc20f7546599876d4f99a9ca07fc174ed836ed 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorContext;
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
 import com.google.common.io.ByteSource;
 import java.io.IOException;
 import java.io.ObjectInputStream;
@@ -23,6 +24,7 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.actors.ShardSnapshotActor;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.io.InputOutputStreamFactory;
 import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
@@ -36,30 +38,35 @@ import org.slf4j.Logger;
 final class ShardSnapshotCohort implements RaftActorSnapshotCohort {
     private static final FrontendType SNAPSHOT_APPLY = FrontendType.forName("snapshot-apply");
 
+    private final InputOutputStreamFactory streamFactory;
     private final ActorRef snapshotActor;
     private final ShardDataTree store;
     private final String logId;
     private final Logger log;
 
-    private ShardSnapshotCohort(final LocalHistoryIdentifier applyHistoryId, final ActorRef snapshotActor,
-            final ShardDataTree store, final Logger log, final String logId) {
-        this.snapshotActor = Preconditions.checkNotNull(snapshotActor);
-        this.store = Preconditions.checkNotNull(store);
+    ShardSnapshotCohort(final InputOutputStreamFactory streamFactory, final LocalHistoryIdentifier applyHistoryId,
+            final ActorRef snapshotActor, final ShardDataTree store, final Logger log, final String logId) {
+        this.streamFactory = requireNonNull(streamFactory);
+        this.snapshotActor = requireNonNull(snapshotActor);
+        this.store = requireNonNull(store);
         this.log = log;
         this.logId = logId;
     }
 
     static ShardSnapshotCohort create(final ActorContext actorContext, final MemberName memberName,
-            final ShardDataTree store, final Logger log, final String logId) {
+            final ShardDataTree store, final Logger log, final String logId, final DatastoreContext context) {
         final LocalHistoryIdentifier applyHistoryId = new LocalHistoryIdentifier(ClientIdentifier.create(
             FrontendIdentifier.create(memberName, SNAPSHOT_APPLY), 0), 0);
         final String snapshotActorName = "shard-" + memberName.getName() + ':' + "snapshot-read";
 
+        final InputOutputStreamFactory streamFactory = context.isUseLz4Compression()
+                ? InputOutputStreamFactory.lz4("256KB") : InputOutputStreamFactory.simple();
         // Create a snapshot actor. This actor will act as a worker to offload snapshot serialization for all
         // requests.
-        final ActorRef snapshotActor = actorContext.actorOf(ShardSnapshotActor.props(), snapshotActorName);
+        final ActorRef snapshotActor = actorContext.actorOf(ShardSnapshotActor.props(streamFactory),
+                snapshotActorName);
 
-        return new ShardSnapshotCohort(applyHistoryId, snapshotActor, store, log, logId);
+        return new ShardSnapshotCohort(streamFactory, applyHistoryId, snapshotActor, store, log, logId);
     }
 
     @Override
@@ -98,8 +105,8 @@ final class ShardSnapshotCohort implements RaftActorSnapshotCohort {
 
     @Override
     public State deserializeSnapshot(final ByteSource snapshotBytes) throws IOException {
-        try (ObjectInputStream in = new ObjectInputStream(snapshotBytes.openStream())) {
-            return new ShardSnapshotState(ShardDataTreeSnapshot.deserialize(in));
+        try (ObjectInputStream in = new ObjectInputStream(streamFactory.createInputStream(snapshotBytes))) {
+            return ShardDataTreeSnapshot.deserialize(in);
         }
     }
 }
@@ -5,21 +5,21 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
 import com.google.common.base.Joiner;
 import com.google.common.base.Joiner.MapJoiner;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.datastore.Shard;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStatsMXBean;
+import org.opendaylight.controller.cluster.mgmt.api.FollowerInfo;
 import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
-import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 
@@ -28,11 +28,11 @@ import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
  *
  * @author  Basheeruddin syedbahm@cisco.com
  */
-public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
+final class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
     public static final String JMX_CATEGORY_SHARD = "Shards";
 
-    @GuardedBy("DATE_FORMAT")
-    private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+    private static final DateTimeFormatter DATE_FORMATTER = DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSS")
+        .withZone(ZoneId.systemDefault());
 
     private static final MapJoiner MAP_JOINER = Joiner.on(", ").withKeyValueSeparator(": ");
 
@@ -44,8 +44,6 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     private long readOnlyTransactionCount;
 
-    private long writeOnlyTransactionCount;
-
     private long readWriteTransactionCount;
 
     private long lastCommittedTransactionTime;
@@ -64,12 +62,19 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     private long lastLeadershipChangeTime;
 
-    public ShardStats(final String shardName, final String mxBeanType, @Nullable final Shard shard) {
+    ShardStats(final String shardName, final String mxBeanType, final @Nullable Shard shard) {
         super(shardName, mxBeanType, JMX_CATEGORY_SHARD);
         this.shard = shard;
         stateCache = new OnDemandShardStateCache(shardName, shard != null ? shard.self() : null);
     }
 
+    static ShardStats create(final String shardName, final String mxBeanType, final @NonNull Shard shard) {
+        String finalMXBeanType = mxBeanType != null ? mxBeanType : "DistDataStore";
+        ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType, shard);
+        shardStatsMBeanImpl.registerMBean();
+        return shardStatsMBeanImpl;
+    }
+
     @SuppressWarnings("checkstyle:IllegalCatch")
     private OnDemandRaftState getOnDemandRaftState() {
         try {
@@ -83,9 +88,7 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
     }
 
     private static String formatMillis(final long timeMillis) {
-        synchronized (DATE_FORMAT) {
-            return DATE_FORMAT.format(new Date(timeMillis));
-        }
+        return DATE_FORMATTER.format(Instant.ofEpochMilli(timeMillis));
     }
 
     @Override
@@ -113,11 +116,6 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
         return readOnlyTransactionCount;
     }
 
-    @Override
-    public long getWriteOnlyTransactionCount() {
-        return writeOnlyTransactionCount;
-    }
-
     @Override
     public long getReadWriteTransactionCount() {
         return readWriteTransactionCount;
@@ -221,10 +219,6 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
         return ++readOnlyTransactionCount;
     }
 
-    public long incrementWriteOnlyTransactionCount() {
-        return ++writeOnlyTransactionCount;
-    }
-
     public long incrementReadWriteTransactionCount() {
         return ++readWriteTransactionCount;
     }
@@ -264,8 +258,6 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
         readOnlyTransactionCount = 0;
 
-        writeOnlyTransactionCount = 0;
-
         readWriteTransactionCount = 0;
 
         lastCommittedTransactionTime = 0;
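
The stats bean above replaces a lock-guarded SimpleDateFormat with an immutable DateTimeFormatter; a standalone sketch of that thread-safe formatting pattern, using only the JDK (the MillisFormatter class name is illustrative):

import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

final class MillisFormatter {
    // DateTimeFormatter is immutable and thread-safe, so no external synchronization is needed
    private static final DateTimeFormatter DATE_FORMATTER =
        DateTimeFormatter.ofPattern("uuuu-MM-dd HH:mm:ss.SSS").withZone(ZoneId.systemDefault());

    private MillisFormatter() {
        // utility class
    }

    static String formatMillis(final long timeMillis) {
        // Instant carries the epoch millis; the zone attached above makes the pattern applicable
        return DATE_FORMATTER.format(Instant.ofEpochMilli(timeMillis));
    }
}
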
index 8961a1a7553edcf91691f9e29f40a4056c41b2b5..3b3462884f85b6987b8a1777e797503e58255976 100644 (file)
@@ -5,24 +5,22 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.actor.ReceiveTimeout;
 import akka.japi.Creator;
-import com.google.common.base.Preconditions;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.DataExists;
 import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
-import org.opendaylight.controller.cluster.datastore.messages.PersistAbortTransactionPayload;
 import org.opendaylight.controller.cluster.datastore.messages.ReadData;
 import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
 import org.opendaylight.mdsal.common.api.ReadFailedException;
@@ -31,6 +29,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 /**
  * The ShardTransaction Actor represents a remote transaction that delegates all actions to DOMDataReadWriteTransaction.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class ShardTransaction extends AbstractUntypedActorWithMetering {
     private final ActorRef shardActor;
     private final ShardStats shardStats;
@@ -42,12 +41,13 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
         super("shard-tx");
         this.shardActor = shardActor;
         this.shardStats = shardStats;
-        this.transactionId = Preconditions.checkNotNull(transactionId);
+        this.transactionId = requireNonNull(transactionId);
     }
 
     public static Props props(final TransactionType type, final AbstractShardDataTreeTransaction<?> transaction,
             final ActorRef shardActor, final DatastoreContext datastoreContext, final ShardStats shardStats) {
-        return Props.create(new ShardTransactionCreator(type, transaction, shardActor, datastoreContext, shardStats));
+        return Props.create(ShardTransaction.class,
+            new ShardTransactionCreator(type, transaction, shardActor, datastoreContext, shardStats));
     }
 
     protected abstract AbstractShardDataTreeTransaction<?> getDOMStoreTransaction();
@@ -78,7 +78,6 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
 
     private void closeTransaction(final boolean sendReply) {
         getDOMStoreTransaction().abortFromTransactionActor();
-        shardActor.tell(new PersistAbortTransactionPayload(transactionId), ActorRef.noSender());
 
         if (sendReply && returnCloseTransactionReply()) {
             getSender().tell(new CloseTransactionReply(), getSelf());
@@ -121,7 +120,7 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Some fields are not Serializable but we don't "
             + "create remote instances of this actor and thus don't need it to be Serializable.")
     private static class ShardTransactionCreator implements Creator<ShardTransaction> {
-
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         final AbstractShardDataTreeTransaction<?> transaction;
@@ -132,7 +131,7 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
 
         ShardTransactionCreator(final TransactionType type, final AbstractShardDataTreeTransaction<?> transaction,
                 final ActorRef shardActor, final DatastoreContext datastoreContext, final ShardStats shardStats) {
-            this.transaction = Preconditions.checkNotNull(transaction);
+            this.transaction = requireNonNull(transaction);
             this.shardActor = shardActor;
             this.shardStats = shardStats;
             this.datastoreContext = datastoreContext;
@@ -141,23 +140,14 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
 
         @Override
         public ShardTransaction create() {
-            final ShardTransaction tx;
-            switch (type) {
-                case READ_ONLY:
-                    tx = new ShardReadTransaction(transaction, shardActor, shardStats);
-                    break;
-                case READ_WRITE:
-                    tx = new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor,
-                            shardStats);
-                    break;
-                case WRITE_ONLY:
-                    tx = new ShardWriteTransaction((ReadWriteShardDataTreeTransaction)transaction, shardActor,
-                            shardStats);
-                    break;
-                default:
-                    throw new IllegalArgumentException("Unhandled transaction type " + type);
-            }
-
+            final var tx = switch (type) {
+                case READ_ONLY -> new ShardReadTransaction(transaction, shardActor, shardStats);
+                case READ_WRITE -> new ShardReadWriteTransaction((ReadWriteShardDataTreeTransaction) transaction,
+                    shardActor, shardStats);
+                case WRITE_ONLY -> new ShardWriteTransaction((ReadWriteShardDataTreeTransaction) transaction,
+                    shardActor, shardStats);
+                default -> throw new IllegalArgumentException("Unhandled transaction type " + type);
+            };
             tx.getContext().setReceiveTimeout(datastoreContext.getShardTransactionIdleTimeout());
             return tx;
         }
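
The creator change above collapses a break-based switch into an arrow-form switch expression (Java 14+); a minimal sketch of the same shape, with a placeholder enum and return values that are not taken from the patch:

// Placeholder enum and strings; only the switch-expression shape mirrors the change above.
enum TxType {
    READ_ONLY, READ_WRITE, WRITE_ONLY
}

final class SwitchShape {
    private SwitchShape() {
        // utility class
    }

    static String describe(final TxType type) {
        // Arrow labels do not fall through, and an exhaustive enum switch needs no default arm;
        // the patched code keeps a defensive 'default -> throw' arm on top of that.
        return switch (type) {
            case READ_ONLY -> "read-only";
            case READ_WRITE, WRITE_ONLY -> "read-write";
        };
    }
}
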
index 74c75dc2542b1e5b31370786874e74cfd1dc72bf..122c43592aeaf8b9099f59e98f4eefa3fe9588b4 100644 (file)
@@ -7,42 +7,43 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.AbstractActor.ActorContext;
 import akka.actor.ActorRef;
-import akka.actor.UntypedActorContext;
-import com.google.common.base.Preconditions;
 import java.util.concurrent.atomic.AtomicLong;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 
 /**
  * A factory for creating ShardTransaction actors.
  *
  * @author Thomas Pantelis
  */
-class ShardTransactionActorFactory {
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class ShardTransactionActorFactory {
     private static final AtomicLong ACTOR_NAME_COUNTER = new AtomicLong();
 
     private final ShardDataTree dataTree;
     private final DatastoreContext datastoreContext;
     private final String txnDispatcherPath;
     private final ShardStats shardMBean;
-    private final UntypedActorContext actorContext;
+    private final ActorContext actorContext;
     private final ActorRef shardActor;
     private final String shardName;
 
-    ShardTransactionActorFactory(ShardDataTree dataTree, DatastoreContext datastoreContext,
-            String txnDispatcherPath, ActorRef shardActor, UntypedActorContext actorContext, ShardStats shardMBean,
-            String shardName) {
-        this.dataTree = Preconditions.checkNotNull(dataTree);
-        this.datastoreContext = Preconditions.checkNotNull(datastoreContext);
-        this.txnDispatcherPath = Preconditions.checkNotNull(txnDispatcherPath);
-        this.shardMBean = Preconditions.checkNotNull(shardMBean);
-        this.actorContext = Preconditions.checkNotNull(actorContext);
-        this.shardActor = Preconditions.checkNotNull(shardActor);
-        this.shardName = Preconditions.checkNotNull(shardName);
+    ShardTransactionActorFactory(final ShardDataTree dataTree, final DatastoreContext datastoreContext,
+            final String txnDispatcherPath, final ActorRef shardActor, final ActorContext actorContext,
+            final ShardStats shardMBean, final String shardName) {
+        this.dataTree = requireNonNull(dataTree);
+        this.datastoreContext = requireNonNull(datastoreContext);
+        this.txnDispatcherPath = requireNonNull(txnDispatcherPath);
+        this.shardMBean = requireNonNull(shardMBean);
+        this.actorContext = requireNonNull(actorContext);
+        this.shardActor = requireNonNull(shardActor);
+        this.shardName = requireNonNull(shardName);
     }
 
     private String actorNameFor(final TransactionIdentifier txId) {
@@ -62,25 +63,12 @@ class ShardTransactionActorFactory {
         return sb.append(txId.getTransactionId()).append('_').append(ACTOR_NAME_COUNTER.incrementAndGet()).toString();
     }
 
-    ActorRef newShardTransaction(TransactionType type, TransactionIdentifier transactionID) {
-        final AbstractShardDataTreeTransaction<?> transaction;
-        switch (type) {
-            case READ_ONLY:
-                transaction = dataTree.newReadOnlyTransaction(transactionID);
-                shardMBean.incrementReadOnlyTransactionCount();
-                break;
-            case READ_WRITE:
-                transaction = dataTree.newReadWriteTransaction(transactionID);
-                shardMBean.incrementReadWriteTransactionCount();
-                break;
-            case WRITE_ONLY:
-                transaction = dataTree.newReadWriteTransaction(transactionID);
-                shardMBean.incrementWriteOnlyTransactionCount();
-                break;
-            default:
-                throw new IllegalArgumentException("Unsupported transaction type " + type);
-        }
-
+    ActorRef newShardTransaction(final TransactionType type, final TransactionIdentifier transactionID) {
+        final AbstractShardDataTreeTransaction<?> transaction = switch (type) {
+            case READ_ONLY -> dataTree.newReadOnlyTransaction(transactionID);
+            case READ_WRITE, WRITE_ONLY -> dataTree.newReadWriteTransaction(transactionID);
+            default -> throw new IllegalArgumentException("Unsupported transaction type " + type);
+        };
         return actorContext.actorOf(ShardTransaction.props(type, transaction, shardActor, datastoreContext, shardMBean)
             .withDispatcher(txnDispatcherPath), actorNameFor(transactionID));
     }
index eea201e56d55b1ecdab037d221beb1389c0c5311..bfd7802213e05b1146e32c9031db0fe87ca1e141 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Cancellable;
 import akka.actor.Status.Failure;
@@ -23,6 +25,7 @@ import scala.concurrent.duration.FiniteDuration;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 class ShardTransactionMessageRetrySupport implements Closeable {
     private static final Logger LOG = LoggerFactory.getLogger(ShardTransactionMessageRetrySupport.class);
 
@@ -31,11 +34,11 @@ class ShardTransactionMessageRetrySupport implements Closeable {
     private final Set<MessageInfo> messagesToRetry = new LinkedHashSet<>();
     private final Shard shard;
 
-    ShardTransactionMessageRetrySupport(Shard shard) {
+    ShardTransactionMessageRetrySupport(final Shard shard) {
         this.shard = shard;
     }
 
-    void addMessageToRetry(Object message, ActorRef replyTo, String failureMessage) {
+    void addMessageToRetry(final Object message, final ActorRef replyTo, final String failureMessage) {
         LOG.debug("{}: Adding message {} to retry", shard.persistenceId(), message);
 
         MessageInfo messageInfo = new MessageInfo(message, replyTo, failureMessage);
@@ -61,7 +64,7 @@ class ShardTransactionMessageRetrySupport implements Closeable {
         }
     }
 
-    void onTimerMessage(Object message) {
+    void onTimerMessage(final Object message) {
         MessageInfo messageInfo = (MessageInfo)message;
 
         LOG.debug("{}: Timer expired for message {}", shard.persistenceId(), messageInfo.message);
@@ -79,24 +82,24 @@ class ShardTransactionMessageRetrySupport implements Closeable {
         messagesToRetry.clear();
     }
 
-    private static class MessageInfo {
+    private static final class MessageInfo {
         final Object message;
         final ActorRef replyTo;
         final String failureMessage;
         Cancellable timer;
 
-        MessageInfo(Object message, ActorRef replyTo, String failureMessage) {
+        MessageInfo(final Object message, final ActorRef replyTo, final String failureMessage) {
             this.message = message;
             this.replyTo = replyTo;
-            this.failureMessage = failureMessage;
+            this.failureMessage = requireNonNull(failureMessage);
         }
 
-        void retry(Shard shard) {
+        void retry(final Shard shard) {
             timer.cancel();
             shard.getSelf().tell(message, replyTo);
         }
 
-        void timedOut(Shard shard) {
+        void timedOut(final Shard shard) {
             replyTo.tell(new Failure(new NoShardLeaderException(failureMessage, shard.persistenceId())),
                     shard.getSelf());
         }
index b3f4b0b0d6d1f28f669b3fb48cfb02002447b1d7..764361a016dd56a2074dca2b3201927290f6da29 100644 (file)
@@ -6,13 +6,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
 import akka.actor.PoisonPill;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
 import org.opendaylight.controller.cluster.datastore.messages.DataExists;
@@ -25,14 +23,14 @@ import org.opendaylight.controller.cluster.datastore.modification.Modification;
  *
  * @author syedbahm
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardWriteTransaction extends ShardTransaction {
-
     private int totalBatchedModificationsReceived;
     private Exception lastBatchedModificationsException;
     private final ReadWriteShardDataTreeTransaction transaction;
 
-    public ShardWriteTransaction(ReadWriteShardDataTreeTransaction transaction, ActorRef shardActor,
-            ShardStats shardStats) {
+    public ShardWriteTransaction(final ReadWriteShardDataTreeTransaction transaction, final ActorRef shardActor,
+            final ShardStats shardStats) {
         super(shardActor, shardStats, transaction.getIdentifier());
         this.transaction = transaction;
     }
@@ -43,7 +41,7 @@ public class ShardWriteTransaction extends ShardTransaction {
     }
 
     @Override
-    public void handleReceive(Object message) {
+    public void handleReceive(final Object message) {
         if (message instanceof BatchedModifications) {
             batchedModifications((BatchedModifications)message);
         } else {
@@ -52,7 +50,7 @@ public class ShardWriteTransaction extends ShardTransaction {
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private void batchedModifications(BatchedModifications batched) {
+    private void batchedModifications(final BatchedModifications batched) {
         if (checkClosed()) {
             if (batched.isReady()) {
                 getSelf().tell(PoisonPill.getInstance(), getSelf());
@@ -91,25 +89,24 @@ public class ShardWriteTransaction extends ShardTransaction {
         }
     }
 
-    protected final void dataExists(DataExists message) {
+    protected final void dataExists(final DataExists message) {
         super.dataExists(transaction, message);
     }
 
-    protected final void readData(ReadData message) {
+    protected final void readData(final ReadData message) {
         super.readData(transaction, message);
     }
 
     private boolean checkClosed() {
-        if (transaction.isClosed()) {
+        final boolean ret = transaction.isClosed();
+        if (ret) {
             getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(
                     "Transaction is closed, no modifications allowed")), getSelf());
-            return true;
-        } else {
-            return false;
         }
+        return ret;
     }
 
-    private void readyTransaction(BatchedModifications batched) {
+    private void readyTransaction(final BatchedModifications batched) {
         TransactionIdentifier transactionID = getTransactionId();
 
         LOG.debug("readyTransaction : {}", transactionID);
index b5b49c2396c772a0fbfdd75e4bcc1ff8e14ca1cb..2c7d13189f61a832fe386ed383cb1b4b193120ad 100644 (file)
@@ -7,21 +7,23 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static com.google.common.base.Verify.verifyNotNull;
 import static java.util.Objects.requireNonNull;
 
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
 import java.util.Optional;
 import java.util.SortedSet;
 import java.util.concurrent.CompletionStage;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,10 +32,9 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     private final DataTreeModification transaction;
     private final ShardDataTree dataTree;
-    private final TransactionIdentifier transactionId;
+    private final @NonNull TransactionIdentifier transactionId;
     private final CompositeDataTreeCohort userCohorts;
-    @Nullable
-    private final SortedSet<String> participatingShardNames;
+    private final @Nullable SortedSet<String> participatingShardNames;
 
     private State state = State.READY;
     private DataTreeCandidateTip candidate;
@@ -55,13 +56,13 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
         this.dataTree = requireNonNull(dataTree);
         this.transaction = requireNonNull(transaction);
         this.transactionId = requireNonNull(transactionId);
-        this.userCohorts = null;
-        this.participatingShardNames = null;
+        userCohorts = null;
+        participatingShardNames = null;
         this.nextFailure = requireNonNull(nextFailure);
     }
 
     @Override
-    public TransactionIdentifier getIdentifier() {
+    TransactionIdentifier transactionId() {
         return transactionId;
     }
 
@@ -82,17 +83,17 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     private void checkState(final State expected) {
         Preconditions.checkState(state == expected, "State %s does not match expected state %s for %s",
-                state, expected, getIdentifier());
+                state, expected, transactionId());
     }
 
     @Override
-    public void canCommit(final FutureCallback<Void> newCallback) {
+    public void canCommit(final FutureCallback<Empty> newCallback) {
         if (state == State.CAN_COMMIT_PENDING) {
             return;
         }
 
         checkState(State.READY);
-        this.callback = requireNonNull(newCallback);
+        callback = requireNonNull(newCallback);
         state = State.CAN_COMMIT_PENDING;
 
         if (nextFailure == null) {
@@ -105,7 +106,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     @Override
     public void preCommit(final FutureCallback<DataTreeCandidate> newCallback) {
         checkState(State.CAN_COMMIT_COMPLETE);
-        this.callback = requireNonNull(newCallback);
+        callback = requireNonNull(newCallback);
         state = State.PRE_COMMIT_PENDING;
 
         if (nextFailure == null) {
@@ -116,9 +117,9 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     }
 
     @Override
-    public void abort(final FutureCallback<Void> abortCallback) {
+    public void abort(final FutureCallback<Empty> abortCallback) {
         if (!dataTree.startAbort(this)) {
-            abortCallback.onSuccess(null);
+            abortCallback.onSuccess(Empty.value());
             return;
         }
 
@@ -127,15 +128,15 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
         final Optional<CompletionStage<?>> maybeAborts = userCohorts.abort();
         if (!maybeAborts.isPresent()) {
-            abortCallback.onSuccess(null);
+            abortCallback.onSuccess(Empty.value());
             return;
         }
 
-        maybeAborts.get().whenComplete((noop, failure) -> {
+        maybeAborts.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 abortCallback.onFailure(failure);
             } else {
-                abortCallback.onSuccess(null);
+                abortCallback.onSuccess(Empty.value());
             }
         });
     }
@@ -143,7 +144,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     @Override
     public void commit(final FutureCallback<UnsignedLong> newCallback) {
         checkState(State.PRE_COMMIT_COMPLETE);
-        this.callback = requireNonNull(newCallback);
+        callback = requireNonNull(newCallback);
         state = State.COMMIT_PENDING;
 
         if (nextFailure == null) {
@@ -155,20 +156,20 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     private <T> FutureCallback<T> switchState(final State newState) {
         @SuppressWarnings("unchecked")
-        final FutureCallback<T> ret = (FutureCallback<T>) this.callback;
-        this.callback = null;
+        final FutureCallback<T> ret = (FutureCallback<T>) callback;
+        callback = null;
         LOG.debug("Transaction {} changing state from {} to {}", transactionId, state, newState);
-        this.state = newState;
+        state = newState;
         return ret;
     }
 
     void setNewCandidate(final DataTreeCandidateTip dataTreeCandidate) {
         checkState(State.PRE_COMMIT_COMPLETE);
-        this.candidate = Verify.verifyNotNull(dataTreeCandidate);
+        candidate = verifyNotNull(dataTreeCandidate);
     }
 
     void successfulCanCommit() {
-        switchState(State.CAN_COMMIT_COMPLETE).onSuccess(null);
+        switchState(State.CAN_COMMIT_COMPLETE).onSuccess(Empty.value());
     }
 
     void failedCanCommit(final Exception cause) {
@@ -182,16 +183,16 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
      * @param dataTreeCandidate {@link DataTreeCandidate} under consideration
      * @param futureCallback the callback to invoke on completion, which may be immediate or async.
      */
-    void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback<Void> futureCallback) {
+    void userPreCommit(final DataTreeCandidate dataTreeCandidate, final FutureCallback<Empty> futureCallback) {
         userCohorts.reset();
 
-        final Optional<CompletionStage<Void>> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate);
+        final Optional<CompletionStage<Empty>> maybeCanCommitFuture = userCohorts.canCommit(dataTreeCandidate);
         if (!maybeCanCommitFuture.isPresent()) {
             doUserPreCommit(futureCallback);
             return;
         }
 
-        maybeCanCommitFuture.get().whenComplete((noop, failure) -> {
+        maybeCanCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 futureCallback.onFailure(failure);
             } else {
@@ -200,25 +201,25 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
         });
     }
 
-    private void doUserPreCommit(final FutureCallback<Void> futureCallback) {
-        final Optional<CompletionStage<Void>> maybePreCommitFuture = userCohorts.preCommit();
+    private void doUserPreCommit(final FutureCallback<Empty> futureCallback) {
+        final Optional<CompletionStage<Empty>> maybePreCommitFuture = userCohorts.preCommit();
         if (!maybePreCommitFuture.isPresent()) {
-            futureCallback.onSuccess(null);
+            futureCallback.onSuccess(Empty.value());
             return;
         }
 
-        maybePreCommitFuture.get().whenComplete((noop, failure) -> {
+        maybePreCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 futureCallback.onFailure(failure);
             } else {
-                futureCallback.onSuccess(null);
+                futureCallback.onSuccess(Empty.value());
             }
         });
     }
 
     void successfulPreCommit(final DataTreeCandidateTip dataTreeCandidate) {
         LOG.trace("Transaction {} prepared candidate {}", transaction, dataTreeCandidate);
-        this.candidate = Verify.verifyNotNull(dataTreeCandidate);
+        candidate = verifyNotNull(dataTreeCandidate);
         switchState(State.PRE_COMMIT_COMPLETE).onSuccess(dataTreeCandidate);
     }
 
@@ -234,13 +235,13 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     }
 
     void successfulCommit(final UnsignedLong journalIndex, final Runnable onComplete) {
-        final Optional<CompletionStage<Void>> maybeCommitFuture = userCohorts.commit();
+        final Optional<CompletionStage<Empty>> maybeCommitFuture = userCohorts.commit();
         if (!maybeCommitFuture.isPresent()) {
             finishSuccessfulCommit(journalIndex, onComplete);
             return;
         }
 
-        maybeCommitFuture.get().whenComplete((noop, failure) -> {
+        maybeCommitFuture.orElseThrow().whenComplete((noop, failure) -> {
             if (failure != null) {
                 LOG.error("User cohorts failed to commit", failure);
             }
@@ -272,7 +273,7 @@ final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
 
     void reportFailure(final Exception cause) {
         if (nextFailure == null) {
-            this.nextFailure = requireNonNull(cause);
+            nextFailure = requireNonNull(cause);
         } else {
             LOG.debug("Transaction {} already has a set failure, not updating it", transactionId, cause);
         }
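
The hunks above switch the user-cohort callbacks from FutureCallback<Void> to FutureCallback<Empty>, so success is signalled with the non-null Empty.value() singleton instead of null. A minimal sketch of that pattern, assuming Empty is org.opendaylight.yangtools.yang.common.Empty; the class and method names below (EmptyCallbackBridge, relay) are illustrative, not part of the patch:

import java.util.concurrent.CompletionStage;
import com.google.common.util.concurrent.FutureCallback;
import org.opendaylight.yangtools.yang.common.Empty;

final class EmptyCallbackBridge {
    private EmptyCallbackBridge() {
        // Utility class
    }

    // Relay completion of an asynchronous stage to a Guava callback, signalling
    // success with the Empty singleton rather than a null Void.
    static void relay(final CompletionStage<Empty> stage, final FutureCallback<Empty> callback) {
        stage.whenComplete((result, failure) -> {
            if (failure != null) {
                callback.onFailure(failure);
            } else {
                callback.onSuccess(Empty.value());
            }
        });
    }
}
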
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SingleCommitCohortProxy.java
deleted file mode 100644 (file)
index 5e8a954..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Arrays;
-import java.util.List;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * A cohort proxy implementation for a single-shard transaction commit. If the transaction was a direct commit
- * to the shard, this implementation elides the CanCommitTransaction and CommitTransaction messages to the
- * shard as an optimization.
- *
- * @author Thomas Pantelis
- */
-class SingleCommitCohortProxy extends AbstractThreePhaseCommitCohort<Object> {
-    private static final Logger LOG = LoggerFactory.getLogger(SingleCommitCohortProxy.class);
-
-    private final ActorUtils actorUtils;
-    private final Future<Object> cohortFuture;
-    private final TransactionIdentifier transactionId;
-    private volatile DOMStoreThreePhaseCommitCohort delegateCohort = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
-    private final OperationCallback.Reference operationCallbackRef;
-
-    SingleCommitCohortProxy(ActorUtils actorUtils, Future<Object> cohortFuture, TransactionIdentifier transactionId,
-            OperationCallback.Reference operationCallbackRef) {
-        this.actorUtils = actorUtils;
-        this.cohortFuture = cohortFuture;
-        this.transactionId = requireNonNull(transactionId);
-        this.operationCallbackRef = operationCallbackRef;
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        LOG.debug("Tx {} canCommit", transactionId);
-
-        final SettableFuture<Boolean> returnFuture = SettableFuture.create();
-
-        cohortFuture.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(Throwable failure, Object cohortResponse) {
-                if (failure != null) {
-                    operationCallbackRef.get().failure();
-                    returnFuture.setException(failure);
-                    return;
-                }
-
-                operationCallbackRef.get().success();
-
-                LOG.debug("Tx {} successfully completed direct commit", transactionId);
-
-                // The Future was the result of a direct commit to the shard, essentially eliding the
-                // front-end 3PC coordination. We don't really care about the specific Future
-                // response object, only that it completed successfully. At this point the Tx is complete
-                // so return true. The subsequent preCommit and commit phases will be no-ops, ie return
-                // immediate success, to complete the 3PC for the front-end.
-                returnFuture.set(Boolean.TRUE);
-            }
-        }, actorUtils.getClientDispatcher());
-
-        return returnFuture;
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return delegateCohort.preCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegateCohort.abort();
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return delegateCohort.commit();
-    }
-
-    @Override
-    List<Future<Object>> getCohortFutures() {
-        return Arrays.asList(cohortFuture);
-    }
-}
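
The removed SingleCommitCohortProxy elides the CanCommitTransaction and CommitTransaction messages once the shard has already performed a direct commit. A hedged, hypothetical sketch of that shape (DirectCommitCohortSketch is not the removed class and omits the Akka/rate-limiting plumbing): after the direct-commit response resolves canCommit, the remaining phases only report immediate success.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;

final class DirectCommitCohortSketch {
    private final SettableFuture<Boolean> canCommitFuture = SettableFuture.create();

    // Invoked when the shard's direct-commit response arrives.
    void onDirectCommitCompleted(final Throwable failure) {
        if (failure != null) {
            canCommitFuture.setException(failure);
        } else {
            canCommitFuture.set(Boolean.TRUE);
        }
    }

    ListenableFuture<Boolean> canCommit() {
        return canCommitFuture;
    }

    ListenableFuture<Void> preCommit() {
        // The shard already committed: nothing left to do.
        return Futures.immediateFuture(null);
    }

    ListenableFuture<Void> commit() {
        return Futures.immediateFuture(null);
    }
}
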
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/StandaloneFrontendHistory.java
index 8beb44d50f89d3e6a7af4bec4d0b9afa838b5c3d..57c680da2a6f0d673b7ee1861eaecd05e6c07eba 100644 (file)
@@ -7,19 +7,20 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
 import com.google.common.primitives.UnsignedLong;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Optional;
 import java.util.SortedSet;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Standalone transaction specialization of {@link AbstractFrontendHistory}. There can be multiple open transactions
@@ -28,28 +29,28 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
  * @author Robert Varga
  */
 final class StandaloneFrontendHistory extends AbstractFrontendHistory {
-    private final LocalHistoryIdentifier identifier;
-    private final ShardDataTree tree;
+    private final @NonNull LocalHistoryIdentifier identifier;
+    private final @NonNull ShardDataTree tree;
 
     private StandaloneFrontendHistory(final String persistenceId, final ClientIdentifier clientId,
             final ShardDataTree tree, final Map<UnsignedLong, Boolean> closedTransactions,
-            final RangeSet<UnsignedLong> purgedTransactions) {
+            final MutableUnsignedLongSet purgedTransactions) {
         super(persistenceId, tree, closedTransactions, purgedTransactions);
-        this.identifier = new LocalHistoryIdentifier(clientId, 0);
-        this.tree = Preconditions.checkNotNull(tree);
+        identifier = new LocalHistoryIdentifier(clientId, 0);
+        this.tree = requireNonNull(tree);
     }
 
-    static StandaloneFrontendHistory create(final String persistenceId, final ClientIdentifier clientId,
+    static @NonNull StandaloneFrontendHistory create(final String persistenceId, final ClientIdentifier clientId,
             final ShardDataTree tree) {
         return new StandaloneFrontendHistory(persistenceId, clientId, tree, ImmutableMap.of(),
-            TreeRangeSet.create());
+            MutableUnsignedLongSet.of());
     }
 
-    static StandaloneFrontendHistory recreate(final String persistenceId, final ClientIdentifier clientId,
+    static @NonNull StandaloneFrontendHistory recreate(final String persistenceId, final ClientIdentifier clientId,
             final ShardDataTree tree, final Map<UnsignedLong, Boolean> closedTransactions,
-            final RangeSet<UnsignedLong> purgedTransactions) {
+            final MutableUnsignedLongSet purgedTransactions) {
         return new StandaloneFrontendHistory(persistenceId, clientId, tree, new HashMap<>(closedTransactions),
-            purgedTransactions);
+            purgedTransactions.mutableCopy());
     }
 
     @Override
@@ -59,12 +60,12 @@ final class StandaloneFrontendHistory extends AbstractFrontendHistory {
 
     @Override
     FrontendTransaction createOpenSnapshot(final TransactionIdentifier id) {
-        return FrontendReadOnlyTransaction.create(this, tree.newReadOnlyTransaction(id));
+        return FrontendReadOnlyTransaction.create(this, tree.newStandaloneReadOnlyTransaction(id));
     }
 
     @Override
     FrontendTransaction createOpenTransaction(final TransactionIdentifier id) {
-        return FrontendReadWriteTransaction.createOpen(this, tree.newReadWriteTransaction(id));
+        return FrontendReadWriteTransaction.createOpen(this, tree.newStandaloneReadWriteTransaction(id));
     }
 
     @Override
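
The change above replaces the Guava RangeSet<UnsignedLong> bookkeeping of purged transaction IDs with the controller's MutableUnsignedLongSet. For reference, a minimal sketch of the RangeSet idiom being retired, using only Guava types; PurgedTransactionsSketch and its methods are illustrative names:

import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;
import com.google.common.primitives.UnsignedLong;

final class PurgedTransactionsSketch {
    private final RangeSet<UnsignedLong> purged = TreeRangeSet.create();

    // Mark a single transaction id as purged; adjacent ranges coalesce automatically.
    void purge(final long txId) {
        final UnsignedLong id = UnsignedLong.fromLongBits(txId);
        purged.add(Range.closedOpen(id, id.plus(UnsignedLong.ONE)));
    }

    boolean isPurged(final long txId) {
        return purged.contains(UnsignedLong.fromLongBits(txId));
    }
}
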
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java
deleted file mode 100644 (file)
index ad6cc3e..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import akka.dispatch.OnComplete;
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A chain of {@link TransactionProxy}s. It allows only a single transaction to be open
- * at a time. For remote transactions, it also tracks the outstanding readiness requests
- * towards the shard and unblocks operations only after all have completed.
- */
-final class TransactionChainProxy extends AbstractTransactionContextFactory<LocalTransactionChain>
-        implements DOMStoreTransactionChain {
-    private abstract static class State {
-        /**
-         * Check if it is okay to allocate a new transaction.
-         * @throws IllegalStateException if a transaction may not be allocated.
-         */
-        abstract void checkReady();
-
-        /**
-         * Return the future which needs to be waited for before shard information
-         * is returned (which unblocks remote transactions).
-         * @return Future to wait for, or null of no wait is necessary
-         */
-        abstract Future<?> previousFuture();
-    }
-
-    private abstract static class Pending extends State {
-        private final TransactionIdentifier transaction;
-        private final Future<?> previousFuture;
-
-        Pending(final TransactionIdentifier transaction, final Future<?> previousFuture) {
-            this.previousFuture = previousFuture;
-            this.transaction = Preconditions.checkNotNull(transaction);
-        }
-
-        @Override
-        final Future<?> previousFuture() {
-            return previousFuture;
-        }
-
-        final TransactionIdentifier getIdentifier() {
-            return transaction;
-        }
-    }
-
-    private static final class Allocated extends Pending {
-        Allocated(final TransactionIdentifier transaction, final Future<?> previousFuture) {
-            super(transaction, previousFuture);
-        }
-
-        @Override
-        void checkReady() {
-            throw new IllegalStateException(String.format("Previous transaction %s is not ready yet", getIdentifier()));
-        }
-    }
-
-    private static final class Submitted extends Pending {
-        Submitted(final TransactionIdentifier transaction, final Future<?> previousFuture) {
-            super(transaction, previousFuture);
-        }
-
-        @Override
-        void checkReady() {
-            // Okay to allocate
-        }
-    }
-
-    private abstract static class DefaultState extends State {
-        @Override
-        final Future<?> previousFuture() {
-            return null;
-        }
-    }
-
-    private static final State IDLE_STATE = new DefaultState() {
-        @Override
-        void checkReady() {
-            // Okay to allocate
-        }
-    };
-
-    private static final State CLOSED_STATE = new DefaultState() {
-        @Override
-        void checkReady() {
-            throw new DOMTransactionChainClosedException("Transaction chain has been closed");
-        }
-    };
-
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class);
-    private static final AtomicReferenceFieldUpdater<TransactionChainProxy, State> STATE_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(TransactionChainProxy.class, State.class, "currentState");
-
-    private final TransactionContextFactory parent;
-    private volatile State currentState = IDLE_STATE;
-
-    /**
-     * This map holds Promise instances for each read-only tx. It is used to maintain ordering of tx creates
-     * wrt to read-only tx's between this class and a LocalTransactionChain since they're bridged by
-     * asynchronous futures. Otherwise, in the following scenario, eg:
-     * <p/>
-     *   1) Create write tx1 on chain
-     *   2) do write and submit
-     *   3) Create read-only tx2 on chain and issue read
-     *   4) Create write tx3 on chain, do write but do not submit
-     * <p/>
-     * if the sequence/timing is right, tx3 may create its local tx on the LocalTransactionChain before tx2,
-     * which results in tx2 failing b/c tx3 isn't ready yet. So maintaining ordering prevents this issue
-     * (see Bug 4774).
-     * <p/>
-     * A Promise is added via newReadOnlyTransaction. When the parent class completes the primary shard
-     * lookup and creates the TransactionContext (either success or failure), onTransactionContextCreated is
-     * called which completes the Promise. A write tx that is created prior to completion will wait on the
-     * Promise's Future via findPrimaryShard.
-     */
-    private final ConcurrentMap<TransactionIdentifier, Promise<Object>> priorReadOnlyTxPromises =
-            new ConcurrentHashMap<>();
-
-    TransactionChainProxy(final TransactionContextFactory parent, final LocalHistoryIdentifier historyId) {
-        super(parent.getActorUtils(), historyId);
-        this.parent = parent;
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction() {
-        currentState.checkReady();
-        TransactionProxy transactionProxy = new TransactionProxy(this, TransactionType.READ_ONLY);
-        priorReadOnlyTxPromises.put(transactionProxy.getIdentifier(), Futures.<Object>promise());
-        return transactionProxy;
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return allocateWriteTransaction(TransactionType.READ_WRITE);
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        getActorUtils().acquireTxCreationPermit();
-        return allocateWriteTransaction(TransactionType.WRITE_ONLY);
-    }
-
-    @Override
-    public void close() {
-        currentState = CLOSED_STATE;
-
-        // Send a close transaction chain request to each and every shard
-
-        getActorUtils().broadcast(version -> new CloseTransactionChain(getHistoryId(), version).toSerializable(),
-                CloseTransactionChain.class);
-    }
-
-    private TransactionProxy allocateWriteTransaction(final TransactionType type) {
-        State localState = currentState;
-        localState.checkReady();
-
-        final TransactionProxy ret = new TransactionProxy(this, type);
-        currentState = new Allocated(ret.getIdentifier(), localState.previousFuture());
-        return ret;
-    }
-
-    @Override
-    protected LocalTransactionChain factoryForShard(final String shardName, final ActorSelection shardLeader,
-            final DataTree dataTree) {
-        final LocalTransactionChain ret = new LocalTransactionChain(this, shardLeader, dataTree);
-        LOG.debug("Allocated transaction chain {} for shard {} leader {}", ret, shardName, shardLeader);
-        return ret;
-    }
-
-    /**
-     * This method is overridden to ensure the previous Tx's ready operations complete
-     * before we initiate the next Tx in the chain to avoid creation failures if the
-     * previous Tx's ready operations haven't completed yet.
-     */
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    @Override
-    protected Future<PrimaryShardInfo> findPrimaryShard(final String shardName, final TransactionIdentifier txId) {
-        // Read current state atomically
-        final State localState = currentState;
-
-        // There are no outstanding futures, shortcut
-        Future<?> previous = localState.previousFuture();
-        if (previous == null) {
-            return combineFutureWithPossiblePriorReadOnlyTxFutures(parent.findPrimaryShard(shardName, txId), txId);
-        }
-
-        final String previousTransactionId;
-
-        if (localState instanceof Pending) {
-            previousTransactionId = ((Pending) localState).getIdentifier().toString();
-            LOG.debug("Tx: {} - waiting for ready futures with pending Tx {}", txId, previousTransactionId);
-        } else {
-            previousTransactionId = "";
-            LOG.debug("Waiting for ready futures on chain {}", getHistoryId());
-        }
-
-        previous = combineFutureWithPossiblePriorReadOnlyTxFutures(previous, txId);
-
-        // Add a callback for completion of the combined Futures.
-        final Promise<PrimaryShardInfo> returnPromise = Futures.promise();
-
-        final OnComplete onComplete = new OnComplete() {
-            @Override
-            public void onComplete(final Throwable failure, final Object notUsed) {
-                if (failure != null) {
-                    // A Ready Future failed so fail the returned Promise.
-                    LOG.error("Tx: {} - ready future failed for previous Tx {}", txId, previousTransactionId);
-                    returnPromise.failure(failure);
-                } else {
-                    LOG.debug("Tx: {} - previous Tx {} readied - proceeding to FindPrimaryShard",
-                            txId, previousTransactionId);
-
-                    // Send the FindPrimaryShard message and use the resulting Future to complete the
-                    // returned Promise.
-                    returnPromise.completeWith(parent.findPrimaryShard(shardName, txId));
-                }
-            }
-        };
-
-        previous.onComplete(onComplete, getActorUtils().getClientDispatcher());
-        return returnPromise.future();
-    }
-
-    private <T> Future<T> combineFutureWithPossiblePriorReadOnlyTxFutures(final Future<T> future,
-            final TransactionIdentifier txId) {
-        if (!priorReadOnlyTxPromises.containsKey(txId) && !priorReadOnlyTxPromises.isEmpty()) {
-            Collection<Entry<TransactionIdentifier, Promise<Object>>> priorReadOnlyTxPromiseEntries =
-                    new ArrayList<>(priorReadOnlyTxPromises.entrySet());
-            if (priorReadOnlyTxPromiseEntries.isEmpty()) {
-                return future;
-            }
-
-            List<Future<Object>> priorReadOnlyTxFutures = new ArrayList<>(priorReadOnlyTxPromiseEntries.size());
-            for (Entry<TransactionIdentifier, Promise<Object>> entry: priorReadOnlyTxPromiseEntries) {
-                LOG.debug("Tx: {} - waiting on future for prior read-only Tx {}", txId, entry.getKey());
-                priorReadOnlyTxFutures.add(entry.getValue().future());
-            }
-
-            Future<Iterable<Object>> combinedFutures = Futures.sequence(priorReadOnlyTxFutures,
-                    getActorUtils().getClientDispatcher());
-
-            final Promise<T> returnPromise = Futures.promise();
-            final OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
-                @Override
-                public void onComplete(final Throwable failure, final Iterable<Object> notUsed) {
-                    LOG.debug("Tx: {} - prior read-only Tx futures complete", txId);
-
-                    // Complete the returned Promise with the original Future.
-                    returnPromise.completeWith(future);
-                }
-            };
-
-            combinedFutures.onComplete(onComplete, getActorUtils().getClientDispatcher());
-            return returnPromise.future();
-        } else {
-            return future;
-        }
-    }
-
-    @Override
-    protected <T> void onTransactionReady(final TransactionIdentifier transaction,
-            final Collection<Future<T>> cohortFutures) {
-        final State localState = currentState;
-        Preconditions.checkState(localState instanceof Allocated, "Readying transaction %s while state is %s",
-                transaction, localState);
-        final TransactionIdentifier currentTx = ((Allocated)localState).getIdentifier();
-        Preconditions.checkState(transaction.equals(currentTx), "Readying transaction %s while %s is allocated",
-                transaction, currentTx);
-
-        // Transaction ready and we are not waiting for futures -- go to idle
-        if (cohortFutures.isEmpty()) {
-            currentState = IDLE_STATE;
-            return;
-        }
-
-        // Combine the ready Futures into 1
-        final Future<Iterable<T>> combined = Futures.sequence(cohortFutures, getActorUtils().getClientDispatcher());
-
-        // Record that we have outstanding futures
-        final State newState = new Submitted(transaction, combined);
-        currentState = newState;
-
-        // Attach a completion reset, but only if we do not allocate a transaction
-        // in-between
-        combined.onComplete(new OnComplete<Iterable<T>>() {
-            @Override
-            public void onComplete(final Throwable arg0, final Iterable<T> arg1) {
-                STATE_UPDATER.compareAndSet(TransactionChainProxy.this, newState, IDLE_STATE);
-            }
-        }, getActorUtils().getClientDispatcher());
-    }
-
-    @Override
-    protected void onTransactionContextCreated(@Nonnull TransactionIdentifier transactionId) {
-        Promise<Object> promise = priorReadOnlyTxPromises.remove(transactionId);
-        if (promise != null) {
-            promise.success(null);
-        }
-    }
-}
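
The removed TransactionChainProxy gates transaction allocation on a small state machine (idle, allocated, submitted, closed) maintained through an AtomicReferenceFieldUpdater. A condensed, hypothetical sketch of that gating, using a plain AtomicReference instead of the field updater and without the Akka future plumbing:

import java.util.concurrent.atomic.AtomicReference;

// Hypothetical condensation of the removed chain-state handling: a new transaction may be
// allocated only when the chain is idle or the previous transaction has been submitted.
final class ChainStateSketch {
    enum State { IDLE, ALLOCATED, SUBMITTED, CLOSED }

    private final AtomicReference<State> state = new AtomicReference<>(State.IDLE);

    void allocate() {
        final State current = state.get();
        if (current == State.ALLOCATED) {
            throw new IllegalStateException("Previous transaction is not ready yet");
        }
        if (current == State.CLOSED) {
            throw new IllegalStateException("Transaction chain has been closed");
        }
        state.set(State.ALLOCATED);
    }

    void submitted() {
        state.set(State.SUBMITTED);
    }

    // Invoked when the outstanding ready futures complete; only reset to idle if no new
    // transaction was allocated in the meantime.
    void readyFuturesCompleted() {
        state.compareAndSet(State.SUBMITTED, State.IDLE);
    }

    void close() {
        state.set(State.CLOSED);
    }
}
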
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java
deleted file mode 100644 (file)
index d9a53ab..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Optional;
-import java.util.SortedSet;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import scala.concurrent.Future;
-
-/*
- * FIXME: why do we need this interface? It should be possible to integrate it with
- *        AbstractTransactionContext, which is the only implementation anyway.
- */
-interface TransactionContext {
-    void closeTransaction();
-
-    Future<ActorSelection> readyTransaction(Boolean havePermit, Optional<SortedSet<String>> participatingShardNames);
-
-    void executeModification(AbstractModification modification, Boolean havePermit);
-
-    <T> void executeRead(AbstractRead<T> readCmd, SettableFuture<T> promise, Boolean havePermit);
-
-    Future<Object> directCommit(Boolean havePermit);
-
-    /**
-     * Invoked by {@link TransactionContextWrapper} when it has finished handing
-     * off operations to this context. From this point on, the context is responsible
-     * for throttling operations.
-     *
-     * <p>
-     * Implementations can rely on the wrapper calling this operation in a synchronized
-     * block, so they do not need to ensure visibility of this state transition themselves.
-     */
-    void operationHandOffComplete();
-
-    /**
-     * A TransactionContext that uses operation limiting should return true else false.
-     *
-     * @return true if operation limiting is used, false otherwise
-     */
-    boolean usesOperationLimiting();
-
-    short getTransactionVersion();
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextCleanup.java
deleted file mode 100644 (file)
index ef8cc49..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A PhantomReference that closes remote transactions for a TransactionContext when it's
- * garbage collected. This is used for read-only transactions as they're not explicitly closed
- * by clients. So the only way to detect that a transaction is no longer in use and it's safe
- * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
- * but TransactionProxy instances should generally be short-lived enough to avoid being moved
- * to the old generation space and thus should be cleaned up in a timely manner as the GC
- * runs on the young generation (eden, swap1...) space much more frequently.
- */
-final class TransactionContextCleanup extends FinalizablePhantomReference<TransactionProxy> {
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionContextCleanup.class);
-    /**
-     * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
-     * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
-     * trickery to clean up its internal thread when the bundle is unloaded.
-     */
-    private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue();
-
-    /**
-     * This stores the TransactionProxyCleanupPhantomReference instances statically, This is
-     * necessary because PhantomReferences need a hard reference so they're not garbage collected.
-     * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
-     * and thus becomes eligible for garbage collection.
-     */
-    private static final Map<TransactionContext, TransactionContextCleanup> CACHE = new ConcurrentHashMap<>();
-
-    private final TransactionContext cleanup;
-
-    private TransactionContextCleanup(final TransactionProxy referent, final TransactionContext cleanup) {
-        super(referent, QUEUE);
-        this.cleanup = cleanup;
-    }
-
-    static void track(final TransactionProxy referent, final TransactionContext cleanup) {
-        final TransactionContextCleanup ret = new TransactionContextCleanup(referent, cleanup);
-        CACHE.put(cleanup, ret);
-    }
-
-    @Override
-    public void finalizeReferent() {
-        LOG.trace("Cleaning up {} Tx actors", cleanup);
-
-        if (CACHE.remove(cleanup) != null) {
-            cleanup.closeTransaction();
-        }
-    }
-
-    static void untrack(final TransactionContext cleanup) {
-        CACHE.remove(cleanup);
-    }
-}
\ No newline at end of file
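
The removed TransactionContextCleanup relied on a Guava FinalizablePhantomReference to close read-only remote transactions once their TransactionProxy becomes unreachable. The sketch below shows the same idea with the JDK's java.lang.ref.Cleaner, which is an assumed substitute rather than what the removed code used; ReadOnlyTransactionCleanup and RemoteHandle are illustrative names:

import java.lang.ref.Cleaner;

final class ReadOnlyTransactionCleanup {
    private static final Cleaner CLEANER = Cleaner.create();

    // Hypothetical handle standing in for the remote transaction actors to close.
    interface RemoteHandle {
        void closeTransaction();
    }

    // Registers a cleanup action that runs after 'owner' has been garbage collected.
    // The action must not capture 'owner', otherwise it would never become unreachable.
    static Cleaner.Cleanable track(final Object owner, final RemoteHandle handle) {
        return CLEANER.register(owner, handle::closeTransaction);
    }
}
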
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextFactory.java
deleted file mode 100644 (file)
index 8655c68..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import java.util.Collection;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import scala.concurrent.Future;
-
-/**
- * An {@link AbstractTransactionContextFactory} which produces TransactionContext instances for single
- * transactions (ie not chained).
- */
-final class TransactionContextFactory extends AbstractTransactionContextFactory<LocalTransactionFactoryImpl> {
-    private final AtomicLong nextHistory = new AtomicLong(1);
-
-    TransactionContextFactory(final ActorUtils actorUtils, final ClientIdentifier clientId) {
-        super(actorUtils, new LocalHistoryIdentifier(clientId, 0));
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    protected LocalTransactionFactoryImpl factoryForShard(final String shardName, final ActorSelection shardLeader,
-            final DataTree dataTree) {
-        return new LocalTransactionFactoryImpl(getActorUtils(), shardLeader, dataTree);
-    }
-
-    @Override
-    protected Future<PrimaryShardInfo> findPrimaryShard(final String shardName, TransactionIdentifier txId) {
-        return getActorUtils().findPrimaryShardAsync(shardName);
-    }
-
-    @Override
-    protected <T> void onTransactionReady(final TransactionIdentifier transaction,
-            final Collection<Future<T>> cohortFutures) {
-        // Transactions are disconnected, this is a no-op
-    }
-
-    DOMStoreTransactionChain createTransactionChain() {
-        return new TransactionChainProxy(this, new LocalHistoryIdentifier(getHistoryId().getClientId(),
-                nextHistory.getAndIncrement()));
-    }
-
-    @Override
-    protected void onTransactionContextCreated(final TransactionIdentifier transactionId) {
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapper.java
deleted file mode 100644 (file)
index 60628b0..0000000
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Futures;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.concurrent.TimeUnit;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A helper class that wraps an eventual TransactionContext instance. Operations destined for the target
- * TransactionContext instance are cached until the TransactionContext instance becomes available at which
- * time they are executed.
- *
- * @author Thomas Pantelis
- */
-class TransactionContextWrapper {
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionContextWrapper.class);
-
-    /**
-     * The list of transaction operations to execute once the TransactionContext becomes available.
-     */
-    @GuardedBy("queuedTxOperations")
-    private final List<Entry<TransactionOperation, Boolean>> queuedTxOperations = new ArrayList<>();
-    private final TransactionIdentifier identifier;
-    private final OperationLimiter limiter;
-    private final String shardName;
-
-    /**
-     * The resulting TransactionContext.
-     */
-    private volatile TransactionContext transactionContext;
-    @GuardedBy("queuedTxOperations")
-    private TransactionContext deferredTransactionContext;
-    @GuardedBy("queuedTxOperations")
-    private boolean pendingEnqueue;
-
-    TransactionContextWrapper(final TransactionIdentifier identifier, final ActorUtils actorUtils,
-            final String shardName) {
-        this.identifier = requireNonNull(identifier);
-        this.limiter = new OperationLimiter(identifier,
-                // 1 extra permit for the ready operation
-                actorUtils.getDatastoreContext().getShardBatchedModificationCount() + 1,
-                TimeUnit.MILLISECONDS.toSeconds(actorUtils.getDatastoreContext().getOperationTimeoutInMillis()));
-        this.shardName = requireNonNull(shardName);
-    }
-
-    TransactionContext getTransactionContext() {
-        return transactionContext;
-    }
-
-    TransactionIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    /**
-     * Adds a TransactionOperation to be executed once the TransactionContext becomes available. This method is called
-     * only after the caller has checked (without synchronizing with executePriorTransactionOperations()) that the
-     * context is not available.
-     */
-    private void enqueueTransactionOperation(final TransactionOperation operation) {
-        // We have three things to do here:
-        // - synchronize with executePriorTransactionOperations() so that logical operation ordering is maintained
-        // - acquire a permit for the operation if we still need to enqueue it
-        // - enqueue the operation
-        //
-        // Since each operation needs to acquire a permit exactly once and the limiter is shared between us and the
-        // TransactionContext, we need to know whether an operation has a permit before we enqueue it. Further
-        // complications are:
-        // - this method may be called from the thread invoking executePriorTransactionOperations()
-        // - user may be violating API contract of using the transaction from a single thread
-
-        // As a first step, we will synchronize on the queue and check if the handoff has completed. While we have
-        // the lock, we will assert that we will be enqueueing another operation.
-        final TransactionContext contextOnEntry;
-        synchronized (queuedTxOperations) {
-            contextOnEntry = transactionContext;
-            if (contextOnEntry == null) {
-                checkState(pendingEnqueue == false, "Concurrent access to transaction %s detected", identifier);
-                pendingEnqueue = true;
-            }
-        }
-
-        // Short-circuit if there is a context
-        if (contextOnEntry != null) {
-            operation.invoke(transactionContext, null);
-            return;
-        }
-
-        boolean cleanupEnqueue = true;
-        TransactionContext finishHandoff = null;
-        try {
-            // Acquire the permit,
-            final boolean havePermit = limiter.acquire();
-            if (!havePermit) {
-                LOG.warn("Failed to acquire enqueue operation permit for transaction {} on shard {}", identifier,
-                    shardName);
-            }
-
-            // Ready to enqueue, take the lock again and append the operation
-            synchronized (queuedTxOperations) {
-                LOG.debug("Tx {} Queuing TransactionOperation", identifier);
-                queuedTxOperations.add(new SimpleImmutableEntry<>(operation, havePermit));
-                pendingEnqueue = false;
-                cleanupEnqueue = false;
-                finishHandoff = deferredTransactionContext;
-                deferredTransactionContext = null;
-            }
-        } finally {
-            if (cleanupEnqueue) {
-                synchronized (queuedTxOperations) {
-                    pendingEnqueue = false;
-                    finishHandoff = deferredTransactionContext;
-                    deferredTransactionContext = null;
-                }
-            }
-            if (finishHandoff != null) {
-                executePriorTransactionOperations(finishHandoff);
-            }
-        }
-    }
-
-    void maybeExecuteTransactionOperation(final TransactionOperation op) {
-        final TransactionContext localContext = transactionContext;
-        if (localContext != null) {
-            op.invoke(localContext, null);
-        } else {
-            // The shard Tx hasn't been created yet so add the Tx operation to the Tx Future
-            // callback to be executed after the Tx is created.
-            enqueueTransactionOperation(op);
-        }
-    }
-
-    void executePriorTransactionOperations(final TransactionContext localTransactionContext) {
-        while (true) {
-            // Access to queuedTxOperations and transactionContext must be protected and atomic
-            // (ie synchronized) with respect to #addTxOperationOnComplete to handle timing
-            // issues and ensure no TransactionOperation is missed and that they are processed
-            // in the order they occurred.
-
-            // We'll make a local copy of the queuedTxOperations list to handle re-entrancy
-            // in case a TransactionOperation results in another transaction operation being
-            // queued (eg a put operation from a client read Future callback that is notified
-            // synchronously).
-            final Collection<Entry<TransactionOperation, Boolean>> operationsBatch;
-            synchronized (queuedTxOperations) {
-                if (queuedTxOperations.isEmpty()) {
-                    if (!pendingEnqueue) {
-                        // We're done invoking the TransactionOperations so we can now publish the TransactionContext.
-                        localTransactionContext.operationHandOffComplete();
-
-                        // This is null-to-non-null transition after which we are releasing the lock and not doing
-                        // any further processing.
-                        transactionContext = localTransactionContext;
-                    } else {
-                        deferredTransactionContext = localTransactionContext;
-                    }
-                    return;
-                }
-
-                operationsBatch = new ArrayList<>(queuedTxOperations);
-                queuedTxOperations.clear();
-            }
-
-            // Invoke TransactionOperations outside the sync block to avoid unnecessary blocking. A slight down-side is
-            // that we need to re-acquire the lock below but this should be negligible.
-            for (Entry<TransactionOperation, Boolean> oper : operationsBatch) {
-                final Boolean permit = oper.getValue();
-                if (permit.booleanValue() && !localTransactionContext.usesOperationLimiting()) {
-                    // If the context is not using limiting we need to release operations as we are queueing them, so
-                    // user threads are not charged for them.
-                    limiter.release();
-                }
-                oper.getKey().invoke(localTransactionContext, permit);
-            }
-        }
-    }
-
-    Future<ActorSelection> readyTransaction(Optional<SortedSet<String>> participatingShardNames) {
-        // avoid the creation of a promise and a TransactionOperation
-        final TransactionContext localContext = transactionContext;
-        if (localContext != null) {
-            return localContext.readyTransaction(null, participatingShardNames);
-        }
-
-        final Promise<ActorSelection> promise = Futures.promise();
-        enqueueTransactionOperation(new TransactionOperation() {
-            @Override
-            public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) {
-                promise.completeWith(newTransactionContext.readyTransaction(havePermit, participatingShardNames));
-            }
-        });
-
-        return promise.future();
-    }
-
-    OperationLimiter getLimiter() {
-        return limiter;
-    }
-}
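
The removed TransactionContextWrapper buffers operations until the shard's TransactionContext materializes and then replays them in order before publishing the context. A simplified, hypothetical sketch of that hand-off, without the permit/limiter handling; DeferredOperations and its type parameter are illustrative:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// 'C' stands in for the backend context the queued operations ultimately run against.
final class DeferredOperations<C> {
    private final List<Consumer<C>> queued = new ArrayList<>();
    private volatile C context;

    // Run immediately if the context is already available, otherwise queue for later.
    void maybeExecute(final Consumer<C> operation) {
        final C local = context;
        if (local != null) {
            operation.accept(local);
            return;
        }
        synchronized (queued) {
            if (context == null) {
                queued.add(operation);
                return;
            }
        }
        operation.accept(context);
    }

    // Called once the real context is resolved: drain the backlog, then publish the context.
    void contextResolved(final C resolved) {
        while (true) {
            final List<Consumer<C>> batch;
            synchronized (queued) {
                if (queued.isEmpty()) {
                    context = resolved;
                    return;
                }
                batch = new ArrayList<>(queued);
                queued.clear();
            }
            // Invoke outside the lock; re-entrant calls may queue further operations,
            // which the next loop iteration will drain.
            batch.forEach(op -> op.accept(resolved));
        }
    }
}
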
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java
deleted file mode 100644 (file)
index 962d261..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Abstract superclass for transaction operations which should be executed
- * on a {@link TransactionContext} at a later point in time.
- */
-abstract class TransactionOperation {
-    /**
-     * Execute the delayed operation.
-     *
-     * @param transactionContext the TransactionContext
-     * @param havePermit Boolean indicator if this operation has tried and acquired a permit, null if there was no
-     *                   attempt to acquire a permit.
-     */
-    protected abstract void invoke(TransactionContext transactionContext, @Nullable Boolean havePermit);
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java
deleted file mode 100644 (file)
index 83863de..0000000
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSelection;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
-import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-
-/**
- * A transaction potentially spanning multiple backend shards.
- */
-public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIdentifier>
-        implements DOMStoreReadWriteTransaction {
-    private enum TransactionState {
-        OPEN,
-        READY,
-        CLOSED,
-    }
-
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
-
-    private final Map<String, TransactionContextWrapper> txContextWrappers = new TreeMap<>();
-    private final AbstractTransactionContextFactory<?> txContextFactory;
-    private final TransactionType type;
-    private TransactionState state = TransactionState.OPEN;
-
-    @VisibleForTesting
-    public TransactionProxy(final AbstractTransactionContextFactory<?> txContextFactory, final TransactionType type) {
-        super(txContextFactory.nextIdentifier(), txContextFactory.getActorUtils().getDatastoreContext()
-                .isTransactionDebugContextEnabled());
-        this.txContextFactory = txContextFactory;
-        this.type = Preconditions.checkNotNull(type);
-
-        LOG.debug("New {} Tx - {}", type, getIdentifier());
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
-        return executeRead(shardNameFromIdentifier(path), new DataExists(path, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    private <T> FluentFuture<T> executeRead(final String shardName, final AbstractRead<T> readCmd) {
-        Preconditions.checkState(type != TransactionType.WRITE_ONLY,
-                "Reads from write-only transactions are not allowed");
-
-        LOG.trace("Tx {} {} {}", getIdentifier(), readCmd.getClass().getSimpleName(), readCmd.getPath());
-
-        final SettableFuture<T> proxyFuture = SettableFuture.create();
-        TransactionContextWrapper contextWrapper = getContextWrapper(shardName);
-        contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
-            @Override
-            public void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-                transactionContext.executeRead(readCmd, proxyFuture, havePermit);
-            }
-        });
-
-        return FluentFuture.from(proxyFuture);
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
-        Preconditions.checkState(type != TransactionType.WRITE_ONLY,
-                "Reads from write-only transactions are not allowed");
-        Preconditions.checkNotNull(path, "path should not be null");
-
-        LOG.trace("Tx {} read {}", getIdentifier(), path);
-        return path.isEmpty() ? readAllData() :  singleShardRead(shardNameFromIdentifier(path), path);
-    }
-
-    private FluentFuture<Optional<NormalizedNode<?, ?>>> singleShardRead(
-            final String shardName, final YangInstanceIdentifier path) {
-        return executeRead(shardName, new ReadData(path, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    private FluentFuture<Optional<NormalizedNode<?, ?>>> readAllData() {
-        final Set<String> allShardNames = txContextFactory.getActorUtils().getConfiguration().getAllShardNames();
-        final Collection<FluentFuture<Optional<NormalizedNode<?, ?>>>> futures = new ArrayList<>(allShardNames.size());
-
-        for (String shardName : allShardNames) {
-            futures.add(singleShardRead(shardName, YangInstanceIdentifier.EMPTY));
-        }
-
-        final ListenableFuture<List<Optional<NormalizedNode<?, ?>>>> listFuture = Futures.allAsList(futures);
-        final ListenableFuture<Optional<NormalizedNode<?, ?>>> aggregateFuture;
-
-        aggregateFuture = Futures.transform(listFuture,
-            (Function<List<Optional<NormalizedNode<?, ?>>>, Optional<NormalizedNode<?, ?>>>) input -> {
-                try {
-                    return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.EMPTY, input,
-                            txContextFactory.getActorUtils().getSchemaContext(),
-                            txContextFactory.getActorUtils().getDatastoreContext().getLogicalStoreType());
-                } catch (DataValidationFailedException e) {
-                    throw new IllegalArgumentException("Failed to aggregate", e);
-                }
-            }, MoreExecutors.directExecutor());
-
-        return FluentFuture.from(aggregateFuture);
-    }
-
-    @Override
-    public void delete(final YangInstanceIdentifier path) {
-        executeModification(new DeleteModification(path));
-    }
-
-    @Override
-    public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        executeModification(new MergeModification(path, data));
-    }
-
-    @Override
-    public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        executeModification(new WriteModification(path, data));
-    }
-
-    private void executeModification(final AbstractModification modification) {
-        checkModificationState();
-
-        LOG.trace("Tx {} executeModification {} {}", getIdentifier(), modification.getClass().getSimpleName(),
-                modification.getPath());
-
-        TransactionContextWrapper contextWrapper = getContextWrapper(modification.getPath());
-        contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
-            @Override
-            protected void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-                transactionContext.executeModification(modification, havePermit);
-            }
-        });
-    }
-
-    private void checkModificationState() {
-        Preconditions.checkState(type != TransactionType.READ_ONLY,
-                "Modification operation on read-only transaction is not allowed");
-        Preconditions.checkState(state == TransactionState.OPEN,
-                "Transaction is sealed - further modifications are not allowed");
-    }
-
-    private boolean seal(final TransactionState newState) {
-        if (state == TransactionState.OPEN) {
-            state = newState;
-            return true;
-        } else {
-            return false;
-        }
-    }
-
-    @Override
-    public final void close() {
-        if (!seal(TransactionState.CLOSED)) {
-            Preconditions.checkState(state == TransactionState.CLOSED, "Transaction %s is ready, it cannot be closed",
-                getIdentifier());
-            // Idempotent no-op as per AutoCloseable recommendation
-            return;
-        }
-
-        for (TransactionContextWrapper contextWrapper : txContextWrappers.values()) {
-            contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
-                @Override
-                public void invoke(final TransactionContext transactionContext, final Boolean havePermit) {
-                    transactionContext.closeTransaction();
-                }
-            });
-        }
-
-
-        txContextWrappers.clear();
-    }
-
-    @Override
-    public final AbstractThreePhaseCommitCohort<?> ready() {
-        Preconditions.checkState(type != TransactionType.READ_ONLY, "Read-only transactions cannot be readied");
-
-        final boolean success = seal(TransactionState.READY);
-        Preconditions.checkState(success, "Transaction %s is %s, it cannot be readied", getIdentifier(), state);
-
-        LOG.debug("Tx {} Readying {} components for commit", getIdentifier(), txContextWrappers.size());
-
-        final AbstractThreePhaseCommitCohort<?> ret;
-        switch (txContextWrappers.size()) {
-            case 0:
-                ret = NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
-                break;
-            case 1:
-                final Entry<String, TransactionContextWrapper> e = Iterables.getOnlyElement(
-                        txContextWrappers.entrySet());
-                ret = createSingleCommitCohort(e.getKey(), e.getValue());
-                break;
-            default:
-                ret = createMultiCommitCohort();
-        }
-
-        txContextFactory.onTransactionReady(getIdentifier(), ret.getCohortFutures());
-
-        final Throwable debugContext = getDebugContext();
-        return debugContext == null ? ret : new DebugThreePhaseCommitCohort(getIdentifier(), ret, debugContext);
-    }
-
-    @SuppressWarnings({ "rawtypes", "unchecked" })
-    private AbstractThreePhaseCommitCohort<?> createSingleCommitCohort(final String shardName,
-            final TransactionContextWrapper contextWrapper) {
-
-        LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), shardName);
-
-        final OperationCallback.Reference operationCallbackRef =
-                new OperationCallback.Reference(OperationCallback.NO_OP_CALLBACK);
-
-        final TransactionContext transactionContext = contextWrapper.getTransactionContext();
-        final Future future;
-        if (transactionContext == null) {
-            final Promise promise = akka.dispatch.Futures.promise();
-            contextWrapper.maybeExecuteTransactionOperation(new TransactionOperation() {
-                @Override
-                public void invoke(final TransactionContext newTransactionContext, final Boolean havePermit) {
-                    promise.completeWith(getDirectCommitFuture(newTransactionContext, operationCallbackRef,
-                        havePermit));
-                }
-            });
-            future = promise.future();
-        } else {
-            // avoid the creation of a promise and a TransactionOperation
-            future = getDirectCommitFuture(transactionContext, operationCallbackRef, null);
-        }
-
-        return new SingleCommitCohortProxy(txContextFactory.getActorUtils(), future, getIdentifier(),
-            operationCallbackRef);
-    }
-
-    private Future<?> getDirectCommitFuture(final TransactionContext transactionContext,
-            final OperationCallback.Reference operationCallbackRef, final Boolean havePermit) {
-        TransactionRateLimitingCallback rateLimitingCallback = new TransactionRateLimitingCallback(
-                txContextFactory.getActorUtils());
-        operationCallbackRef.set(rateLimitingCallback);
-        rateLimitingCallback.run();
-        return transactionContext.directCommit(havePermit);
-    }
-
-    private AbstractThreePhaseCommitCohort<ActorSelection> createMultiCommitCohort() {
-
-        final List<ThreePhaseCommitCohortProxy.CohortInfo> cohorts = new ArrayList<>(txContextWrappers.size());
-        final Optional<SortedSet<String>> shardNames = Optional.of(new TreeSet<>(txContextWrappers.keySet()));
-        for (Entry<String, TransactionContextWrapper> e : txContextWrappers.entrySet()) {
-            LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), e.getKey());
-
-            final TransactionContextWrapper wrapper = e.getValue();
-
-            // The remote tx version is obtained the via TransactionContext which may not be available yet so
-            // we pass a Supplier to dynamically obtain it. Once the ready Future is resolved the
-            // TransactionContext is available.
-            Supplier<Short> txVersionSupplier = () -> wrapper.getTransactionContext().getTransactionVersion();
-
-            cohorts.add(new ThreePhaseCommitCohortProxy.CohortInfo(wrapper.readyTransaction(shardNames),
-                    txVersionSupplier));
-        }
-
-        return new ThreePhaseCommitCohortProxy(txContextFactory.getActorUtils(), cohorts, getIdentifier());
-    }
-
-    private String shardNameFromIdentifier(final YangInstanceIdentifier path) {
-        return txContextFactory.getActorUtils().getShardStrategyFactory().getStrategy(path).findShard(path);
-    }
-
-    private TransactionContextWrapper getContextWrapper(final YangInstanceIdentifier path) {
-        return getContextWrapper(shardNameFromIdentifier(path));
-    }
-
-    private TransactionContextWrapper getContextWrapper(final String shardName) {
-        final TransactionContextWrapper existing = txContextWrappers.get(shardName);
-        if (existing != null) {
-            return existing;
-        }
-
-        final TransactionContextWrapper fresh = txContextFactory.newTransactionContextWrapper(this, shardName);
-        txContextWrappers.put(shardName, fresh);
-        return fresh;
-    }
-
-    TransactionType getType() {
-        return type;
-    }
-
-    boolean isReady() {
-        return state != TransactionState.OPEN;
-    }
-
-    ActorUtils getActorUtils() {
-        return txContextFactory.getActorUtils();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java
deleted file mode 100644
index f5eb0e4..0000000
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionReadyReplyMapper.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorSelection;
-import akka.dispatch.Mapper;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Future;
-
-/**
- * A {@link Mapper} extracting the {@link ActorSelection} pointing to the actor which
- * is backing a particular transaction.
- *
- * <p>
- * This class is not for general consumption. It is public only to support the pre-lithium compatibility
- * package.
- * TODO: once we remove compatibility, make this class package-private and final.
- */
-public class TransactionReadyReplyMapper extends Mapper<Object, ActorSelection> {
-    protected static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER = new Mapper<Throwable, Throwable>() {
-        @Override
-        public Throwable apply(final Throwable failure) {
-            return failure;
-        }
-    };
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionReadyReplyMapper.class);
-    private final TransactionIdentifier identifier;
-    private final ActorUtils actorUtils;
-
-    protected TransactionReadyReplyMapper(final ActorUtils actorUtils, final TransactionIdentifier identifier) {
-        this.actorUtils = requireNonNull(actorUtils);
-        this.identifier = requireNonNull(identifier);
-    }
-
-    protected final ActorUtils getActorUtils() {
-        return actorUtils;
-    }
-
-    protected String extractCohortPathFrom(final ReadyTransactionReply readyTxReply) {
-        return readyTxReply.getCohortPath();
-    }
-
-    @Override
-    public final ActorSelection checkedApply(final Object serializedReadyReply) {
-        LOG.debug("Tx {} readyTransaction", identifier);
-
-        // At this point the ready operation succeeded and we need to extract the cohort
-        // actor path from the reply.
-        if (ReadyTransactionReply.isSerializedType(serializedReadyReply)) {
-            ReadyTransactionReply readyTxReply = ReadyTransactionReply.fromSerializable(serializedReadyReply);
-            return actorUtils.actorSelection(extractCohortPathFrom(readyTxReply));
-        }
-
-        // Throwing an exception here will fail the Future.
-        throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
-                identifier, serializedReadyReply.getClass()));
-    }
-
-    static Future<ActorSelection> transform(final Future<Object> readyReplyFuture, final ActorUtils actorUtils,
-            final TransactionIdentifier identifier) {
-        return readyReplyFuture.transform(new TransactionReadyReplyMapper(actorUtils, identifier),
-            SAME_FAILURE_TRANSFORMER, actorUtils.getClientDispatcher());
-    }
-}
index 649dae528949397b73b1cb8367156e0057f85ba0..19ca628d262666d6e56a63ee37850e4ca137dbf8 100644
@@ -12,14 +12,12 @@ public enum TransactionType {
     WRITE_ONLY,
     READ_WRITE;
 
-    // Cache all values
-    private static final TransactionType[] VALUES = values();
-
     public static TransactionType fromInt(final int type) {
-        try {
-            return VALUES[type];
-        } catch (IndexOutOfBoundsException e) {
-            throw new IllegalArgumentException("In TransactionType enum value " + type, e);
-        }
+        return switch (type) {
+            case 0 -> READ_ONLY;
+            case 1 -> WRITE_ONLY;
+            case 2 -> READ_WRITE;
+            default -> throw new IllegalArgumentException("In TransactionType enum value " + type);
+        };
     }
 }
\ No newline at end of file
index 38c7620b63dd7dd4d86dcd83f599d6cf86236484..2fdf3a9d622a927a75272c9f123650cf74689059 100644
@@ -7,17 +7,19 @@
  */
 package org.opendaylight.controller.cluster.datastore.actors;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Cancellable;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import java.util.concurrent.TimeUnit;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistrationReply;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -25,24 +27,23 @@ import scala.concurrent.duration.FiniteDuration;
  * asked to do so via {@link CloseDataTreeNotificationListenerRegistration}.
  */
 public final class DataTreeNotificationListenerRegistrationActor extends AbstractUntypedActor {
+    // FIXME: rework this constant into a Duration and make it injectable
     @VisibleForTesting
     static long killDelay = TimeUnit.MILLISECONDS.convert(5, TimeUnit.SECONDS);
 
-    private ListenerRegistration<?> registration;
-    private Runnable onClose;
+    private SetRegistration registration = null;
+    private Cancellable killSchedule = null;
     private boolean closed;
-    private Cancellable killSchedule;
 
     @Override
-    protected void handleReceive(Object message) {
+    protected void handleReceive(final Object message) {
         if (message instanceof CloseDataTreeNotificationListenerRegistration) {
             closeListenerRegistration();
             if (isValidSender(getSender())) {
                 getSender().tell(CloseDataTreeNotificationListenerRegistrationReply.getInstance(), getSelf());
             }
-        } else if (message instanceof SetRegistration) {
-            registration = ((SetRegistration)message).registration;
-            onClose = ((SetRegistration)message).onClose;
+        } else if (message instanceof SetRegistration setRegistration) {
+            registration = setRegistration;
             if (closed) {
                 closeListenerRegistration();
             }
@@ -53,10 +54,12 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac
 
     private void closeListenerRegistration() {
         closed = true;
-        if (registration != null) {
-            registration.close();
-            onClose.run();
+
+        final var reg = registration;
+        if (reg != null) {
             registration = null;
+            reg.registration.close();
+            reg.onClose.run();
 
             if (killSchedule == null) {
                 killSchedule = getContext().system().scheduler().scheduleOnce(FiniteDuration.create(killDelay,
@@ -70,13 +73,11 @@ public final class DataTreeNotificationListenerRegistrationActor extends Abstrac
         return Props.create(DataTreeNotificationListenerRegistrationActor.class);
     }
 
-    public static class SetRegistration {
-        private final ListenerRegistration<?> registration;
-        private final Runnable onClose;
-
-        public SetRegistration(final ListenerRegistration<?> registration, final Runnable onClose) {
-            this.registration = Preconditions.checkNotNull(registration);
-            this.onClose = Preconditions.checkNotNull(onClose);
+    @NonNullByDefault
+    public record SetRegistration(Registration registration, Runnable onClose) {
+        public SetRegistration {
+            requireNonNull(registration);
+            requireNonNull(onClose);
         }
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java
new file mode 100644
index 0000000..5eabe94
--- /dev/null
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/actors/JsonExportActor.java
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.actors;
+
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.Props;
+import com.google.gson.stream.JsonWriter;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.codec.gson.JSONCodecFactorySupplier;
+import org.opendaylight.yangtools.yang.data.codec.gson.JSONNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+import org.opendaylight.yangtools.yang.model.util.SchemaInferenceStack;
+
+public final class JsonExportActor extends AbstractUntypedActor {
+    // Internal messages
+    public static final class ExportSnapshot {
+        private final String id;
+
+        private final DataTreeCandidate dataTreeCandidate;
+
+        public ExportSnapshot(final DataTreeCandidate candidate, final String id) {
+            dataTreeCandidate = requireNonNull(candidate);
+            this.id = requireNonNull(id);
+        }
+    }
+
+    public static final class ExportJournal {
+        private final ReplicatedLogEntry replicatedLogEntry;
+
+        public ExportJournal(final ReplicatedLogEntry replicatedLogEntry) {
+            this.replicatedLogEntry = requireNonNull(replicatedLogEntry);
+        }
+    }
+
+    public static final class FinishExport {
+        private final String id;
+
+        public FinishExport(final String id) {
+            this.id = requireNonNull(id);
+        }
+    }
+
+    private final List<ReplicatedLogEntry> entries = new ArrayList<>();
+    private final @NonNull EffectiveModelContext schemaContext;
+    private final @NonNull Path baseDirPath;
+
+    private JsonExportActor(final EffectiveModelContext schemaContext, final Path dirPath) {
+        this.schemaContext = requireNonNull(schemaContext);
+        baseDirPath = requireNonNull(dirPath);
+    }
+
+    public static Props props(final EffectiveModelContext schemaContext, final String dirPath) {
+        return Props.create(JsonExportActor.class, schemaContext, Paths.get(dirPath));
+    }
+
+    @Override
+    protected void handleReceive(final Object message) {
+        if (message instanceof ExportSnapshot) {
+            onExportSnapshot((ExportSnapshot) message);
+        } else if (message instanceof ExportJournal) {
+            onExportJournal((ExportJournal) message);
+        } else if (message instanceof FinishExport) {
+            onFinishExport((FinishExport)message);
+        } else {
+            unknownMessage(message);
+        }
+    }
+
+    private void onExportSnapshot(final ExportSnapshot exportSnapshot) {
+        final Path snapshotDir = baseDirPath.resolve("snapshots");
+        createDir(snapshotDir);
+
+        final Path filePath = snapshotDir.resolve(exportSnapshot.id + "-snapshot.json");
+        LOG.debug("Creating JSON file : {}", filePath);
+
+        final NormalizedNode root = exportSnapshot.dataTreeCandidate.getRootNode().getDataAfter();
+        checkState(root instanceof NormalizedNodeContainer, "Unexpected root %s", root);
+
+        writeSnapshot(filePath, (NormalizedNodeContainer<?>) root);
+        LOG.debug("Created JSON file: {}", filePath);
+    }
+
+    private void onExportJournal(final ExportJournal exportJournal) {
+        entries.add(exportJournal.replicatedLogEntry);
+    }
+
+    private void onFinishExport(final FinishExport finishExport) {
+        final Path journalDir = baseDirPath.resolve("journals");
+        createDir(journalDir);
+
+        final Path filePath = journalDir.resolve(finishExport.id + "-journal.json");
+        LOG.debug("Creating JSON file : {}", filePath);
+        writeJournal(filePath);
+        LOG.debug("Created JSON file: {}", filePath);
+    }
+
+    private void writeSnapshot(final Path path, final NormalizedNodeContainer<?> root) {
+        try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) {
+            jsonWriter.beginObject();
+
+            try (var nnWriter = NormalizedNodeWriter.forStreamWriter(JSONNormalizedNodeStreamWriter.createNestedWriter(
+                    JSONCodecFactorySupplier.RFC7951.getShared(schemaContext),
+                    SchemaInferenceStack.of(schemaContext).toInference(), null, jsonWriter),
+                true)) {
+                for (NormalizedNode node : root.body()) {
+                    nnWriter.write(node);
+                }
+            }
+
+            jsonWriter.endObject();
+        } catch (IOException e) {
+            LOG.error("Failed to export stapshot to {}", path, e);
+        }
+    }
+
+    private void writeJournal(final Path path) {
+        try (JsonWriter jsonWriter = new JsonWriter(Files.newBufferedWriter(path))) {
+            jsonWriter.beginObject().name("Entries");
+            jsonWriter.beginArray();
+            for (var entry : entries) {
+                final var data = entry.getData();
+                if (data instanceof CommitTransactionPayload payload) {
+                    final var candidate = payload.getCandidate().candidate();
+                    writeNode(jsonWriter, candidate);
+                } else {
+                    jsonWriter.beginObject().name("Payload").value(data.toString()).endObject();
+                }
+            }
+            jsonWriter.endArray();
+            jsonWriter.endObject();
+        } catch (IOException e) {
+            LOG.error("Failed to export journal to {}", path, e);
+        }
+    }
+
+    private static void writeNode(final JsonWriter writer, final DataTreeCandidate candidate) throws IOException {
+        writer.beginObject().name("Entry").beginArray();
+        doWriteNode(writer, candidate.getRootPath(), candidate.getRootNode());
+        writer.endArray().endObject();
+    }
+
+    private static void doWriteNode(final JsonWriter writer, final YangInstanceIdentifier path,
+            final DataTreeCandidateNode node) throws IOException {
+        switch (node.modificationType()) {
+            case APPEARED:
+            case DISAPPEARED:
+            case SUBTREE_MODIFIED:
+                NodeIterator iterator = new NodeIterator(null, path, node.childNodes().iterator());
+                do {
+                    iterator = iterator.next(writer);
+                } while (iterator != null);
+                break;
+            case DELETE:
+            case UNMODIFIED:
+            case WRITE:
+                outputNodeInfo(writer, path, node);
+                break;
+            default:
+                outputDefault(writer, path, node);
+        }
+    }
+
+    private static void outputNodeInfo(final JsonWriter writer, final YangInstanceIdentifier path,
+                                       final DataTreeCandidateNode node) throws IOException {
+        final ModificationType modificationType = node.modificationType();
+
+        writer.beginObject().name("Node");
+        writer.beginArray();
+        writer.beginObject().name("Path").value(path.toString()).endObject();
+        writer.beginObject().name("ModificationType").value(modificationType.toString()).endObject();
+        if (modificationType == ModificationType.WRITE) {
+            writer.beginObject().name("Data").value(node.getDataAfter().body().toString()).endObject();
+        }
+        writer.endArray();
+        writer.endObject();
+    }
+
+    private static void outputDefault(final JsonWriter writer, final YangInstanceIdentifier path,
+                                      final DataTreeCandidateNode node) throws IOException {
+        writer.beginObject().name("Node");
+        writer.beginArray();
+        writer.beginObject().name("Path").value(path.toString()).endObject();
+        writer.beginObject().name("ModificationType")
+                .value("UNSUPPORTED MODIFICATION: " + node.modificationType()).endObject();
+        writer.endArray();
+        writer.endObject();
+    }
+
+    private void createDir(final Path path) {
+        try {
+            Files.createDirectories(path);
+        } catch (IOException e) {
+            LOG.warn("Directory {} cannot be created", path, e);
+        }
+    }
+
+    private static final class NodeIterator {
+        private final Iterator<DataTreeCandidateNode> iterator;
+        private final YangInstanceIdentifier path;
+        private final NodeIterator parent;
+
+        NodeIterator(final @Nullable NodeIterator parent, final YangInstanceIdentifier path,
+                     final Iterator<DataTreeCandidateNode> iterator) {
+            this.iterator = requireNonNull(iterator);
+            this.path = requireNonNull(path);
+            this.parent = parent;
+        }
+
+        NodeIterator next(final JsonWriter writer) throws IOException {
+            while (iterator.hasNext()) {
+                final var node = iterator.next();
+                final var child = path.node(node.name());
+
+                switch (node.modificationType()) {
+                    case APPEARED:
+                    case DISAPPEARED:
+                    case SUBTREE_MODIFIED:
+                        return new NodeIterator(this, child, node.childNodes().iterator());
+                    case DELETE:
+                    case UNMODIFIED:
+                    case WRITE:
+                        outputNodeInfo(writer, path, node);
+                        break;
+                    default:
+                        outputDefault(writer, child, node);
+                }
+            }
+
+            return parent;
+        }
+    }
+}
index 14cf4cd1a3ec2d58552a3a86377f9da3127b04ac..91da59d3bae91faf6628abb3b7853cd2ab042e5f 100644
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.actors;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Props;
-import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.io.ObjectOutputStream;
 import java.io.OutputStream;
@@ -17,6 +18,7 @@ import java.util.Optional;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.io.InputOutputStreamFactory;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
 
 /**
@@ -34,9 +36,9 @@ public final class ShardSnapshotActor extends AbstractUntypedActorWithMetering {
 
         SerializeSnapshot(final ShardDataTreeSnapshot snapshot, final Optional<OutputStream> installSnapshotStream,
                 final ActorRef replyTo) {
-            this.snapshot = Preconditions.checkNotNull(snapshot);
-            this.installSnapshotStream = Preconditions.checkNotNull(installSnapshotStream);
-            this.replyTo = Preconditions.checkNotNull(replyTo);
+            this.snapshot = requireNonNull(snapshot);
+            this.installSnapshotStream = requireNonNull(installSnapshotStream);
+            this.replyTo = requireNonNull(replyTo);
         }
 
         ShardDataTreeSnapshot getSnapshot() {
@@ -55,8 +57,11 @@ public final class ShardSnapshotActor extends AbstractUntypedActorWithMetering {
     //actor name override used for metering. This does not change the "real" actor name
     private static final String ACTOR_NAME_FOR_METERING = "shard-snapshot";
 
-    private ShardSnapshotActor() {
+    private final InputOutputStreamFactory streamFactory;
+
+    private ShardSnapshotActor(final InputOutputStreamFactory streamFactory) {
         super(ACTOR_NAME_FOR_METERING);
+        this.streamFactory = requireNonNull(streamFactory);
     }
 
     @Override
@@ -71,7 +76,7 @@ public final class ShardSnapshotActor extends AbstractUntypedActorWithMetering {
     private void onSerializeSnapshot(final SerializeSnapshot request) {
         Optional<OutputStream> installSnapshotStream = request.getInstallSnapshotStream();
         if (installSnapshotStream.isPresent()) {
-            try (ObjectOutputStream out = new ObjectOutputStream(installSnapshotStream.get())) {
+            try (ObjectOutputStream out = getOutputStream(installSnapshotStream.orElseThrow())) {
                 request.getSnapshot().serialize(out);
             } catch (IOException e) {
                 // TODO - we should communicate the failure in the CaptureSnapshotReply.
@@ -83,6 +88,10 @@ public final class ShardSnapshotActor extends AbstractUntypedActorWithMetering {
                 installSnapshotStream), ActorRef.noSender());
     }
 
+    private ObjectOutputStream getOutputStream(final OutputStream outputStream) throws IOException {
+        return new ObjectOutputStream(streamFactory.wrapOutputStream(outputStream));
+    }
+
     /**
      * Sends a request to a ShardSnapshotActor to process a snapshot and send a CaptureSnapshotReply.
      *
@@ -97,7 +106,7 @@ public final class ShardSnapshotActor extends AbstractUntypedActorWithMetering {
         snapshotActor.tell(new SerializeSnapshot(snapshot, installSnapshotStream, replyTo), ActorRef.noSender());
     }
 
-    public static Props props() {
-        return Props.create(ShardSnapshotActor.class);
+    public static Props props(final InputOutputStreamFactory streamFactory) {
+        return Props.create(ShardSnapshotActor.class, streamFactory);
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/LegacyDOMStoreAdapter.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/LegacyDOMStoreAdapter.java
deleted file mode 100644
index 2040420..0000000
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/LegacyDOMStoreAdapter.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.compat;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
-import org.opendaylight.controller.md.sal.common.api.MappingCheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.compat.ReadFailedExceptionAdapter;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Adapter between the legacy controller API-based DOMStore and the mdsal API-based DOMStore.
- *
- * @author Thomas Pantelis
- */
-public class LegacyDOMStoreAdapter extends ForwardingObject implements DOMStore, AutoCloseable {
-    private final DistributedDataStoreInterface delegate;
-
-    public LegacyDOMStoreAdapter(DistributedDataStoreInterface delegate) {
-        this.delegate = delegate;
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction() {
-        return new DOMStoreTransactionAdapter(delegate().newReadOnlyTransaction());
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        return new DOMStoreTransactionAdapter(delegate().newWriteOnlyTransaction());
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        return new DOMStoreTransactionAdapter(delegate().newReadWriteTransaction());
-    }
-
-    @Override
-    public DOMStoreTransactionChain createTransactionChain() {
-        final org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain delegateChain =
-                delegate().createTransactionChain();
-        return new DOMStoreTransactionChain() {
-            @Override
-            public DOMStoreReadTransaction newReadOnlyTransaction() {
-                return new DOMStoreTransactionAdapter(delegateChain.newReadOnlyTransaction());
-            }
-
-            @Override
-            public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-                return new DOMStoreTransactionAdapter(delegateChain.newWriteOnlyTransaction());
-            }
-
-            @Override
-            public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-                return new DOMStoreTransactionAdapter(delegateChain.newReadWriteTransaction());
-            }
-
-            @Override
-            public void close() {
-                delegateChain.close();
-            }
-        };
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    protected DistributedDataStoreInterface delegate() {
-        return delegate;
-    }
-
-    private static class DOMStoreTransactionAdapter implements DOMStoreReadWriteTransaction {
-        private final org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction readDelegate;
-        private final org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction writeDelegate;
-        private final Object identifier;
-
-        DOMStoreTransactionAdapter(@Nonnull org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction readDelegate) {
-            this.readDelegate = Preconditions.checkNotNull(readDelegate);
-            this.identifier = readDelegate.getIdentifier();
-            this.writeDelegate = null;
-        }
-
-        DOMStoreTransactionAdapter(
-                @Nonnull org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction writeDelegate) {
-            this.writeDelegate = Preconditions.checkNotNull(writeDelegate);
-            this.identifier = writeDelegate.getIdentifier();
-            this.readDelegate = null;
-        }
-
-        DOMStoreTransactionAdapter(
-                @Nonnull org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction rwDelegate) {
-            this.readDelegate = Preconditions.checkNotNull(rwDelegate);
-            this.writeDelegate = rwDelegate;
-            this.identifier = readDelegate.getIdentifier();
-        }
-
-        @Override
-        public Object getIdentifier() {
-            return identifier;
-        }
-
-        @Override
-        public void close() {
-            if (writeDelegate != null) {
-                writeDelegate.close();
-            } else {
-                readDelegate.close();
-            }
-        }
-
-        @Override
-        public void write(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            writeDelegate.write(path, data);
-        }
-
-        @Override
-        public void merge(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            writeDelegate.merge(path, data);
-        }
-
-        @Override
-        public void delete(YangInstanceIdentifier path) {
-            writeDelegate.delete(path);
-        }
-
-        @Override
-        public DOMStoreThreePhaseCommitCohort ready() {
-            final org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort cohort = writeDelegate.ready();
-            return new DOMStoreThreePhaseCommitCohort() {
-                @Override
-                public ListenableFuture<Boolean> canCommit() {
-                    return cohort.canCommit();
-                }
-
-                @Override
-                public ListenableFuture<Void> preCommit() {
-                    return cohort.preCommit();
-                }
-
-                @Override
-                public ListenableFuture<Void> commit() {
-                    return cohort.commit();
-                }
-
-                @Override
-                public ListenableFuture<Void> abort() {
-                    return cohort.abort();
-                }
-            };
-        }
-
-        @Override
-        public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(YangInstanceIdentifier path) {
-            return MappingCheckedFuture.create(readDelegate.read(path).transform(
-                Optional::fromJavaUtil, MoreExecutors.directExecutor()), ReadFailedExceptionAdapter.INSTANCE);
-        }
-
-        @Override
-        public CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path) {
-            return MappingCheckedFuture.create(readDelegate.exists(path), ReadFailedExceptionAdapter.INSTANCE);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/AbstractModuleShardConfigProvider.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/AbstractModuleShardConfigProvider.java
new file mode 100644
index 0000000..af112fb
--- /dev/null
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/AbstractModuleShardConfigProvider.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.config;
+
+import static java.util.Objects.requireNonNull;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import com.typesafe.config.ConfigObject;
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+abstract class AbstractModuleShardConfigProvider implements ModuleShardConfigProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractModuleShardConfigProvider.class);
+
+    static final Config loadConfigFromPath(final String configPath) {
+        final File configFile = new File(configPath);
+        Config config = null;
+        if (configFile.exists()) {
+            LOG.info("Config file exists - reading config from it");
+            config = ConfigFactory.parseFile(configFile);
+        } else {
+            LOG.warn("Reading Config from resource");
+            config = ConfigFactory.load(configPath);
+        }
+        return config;
+    }
+
+    static final void readModulesConfig(final Config modulesConfig,
+            final Map<String, ModuleConfig.Builder> moduleConfigMap, final Configuration configuration) {
+        for (final ConfigObject o : modulesConfig.getObjectList("modules")) {
+            final ConfigObjectWrapper wrapper = new ConfigObjectWrapper(o);
+
+            final String moduleName = wrapper.stringValue("name");
+            final ModuleConfig.Builder builder = moduleConfigMap.computeIfAbsent(moduleName, ModuleConfig::builder);
+
+            builder.nameSpace(wrapper.stringValue("namespace"));
+            builder.shardStrategy(ShardStrategyFactory.newShardStrategyInstance(moduleName,
+                    wrapper.stringValue("shard-strategy"), configuration));
+        }
+    }
+
+    static final Map<String, ModuleConfig.Builder> readModuleShardsConfig(final Config moduleShardsConfig) {
+        final Map<String, ModuleConfig.Builder> moduleConfigMap = new HashMap<>();
+        for (final ConfigObject moduleShardConfigObject : moduleShardsConfig.getObjectList("module-shards")) {
+            final String moduleName = moduleShardConfigObject.get("name").unwrapped().toString();
+            final ModuleConfig.Builder builder = ModuleConfig.builder(moduleName);
+
+            for (final ConfigObject shard : moduleShardConfigObject.toConfig().getObjectList("shards")) {
+                final String shardName = shard.get("name").unwrapped().toString();
+                final List<MemberName> replicas = shard.toConfig().getStringList("replicas").stream()
+                        .map(MemberName::forName).collect(Collectors.toList());
+                builder.shardConfig(shardName, replicas);
+            }
+
+            moduleConfigMap.put(moduleName, builder);
+        }
+
+        return moduleConfigMap;
+    }
+
+    private static final class ConfigObjectWrapper {
+        private final ConfigObject configObject;
+
+        ConfigObjectWrapper(final ConfigObject configObject) {
+            this.configObject = requireNonNull(configObject);
+        }
+
+        String stringValue(final String name) {
+            return configObject.get(name).unwrapped().toString();
+        }
+    }
+}
index d5878c643153bb08b1ce2d881d15481dc0e0b190..70f2ccb69f9fed8f017cb525046e557203b640bb 100644
@@ -5,49 +5,43 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.config;
 
 import java.util.Collection;
-import java.util.Map;
 import java.util.Set;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 
 public interface Configuration {
 
     /**
      * Returns all the shard names that belong on the member by the given name.
      */
-    @Nonnull Collection<String> getMemberShardNames(@Nonnull MemberName memberName);
+    // FIXME: return Set here
+    @NonNull Collection<String> getMemberShardNames(@NonNull MemberName memberName);
 
     /**
      * Returns the module name for the given namespace name or null if not found.
      */
-    @Nullable String getModuleNameFromNameSpace(@Nonnull String nameSpace);
+    @Nullable String getModuleNameFromNameSpace(@NonNull String nameSpace);
 
     /**
      * Returns the first shard name corresponding to the given module name or null if none is configured.
      */
-    @Nullable String getShardNameForModule(@Nonnull String moduleName);
-
-    /**
-     * Return the shard name corresponding to the prefix, or null if none is configured.
-     */
-    @Nullable String getShardNameForPrefix(@Nonnull DOMDataTreeIdentifier prefix);
+    @Nullable String getShardNameForModule(@NonNull String moduleName);
 
     /**
      * Returns the member replicas for the given shard name.
      */
-    @Nonnull Collection<MemberName> getMembersFromShardName(@Nonnull String shardName);
+    // FIXME: return Set here
+    @NonNull Collection<MemberName> getMembersFromShardName(@NonNull String shardName);
 
     /**
      * Returns the ShardStrategy for the given module name or null if the module is not found.
      */
-    @Nullable ShardStrategy getStrategyForModule(@Nonnull String moduleName);
+    @Nullable ShardStrategy getStrategyForModule(@NonNull String moduleName);
 
     /**
      * Returns all the configured shard names.
@@ -57,28 +51,12 @@ public interface Configuration {
     /**
      * Adds a new configuration for a module and shard.
      */
-    void addModuleShardConfiguration(@Nonnull ModuleShardConfiguration config);
-
-    /**
-     * Adds a new configuration for a shard based on prefix.
-     */
-    void addPrefixShardConfiguration(@Nonnull PrefixShardConfiguration config);
-
-    /**
-     * Removes a shard configuration for the specified prefix.
-     */
-    void removePrefixShardConfiguration(@Nonnull DOMDataTreeIdentifier prefix);
-
-    /**
-     * Returns the configuration for all configured prefix shards.
-     *
-     * @return An immutable copy of the currently configured prefix shards.
-     */
-    Map<DOMDataTreeIdentifier, PrefixShardConfiguration> getAllPrefixShardConfigurations();
+    void addModuleShardConfiguration(@NonNull ModuleShardConfiguration config);
 
     /**
      * Returns a unique set of all member names configured for all shards.
      */
+    // FIXME: return Set here
     Collection<MemberName> getUniqueMemberNamesForAllShards();
 
     /*
@@ -95,9 +73,4 @@ public interface Configuration {
      * Removes the given member as a replica for the given shardName.
      */
     void removeMemberReplicaForShard(String shardName, MemberName memberName);
-
-    /**
-     * Returns the ShardStrategy for the given prefix or null if the prefix is not found.
-     */
-    @Nullable ShardStrategy getStrategyForPrefix(@Nonnull DOMDataTreeIdentifier prefix);
 }
index 0007d0941b1cab104f62476bc8a856ab5a643dba..d0e8d875f65d4ba1f765a969c85107c156c3f5bc 100644
@@ -5,39 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.config;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
-import java.util.AbstractMap.SimpleEntry;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.PrefixShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
-// TODO clean this up once we get rid of module based configuration, prefix one should be alot simpler
+// FIXME: Non-final for testing
 public class ConfigurationImpl implements Configuration {
     private volatile Map<String, ModuleConfig> moduleConfigMap;
 
-    // TODO should this be initialized with something? on restart we should restore the shards from configuration?
-    private volatile Map<DOMDataTreeIdentifier, PrefixShardConfiguration> prefixConfigMap = Collections.emptyMap();
-
     // Look up maps to speed things up
 
     private volatile Map<String, String> namespaceToModuleName;
@@ -47,19 +37,20 @@ public class ConfigurationImpl implements Configuration {
         this(new FileModuleShardConfigProvider(moduleShardsConfigPath, modulesConfigPath));
     }
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Subclassed for testing")
     public ConfigurationImpl(final ModuleShardConfigProvider provider) {
         ImmutableMap.Builder<String, ModuleConfig> mapBuilder = ImmutableMap.builder();
-        for (Map.Entry<String, ModuleConfig.Builder> e: provider.retrieveModuleConfigs(this).entrySet()) {
+        for (Entry<String, ModuleConfig.Builder> e: provider.retrieveModuleConfigs(this).entrySet()) {
             mapBuilder.put(e.getKey(), e.getValue().build());
         }
 
-        this.moduleConfigMap = mapBuilder.build();
+        moduleConfigMap = mapBuilder.build();
 
-        this.allShardNames = createAllShardNames(moduleConfigMap.values());
-        this.namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values());
+        allShardNames = createAllShardNames(moduleConfigMap.values());
+        namespaceToModuleName = createNamespaceToModuleName(moduleConfigMap.values());
     }
 
-    private static Set<String> createAllShardNames(Iterable<ModuleConfig> moduleConfigs) {
+    private static Set<String> createAllShardNames(final Iterable<ModuleConfig> moduleConfigs) {
         final ImmutableSet.Builder<String> builder = ImmutableSet.builder();
         for (ModuleConfig moduleConfig : moduleConfigs) {
             builder.addAll(moduleConfig.getShardNames());
@@ -68,7 +59,7 @@ public class ConfigurationImpl implements Configuration {
         return builder.build();
     }
 
-    private static Map<String, String> createNamespaceToModuleName(Iterable<ModuleConfig> moduleConfigs) {
+    private static Map<String, String> createNamespaceToModuleName(final Iterable<ModuleConfig> moduleConfigs) {
         final ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
         for (ModuleConfig moduleConfig : moduleConfigs) {
             if (moduleConfig.getNamespace() != null) {
@@ -81,7 +72,7 @@ public class ConfigurationImpl implements Configuration {
 
     @Override
     public Collection<String> getMemberShardNames(final MemberName memberName) {
-        Preconditions.checkNotNull(memberName, "memberName should not be null");
+        requireNonNull(memberName, "memberName should not be null");
 
         List<String> shards = new ArrayList<>();
         for (ModuleConfig moduleConfig: moduleConfigMap.values()) {
@@ -97,47 +88,29 @@ public class ConfigurationImpl implements Configuration {
 
     @Override
     public String getModuleNameFromNameSpace(final String nameSpace) {
-        Preconditions.checkNotNull(nameSpace, "nameSpace should not be null");
-
-        return namespaceToModuleName.get(nameSpace);
+        return namespaceToModuleName.get(requireNonNull(nameSpace, "nameSpace should not be null"));
     }
 
     @Override
-    public ShardStrategy getStrategyForModule(String moduleName) {
-        Preconditions.checkNotNull(moduleName, "moduleName should not be null");
-
-        ModuleConfig moduleConfig = moduleConfigMap.get(moduleName);
+    public ShardStrategy getStrategyForModule(final String moduleName) {
+        ModuleConfig moduleConfig = getModuleConfig(moduleName);
         return moduleConfig != null ? moduleConfig.getShardStrategy() : null;
     }
 
     @Override
     public String getShardNameForModule(final String moduleName) {
-        Preconditions.checkNotNull(moduleName, "moduleName should not be null");
-
-        ModuleConfig moduleConfig = moduleConfigMap.get(moduleName);
-        Collection<ShardConfig> shardConfigs = moduleConfig != null ? moduleConfig.getShardConfigs() :
-            Collections.<ShardConfig>emptySet();
-        return !shardConfigs.isEmpty() ? shardConfigs.iterator().next().getName() : null;
-    }
-
-    @Nullable
-    @Override
-    public String getShardNameForPrefix(@Nonnull final DOMDataTreeIdentifier prefix) {
-        Preconditions.checkNotNull(prefix, "prefix should not be null");
-
-        Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> bestMatchEntry =
-                new SimpleEntry<>(
-                        new DOMDataTreeIdentifier(prefix.getDatastoreType(), YangInstanceIdentifier.EMPTY), null);
-
-        for (Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> entry : prefixConfigMap.entrySet()) {
-            if (entry.getKey().contains(prefix) && entry.getKey().getRootIdentifier().getPathArguments().size()
-                    > bestMatchEntry.getKey().getRootIdentifier().getPathArguments().size()) {
-                bestMatchEntry = entry;
+        ModuleConfig moduleConfig = getModuleConfig(moduleName);
+        if (moduleConfig != null) {
+            Collection<ShardConfig> shardConfigs = moduleConfig.getShardConfigs();
+            if (!shardConfigs.isEmpty()) {
+                return shardConfigs.iterator().next().getName();
             }
         }
+        return null;
+    }
 
-        //TODO we really should have mapping based on prefix instead of Strings
-        return ClusterUtils.getCleanShardName(bestMatchEntry.getKey().getRootIdentifier());
+    private ModuleConfig getModuleConfig(final String moduleName) {
+        return moduleConfigMap.get(requireNonNull(moduleName, "moduleName should not be null"));
     }
 
     @Override
@@ -151,17 +124,11 @@ public class ConfigurationImpl implements Configuration {
             }
         }
 
-        for (final PrefixShardConfiguration prefixConfig : prefixConfigMap.values()) {
-            if (shardName.equals(ClusterUtils.getCleanShardName(prefixConfig.getPrefix().getRootIdentifier()))) {
-                return prefixConfig.getShardMemberNames();
-            }
-        }
-
-        return Collections.emptyList();
+        return List.of();
     }
 
     private static void checkNotNullShardName(final String shardName) {
-        Preconditions.checkNotNull(shardName, "shardName should not be null");
+        requireNonNull(shardName, "shardName should not be null");
     }
 
     @Override
@@ -180,11 +147,11 @@ public class ConfigurationImpl implements Configuration {
     }
 
     @Override
-    public synchronized void addModuleShardConfiguration(ModuleShardConfiguration config) {
-        Preconditions.checkNotNull(config, "ModuleShardConfiguration should not be null");
+    public synchronized void addModuleShardConfiguration(final ModuleShardConfiguration config) {
+        requireNonNull(config, "ModuleShardConfiguration should not be null");
 
         ModuleConfig moduleConfig = ModuleConfig.builder(config.getModuleName())
-                .nameSpace(config.getNamespace().toASCIIString())
+                .nameSpace(config.getNamespace().toString())
                 .shardStrategy(createShardStrategy(config.getModuleName(), config.getShardStrategyName()))
                 .shardConfig(config.getShardName(), config.getShardMemberNames()).build();
 
@@ -195,57 +162,20 @@ public class ConfigurationImpl implements Configuration {
         allShardNames = ImmutableSet.<String>builder().addAll(allShardNames).add(config.getShardName()).build();
     }
 
-    @Override
-    public void addPrefixShardConfiguration(@Nonnull final PrefixShardConfiguration config) {
-        Preconditions.checkNotNull(config, "PrefixShardConfiguration cannot be null");
-        addPrefixConfig(config);
-        allShardNames = ImmutableSet.<String>builder().addAll(allShardNames)
-                .add(ClusterUtils.getCleanShardName(config.getPrefix().getRootIdentifier())).build();
-    }
-
-    @Override
-    public void removePrefixShardConfiguration(@Nonnull final DOMDataTreeIdentifier prefix) {
-        Preconditions.checkNotNull(prefix, "Prefix cannot be null");
-
-        removePrefixConfig(prefix);
-
-        final HashSet<String> temp = new HashSet<>(allShardNames);
-        temp.remove(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
-
-        allShardNames = ImmutableSet.copyOf(temp);
-    }
-
-    @Override
-    public Map<DOMDataTreeIdentifier, PrefixShardConfiguration> getAllPrefixShardConfigurations() {
-        return ImmutableMap.copyOf(prefixConfigMap);
-    }
-
-    private void addPrefixConfig(final PrefixShardConfiguration config) {
-        final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newPrefixConfigMap = new HashMap<>(prefixConfigMap);
-        newPrefixConfigMap.put(config.getPrefix(), config);
-        prefixConfigMap = ImmutableMap.copyOf(newPrefixConfigMap);
-    }
-
-    private void removePrefixConfig(final DOMDataTreeIdentifier prefix) {
-        final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> newPrefixConfigMap = new HashMap<>(prefixConfigMap);
-        newPrefixConfigMap.remove(prefix);
-        prefixConfigMap = ImmutableMap.copyOf(newPrefixConfigMap);
-    }
-
-    private ShardStrategy createShardStrategy(String moduleName, String shardStrategyName) {
+    private ShardStrategy createShardStrategy(final String moduleName, final String shardStrategyName) {
         return ShardStrategyFactory.newShardStrategyInstance(moduleName, shardStrategyName, this);
     }
 
     @Override
-    public boolean isShardConfigured(String shardName) {
+    public boolean isShardConfigured(final String shardName) {
         checkNotNullShardName(shardName);
         return allShardNames.contains(shardName);
     }
 
     @Override
-    public void addMemberReplicaForShard(String shardName, MemberName newMemberName) {
+    public void addMemberReplicaForShard(final String shardName, final MemberName newMemberName) {
         checkNotNullShardName(shardName);
-        Preconditions.checkNotNull(newMemberName, "MemberName should not be null");
+        requireNonNull(newMemberName, "MemberName should not be null");
 
         for (ModuleConfig moduleConfig: moduleConfigMap.values()) {
             ShardConfig shardConfig = moduleConfig.getShardConfig(shardName);
@@ -259,9 +189,9 @@ public class ConfigurationImpl implements Configuration {
     }
 
     @Override
-    public void removeMemberReplicaForShard(String shardName, MemberName newMemberName) {
+    public void removeMemberReplicaForShard(final String shardName, final MemberName newMemberName) {
         checkNotNullShardName(shardName);
-        Preconditions.checkNotNull(newMemberName, "MemberName should not be null");
+        requireNonNull(newMemberName, "MemberName should not be null");
 
         for (ModuleConfig moduleConfig: moduleConfigMap.values()) {
             ShardConfig shardConfig = moduleConfig.getShardConfig(shardName);
@@ -274,29 +204,6 @@ public class ConfigurationImpl implements Configuration {
         }
     }
 
-    @Override
-    public ShardStrategy getStrategyForPrefix(@Nonnull final DOMDataTreeIdentifier prefix) {
-        Preconditions.checkNotNull(prefix, "Prefix cannot be null");
-        // FIXME using prefix tables like in mdsal will be better
-        Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> bestMatchEntry =
-                new SimpleEntry<>(
-                        new DOMDataTreeIdentifier(prefix.getDatastoreType(), YangInstanceIdentifier.EMPTY), null);
-
-        for (Entry<DOMDataTreeIdentifier, PrefixShardConfiguration> entry : prefixConfigMap.entrySet()) {
-            if (entry.getKey().contains(prefix) && entry.getKey().getRootIdentifier().getPathArguments().size()
-                    > bestMatchEntry.getKey().getRootIdentifier().getPathArguments().size()) {
-                bestMatchEntry = entry;
-            }
-        }
-
-        if (bestMatchEntry.getValue() == null) {
-            return null;
-        }
-        return new PrefixShardStrategy(ClusterUtils
-                .getCleanShardName(bestMatchEntry.getKey().getRootIdentifier()),
-                bestMatchEntry.getKey().getRootIdentifier());
-    }
-
     private void updateModuleConfigMap(final ModuleConfig moduleConfig) {
         final Map<String, ModuleConfig> newModuleConfigMap = new HashMap<>(moduleConfigMap);
         newModuleConfigMap.put(moduleConfig.getName(), moduleConfig);
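
The surviving updateModuleConfigMap() keeps the same copy-on-write idiom the removed prefix-shard helpers used: copy the current ImmutableMap into a HashMap, mutate the copy, then republish an immutable snapshot. A minimal, self-contained sketch of that idiom (the class and field names below are illustrative, not from this patch):

    import com.google.common.collect.ImmutableMap;
    import java.util.HashMap;
    import java.util.Map;

    final class CopyOnWriteRegistry<K, V> {
        // Volatile so concurrent readers always observe a fully-built snapshot.
        private volatile ImmutableMap<K, V> snapshot = ImmutableMap.of();

        Map<K, V> current() {
            return snapshot;
        }

        synchronized void put(final K key, final V value) {
            final Map<K, V> copy = new HashMap<>(snapshot);
            copy.put(key, value);
            snapshot = ImmutableMap.copyOf(copy);
        }

        synchronized void remove(final K key) {
            final Map<K, V> copy = new HashMap<>(snapshot);
            copy.remove(key);
            snapshot = ImmutableMap.copyOf(copy);
        }
    }
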
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/FileModuleShardConfigProvider.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/FileModuleShardConfigProvider.java
index ba28e5dc3ac8c13b27a44d017653a0b156e5304b..7ee67be47c0159b83218820951cf369903f4c598 100644 (file)
@@ -8,15 +8,10 @@
 package org.opendaylight.controller.cluster.datastore.config;
 
 import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
-import com.typesafe.config.ConfigObject;
-import java.io.File;
-import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -25,12 +20,17 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-public class FileModuleShardConfigProvider implements ModuleShardConfigProvider {
+@Component(immediate = true, service = ModuleShardConfigProvider.class)
+public class FileModuleShardConfigProvider extends AbstractModuleShardConfigProvider {
     private static final Logger LOG = LoggerFactory.getLogger(FileModuleShardConfigProvider.class);
 
     private final String moduleShardsConfigPath;
     private final String modulesConfigPath;
 
+    public FileModuleShardConfigProvider() {
+        this("./configuration/initial/module-shards.conf", "./configuration/initial/modules.conf");
+    }
+
     public FileModuleShardConfigProvider(final String moduleShardsConfigPath, final String modulesConfigPath) {
         this.moduleShardsConfigPath = moduleShardsConfigPath;
         this.modulesConfigPath = modulesConfigPath;
@@ -38,84 +38,21 @@ public class FileModuleShardConfigProvider implements ModuleShardConfigProvider
 
     @Override
     public Map<String, ModuleConfig.Builder> retrieveModuleConfigs(final Configuration configuration) {
-        final File moduleShardsFile = new File(moduleShardsConfigPath);
-        final File modulesFile = new File(modulesConfigPath);
-
-        Config moduleShardsConfig = null;
-        if (moduleShardsFile.exists()) {
-            LOG.info("module shards config file exists - reading config from it");
-            moduleShardsConfig = ConfigFactory.parseFile(moduleShardsFile);
-        } else {
-            LOG.warn("module shards configuration read from resource");
-            moduleShardsConfig = ConfigFactory.load(moduleShardsConfigPath);
-        }
-
-        Config modulesConfig = null;
-        if (modulesFile.exists()) {
-            LOG.info("modules config file exists - reading config from it");
-            modulesConfig = ConfigFactory.parseFile(modulesFile);
-        } else {
-            LOG.warn("modules configuration read from resource");
-            modulesConfig = ConfigFactory.load(modulesConfigPath);
-        }
+        Config moduleShardsConfig = loadConfigFromPath(moduleShardsConfigPath);
+        Config modulesConfig = loadConfigFromPath(modulesConfigPath);
 
         final Map<String, ModuleConfig.Builder> moduleConfigMap = readModuleShardsConfig(moduleShardsConfig);
         readModulesConfig(modulesConfig, moduleConfigMap, configuration);
-
         return moduleConfigMap;
     }
 
-    private static void readModulesConfig(final Config modulesConfig,
-            final Map<String, ModuleConfig.Builder> moduleConfigMap, final Configuration configuration) {
-        final List<? extends ConfigObject> modulesConfigObjectList = modulesConfig.getObjectList("modules");
-
-        for (final ConfigObject o : modulesConfigObjectList) {
-            final ConfigObjectWrapper wrapper = new ConfigObjectWrapper(o);
-
-            final String moduleName = wrapper.stringValue("name");
-            final ModuleConfig.Builder builder = moduleConfigMap.computeIfAbsent(moduleName, ModuleConfig::builder);
-
-            builder.nameSpace(wrapper.stringValue("namespace"));
-            builder.shardStrategy(ShardStrategyFactory.newShardStrategyInstance(moduleName,
-                    wrapper.stringValue("shard-strategy"), configuration));
-        }
+    @Activate
+    void activate() {
+        LOG.info("Shard configuration provider started");
     }
 
-    private static Map<String, ModuleConfig.Builder> readModuleShardsConfig(final Config moduleShardsConfig) {
-        final List<? extends ConfigObject> moduleShardsConfigObjectList =
-            moduleShardsConfig.getObjectList("module-shards");
-
-        final Map<String, ModuleConfig.Builder> moduleConfigMap = new HashMap<>();
-        for (final ConfigObject moduleShardConfigObject : moduleShardsConfigObjectList) {
-            final String moduleName = moduleShardConfigObject.get("name").unwrapped().toString();
-            final ModuleConfig.Builder builder = ModuleConfig.builder(moduleName);
-
-            final List<? extends ConfigObject> shardsConfigObjectList =
-                moduleShardConfigObject.toConfig().getObjectList("shards");
-
-            for (final ConfigObject shard : shardsConfigObjectList) {
-                final String shardName = shard.get("name").unwrapped().toString();
-                final List<MemberName> replicas = shard.toConfig().getStringList("replicas").stream()
-                        .map(MemberName::forName).collect(Collectors.toList());
-                builder.shardConfig(shardName, replicas);
-            }
-
-            moduleConfigMap.put(moduleName, builder);
-        }
-
-        return moduleConfigMap;
-    }
-
-    private static class ConfigObjectWrapper {
-
-        private final ConfigObject configObject;
-
-        ConfigObjectWrapper(final ConfigObject configObject) {
-            this.configObject = configObject;
-        }
-
-        public String stringValue(final String name) {
-            return configObject.get(name).unwrapped().toString();
-        }
+    @Deactivate
+    void deactivate() {
+        LOG.info("Shard configuration provider stopped");
     }
 }
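
The file-versus-classpath fallback deleted above now lives in AbstractModuleShardConfigProvider, which is not shown in this diff. The sketch below is a hypothetical stand-alone reconstruction of that loadConfigFromPath() helper, based solely on the removed logic; the actual signature and logging in the abstract class may differ:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;
    import java.io.File;

    // Hypothetical equivalent of the shared helper; not the actual class.
    final class ConfigLoading {
        private ConfigLoading() {
            // utility class
        }

        static Config loadConfigFromPath(final String configPath) {
            final File configFile = new File(configPath);
            if (configFile.exists()) {
                // An on-disk file takes precedence over a bundled resource ...
                return ConfigFactory.parseFile(configFile);
            }
            // ... otherwise fall back to a classpath resource of the same name.
            return ConfigFactory.load(configPath);
        }
    }
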
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/HybridModuleShardConfigProvider.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/HybridModuleShardConfigProvider.java
new file mode 100644 (file)
index 0000000..02c2f2b
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.config;
+
+import static java.util.Objects.requireNonNull;
+
+import com.typesafe.config.Config;
+import java.util.Map;
+
+public class HybridModuleShardConfigProvider extends AbstractModuleShardConfigProvider {
+    private final Config moduleShardsConfig;
+    private final String modulesConfigPath;
+
+    public HybridModuleShardConfigProvider(final Config moduleShardsConfig, final String modulesConfigPath) {
+        this.moduleShardsConfig = requireNonNull(moduleShardsConfig, "ModuleShardsConfig can't be null");
+        this.modulesConfigPath = modulesConfigPath;
+    }
+
+    @Override
+    public Map<String, ModuleConfig.Builder> retrieveModuleConfigs(final Configuration configuration) {
+        Config modulesConfig = loadConfigFromPath(modulesConfigPath);
+
+        final Map<String, ModuleConfig.Builder> moduleConfigMap = readModuleShardsConfig(this.moduleShardsConfig);
+        readModulesConfig(modulesConfig, moduleConfigMap, configuration);
+        return moduleConfigMap;
+    }
+}
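
The new hybrid provider accepts an already-parsed module-shards Config together with a filesystem path for the modules config, which is useful when the shard layout is assembled at runtime. A hedged usage sketch (the HOCON literal, member name and path are illustrative; the HOCON shape follows the module-shards format parsed by readModuleShardsConfig()):

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    // Assumes the same package as the providers above.
    final class HybridProviderExample {
        static ModuleShardConfigProvider createProvider() {
            // Shard layout built in memory; the modules config is still read from a path.
            final Config moduleShards = ConfigFactory.parseString(
                "module-shards = [ { name = \"default\", "
                    + "shards = [ { name = \"default\", replicas = [ \"member-1\" ] } ] } ]");
            return new HybridModuleShardConfigProvider(moduleShards,
                "./configuration/initial/modules.conf");
        }
    }
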
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleConfig.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleConfig.java
index 5b4e2ffe702a8a538d8d5876712cbb3dae5a2948..ccd7dd66065ef32b8b824a764e6cbb061c5b5db6 100644 (file)
@@ -7,13 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore.config;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.ImmutableMap;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 
@@ -30,39 +31,33 @@ public final class ModuleConfig {
 
     ModuleConfig(final String name, final String namespace, final ShardStrategy shardStrategy,
             final Map<String, ShardConfig> shardConfigs) {
-        this.name = name;
+        this.name = requireNonNull(name);
         this.namespace = namespace;
         this.shardStrategy = shardStrategy;
         this.shardConfigs = shardConfigs;
     }
 
-    @Nonnull
-    public String getName() {
+    public @NonNull String getName() {
         return name;
     }
 
-    @Nullable
-    public String getNamespace() {
+    public @Nullable String getNamespace() {
         return namespace;
     }
 
-    @Nullable
-    public ShardStrategy getShardStrategy() {
+    public @Nullable ShardStrategy getShardStrategy() {
         return shardStrategy;
     }
 
-    @Nullable
-    public ShardConfig getShardConfig(final String forName) {
+    public @Nullable ShardConfig getShardConfig(final String forName) {
         return shardConfigs.get(forName);
     }
 
-    @Nonnull
-    public Collection<ShardConfig> getShardConfigs() {
+    public @NonNull Collection<ShardConfig> getShardConfigs() {
         return shardConfigs.values();
     }
 
-    @Nonnull
-    public Collection<String> getShardNames() {
+    public @NonNull Collection<String> getShardNames() {
         return shardConfigs.keySet();
     }
 
@@ -114,8 +109,7 @@ public final class ModuleConfig {
         }
 
         public ModuleConfig build() {
-            return new ModuleConfig(Preconditions.checkNotNull(name), nameSpace, shardStrategy,
-                    ImmutableMap.copyOf(shardConfigs));
+            return new ModuleConfig(name, nameSpace, shardStrategy, ImmutableMap.copyOf(shardConfigs));
         }
     }
 }
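
With this change the null check on the module name moves from Builder.build() into the ModuleConfig constructor. For orientation, a hand-built ModuleConfig might look like the sketch below, mirroring the Builder calls visible in the removed parsing code (module, namespace, shard and member names are placeholders; shardStrategy() is omitted since the getter is @Nullable):

    import java.util.List;
    import org.opendaylight.controller.cluster.access.concepts.MemberName;

    final class ModuleConfigExample {
        static ModuleConfig inventoryModule() {
            // Placeholder module: "inventory" with a single shard replicated to two members.
            final ModuleConfig.Builder builder = ModuleConfig.builder("inventory");
            builder.nameSpace("urn:opendaylight:inventory");
            builder.shardConfig("inventory",
                List.of(MemberName.forName("member-1"), MemberName.forName("member-2")));
            return builder.build();
        }
    }
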
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleShardConfigProvider.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleShardConfigProvider.java
index 1fc31f52ff65ad8d73c684e78ef6c9a9d9267a3a..e11375943c64ec72c919cbc97ddcc52b2d379623 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.config;
 
 import java.util.Map;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Interface for a class that provides module and shard configuration information.
@@ -19,5 +19,5 @@ public interface ModuleShardConfigProvider {
     /**
      * Returns a Map of ModuleConfig Builder instances keyed by module name.
      */
-    @Nonnull Map<String, ModuleConfig.Builder> retrieveModuleConfigs(@Nonnull Configuration configuration);
+    @NonNull Map<String, ModuleConfig.Builder> retrieveModuleConfigs(@NonNull Configuration configuration);
 }
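
The JSR-305 annotations are swapped for Eclipse JDT type-use annotations, which attach to the type rather than the declaration. A minimal hypothetical implementation honouring the updated contract (the empty result is a placeholder only; real providers parse module-shards/modules configuration):

    import java.util.Map;
    import org.eclipse.jdt.annotation.NonNull;

    // Assumes the same package as the interface.
    final class EmptyModuleShardConfigProvider implements ModuleShardConfigProvider {
        @Override
        public @NonNull Map<String, ModuleConfig.Builder> retrieveModuleConfigs(
                final @NonNull Configuration configuration) {
            // Placeholder: returns no module configurations at all.
            return Map.of();
        }
    }
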
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleShardConfiguration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleShardConfiguration.java
index f52687f698ab8d178a6fa015422269e4d7fc8e9a..177eb8d7342fe5f5b6f523af9a7121faa536cec1 100644 (file)
@@ -7,12 +7,13 @@
  */
 package org.opendaylight.controller.cluster.datastore.config;
 
-import com.google.common.base.Preconditions;
-import java.net.URI;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collection;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
 
 /**
  * Encapsulates information for adding a new module shard configuration.
@@ -20,7 +21,7 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
  * @author Thomas Pantelis
  */
 public class ModuleShardConfiguration {
-    private final URI namespace;
+    private final XMLNamespace namespace;
     private final String moduleName;
     private final String shardName;
     private final String shardStrategyName;
@@ -36,16 +37,17 @@ public class ModuleShardConfiguration {
      *                          is used.
      * @param shardMemberNames the names of the shard's member replicas.
      */
-    public ModuleShardConfiguration(@Nonnull URI namespace, @Nonnull String moduleName, @Nonnull String shardName,
-            @Nullable String shardStrategyName, @Nonnull Collection<MemberName> shardMemberNames) {
-        this.namespace = Preconditions.checkNotNull(namespace, "nameSpace should not be null");
-        this.moduleName = Preconditions.checkNotNull(moduleName, "moduleName should not be null");
-        this.shardName = Preconditions.checkNotNull(shardName, "shardName should not be null");
+    public ModuleShardConfiguration(final @NonNull XMLNamespace namespace, final @NonNull String moduleName,
+            final @NonNull String shardName, final @Nullable String shardStrategyName,
+            final @NonNull Collection<MemberName> shardMemberNames) {
+        this.namespace = requireNonNull(namespace, "nameSpace should not be null");
+        this.moduleName = requireNonNull(moduleName, "moduleName should not be null");
+        this.shardName = requireNonNull(shardName, "shardName should not be null");
         this.shardStrategyName = shardStrategyName;
-        this.shardMemberNames = Preconditions.checkNotNull(shardMemberNames, "shardMemberNames");
+        this.shardMemberNames = requireNonNull(shardMemberNames, "shardMemberNames");
     }
 
-    public URI getNamespace() {
+    public XMLNamespace getNamespace() {
         return namespace;
     }
 
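
ModuleShardConfiguration now carries an XMLNamespace instead of a java.net.URI. A hedged construction sketch (the namespace string, module, shard and member names are made up for illustration):

    import java.util.List;
    import org.opendaylight.controller.cluster.access.concepts.MemberName;
    import org.opendaylight.yangtools.yang.common.XMLNamespace;

    final class ModuleShardConfigurationExample {
        static ModuleShardConfiguration carsShardConfig() {
            return new ModuleShardConfiguration(
                XMLNamespace.of("urn:opendaylight:params:xml:ns:yang:controller:config:cars"),
                "cars", "cars-shard",
                // A null strategy name falls back to the default shard strategy.
                null,
                List.of(MemberName.forName("member-1")));
        }
    }
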
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/PrefixShardConfiguration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/PrefixShardConfiguration.java
deleted file mode 100644 (file)
index c387fe4..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.config;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSet;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Configuration for prefix based shards.
- */
-public class PrefixShardConfiguration implements Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private PrefixShardConfiguration prefixShardConfiguration;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-        }
-
-        Proxy(final PrefixShardConfiguration prefixShardConfiguration) {
-            this.prefixShardConfiguration = prefixShardConfiguration;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput objectOutput) throws IOException {
-            objectOutput.writeObject(prefixShardConfiguration.getPrefix());
-            objectOutput.writeObject(prefixShardConfiguration.getShardStrategyName());
-
-            objectOutput.writeInt(prefixShardConfiguration.getShardMemberNames().size());
-            for (MemberName name : prefixShardConfiguration.getShardMemberNames()) {
-                name.writeTo(objectOutput);
-            }
-        }
-
-        @Override
-        public void readExternal(final ObjectInput objectInput) throws IOException, ClassNotFoundException {
-            final DOMDataTreeIdentifier localPrefix = (DOMDataTreeIdentifier) objectInput.readObject();
-            final String localStrategyName = (String) objectInput.readObject();
-
-            final int size = objectInput.readInt();
-            final Collection<MemberName> localShardMemberNames = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                localShardMemberNames.add(MemberName.readFrom(objectInput));
-            }
-
-            prefixShardConfiguration = new PrefixShardConfiguration(localPrefix, localStrategyName,
-                    localShardMemberNames);
-        }
-
-        private Object readResolve() {
-            return prefixShardConfiguration;
-        }
-    }
-
-    private static final long serialVersionUID = 1L;
-
-    private final DOMDataTreeIdentifier prefix;
-    private final String shardStrategyName;
-    private final Collection<MemberName> shardMemberNames;
-
-    public PrefixShardConfiguration(final DOMDataTreeIdentifier prefix,
-                                    final String shardStrategyName,
-                                    final Collection<MemberName> shardMemberNames) {
-        this.prefix = Preconditions.checkNotNull(prefix);
-        this.shardStrategyName = Preconditions.checkNotNull(shardStrategyName);
-        this.shardMemberNames = ImmutableSet.copyOf(shardMemberNames);
-    }
-
-    public DOMDataTreeIdentifier getPrefix() {
-        return prefix;
-    }
-
-    public String getShardStrategyName() {
-        return shardStrategyName;
-    }
-
-    public Collection<MemberName> getShardMemberNames() {
-        return shardMemberNames;
-    }
-
-    @Override
-    public String toString() {
-        return "PrefixShardConfiguration{"
-                + "prefix=" + prefix
-                + ", shardStrategyName='"
-                + shardStrategyName + '\''
-                + ", shardMemberNames=" + shardMemberNames
-                + '}';
-    }
-
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ShardConfig.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ShardConfig.java
index 59240a0dbb971d7b07985d8f5ae21b349a6ba01a..0fe1233b66b5c8237f3e6db320a0d4a8180b6224 100644 (file)
@@ -7,11 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore.config;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.ImmutableSet;
 import java.util.Collection;
 import java.util.Set;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 
 /**
@@ -21,18 +22,16 @@ public class ShardConfig {
     private final String name;
     private final Set<MemberName> replicas;
 
-    public ShardConfig(@Nonnull final String name, @Nonnull final Collection<MemberName> replicas) {
-        this.name = Preconditions.checkNotNull(name);
-        this.replicas = ImmutableSet.copyOf(Preconditions.checkNotNull(replicas));
+    public ShardConfig(final @NonNull String name, final @NonNull Collection<MemberName> replicas) {
+        this.name = requireNonNull(name);
+        this.replicas = ImmutableSet.copyOf(replicas);
     }
 
-    @Nonnull
-    public String getName() {
+    public @NonNull String getName() {
         return name;
     }
 
-    @Nonnull
-    public Set<MemberName> getReplicas() {
+    public @NonNull Set<MemberName> getReplicas() {
         return replicas;
     }
 }
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractEntityOwnerChangeListener.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractEntityOwnerChangeListener.java
deleted file mode 100644 (file)
index cf1fd43..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_QNAME;
-
-import com.google.common.base.Optional;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-
-public abstract class AbstractEntityOwnerChangeListener implements DOMDataTreeChangeListener {
-    private static final YangInstanceIdentifier EOS_PATH = YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH)
-            .node(EntityType.QNAME).node(EntityType.QNAME).node(ENTITY_QNAME).node(ENTITY_QNAME)
-            .node(ENTITY_OWNER_QNAME).build();
-
-    void init(final ShardDataTree shardDataTree) {
-        shardDataTree.registerTreeChangeListener(EOS_PATH, this, Optional.absent(), noop -> { /* NOOP */ });
-    }
-
-    protected static String extractOwner(final LeafNode<?> ownerLeaf) {
-        return ownerLeaf.getValue().toString();
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/CandidateListChangeListener.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/CandidateListChangeListener.java
deleted file mode 100644 (file)
index a7be8c5..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.CANDIDATE_NAME_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_QNAME;
-
-import akka.actor.ActorRef;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.CandidateRemoved;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Listens for candidate entries added/removed and notifies the EntityOwnershipShard appropriately.
- *
- * @author Moiz Raja
- * @author Thomas Pantelis
- */
-class CandidateListChangeListener implements DOMDataTreeChangeListener {
-    private static final Logger LOG = LoggerFactory.getLogger(CandidateListChangeListener.class);
-
-    private final String logId;
-    private final ActorRef shard;
-    private final Map<YangInstanceIdentifier, Collection<String>> currentCandidates = new HashMap<>();
-
-    CandidateListChangeListener(ActorRef shard, String logId) {
-        this.shard = Preconditions.checkNotNull(shard, "shard should not be null");
-        this.logId = logId;
-    }
-
-    void init(ShardDataTree shardDataTree) {
-        shardDataTree.registerTreeChangeListener(YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH)
-            .node(EntityType.QNAME).node(EntityType.QNAME).node(ENTITY_QNAME).node(ENTITY_QNAME)
-                .node(Candidate.QNAME).node(Candidate.QNAME).build(), this, Optional.absent(), noop -> { /* NOOP */ });
-    }
-
-    @Override
-    public void onDataTreeChanged(Collection<DataTreeCandidate> changes) {
-        for (DataTreeCandidate change: changes) {
-            DataTreeCandidateNode changeRoot = change.getRootNode();
-            ModificationType type = changeRoot.getModificationType();
-
-            LOG.debug("{}: Candidate node changed: {}, {}", logId, type, change.getRootPath());
-
-            NodeIdentifierWithPredicates candidateKey =
-                    (NodeIdentifierWithPredicates) change.getRootPath().getLastPathArgument();
-            String candidate = candidateKey.getKeyValues().get(CANDIDATE_NAME_QNAME).toString();
-
-            YangInstanceIdentifier entityId = extractEntityPath(change.getRootPath());
-
-            if (type == ModificationType.WRITE || type == ModificationType.APPEARED) {
-                LOG.debug("{}: Candidate {} was added for entity {}", logId, candidate, entityId);
-
-                Collection<String> newCandidates = addToCurrentCandidates(entityId, candidate);
-                shard.tell(new CandidateAdded(entityId, candidate, new ArrayList<>(newCandidates)), shard);
-            } else if (type == ModificationType.DELETE || type == ModificationType.DISAPPEARED) {
-                LOG.debug("{}: Candidate {} was removed for entity {}", logId, candidate, entityId);
-
-                Collection<String> newCandidates = removeFromCurrentCandidates(entityId, candidate);
-                shard.tell(new CandidateRemoved(entityId, candidate, new ArrayList<>(newCandidates)), shard);
-            }
-        }
-    }
-
-    private Collection<String> addToCurrentCandidates(YangInstanceIdentifier entityId, String newCandidate) {
-        Collection<String> candidates = currentCandidates.computeIfAbsent(entityId, k -> new LinkedHashSet<>());
-
-        candidates.add(newCandidate);
-        return candidates;
-    }
-
-    private Collection<String> removeFromCurrentCandidates(YangInstanceIdentifier entityId, String candidateToRemove) {
-        Collection<String> candidates = currentCandidates.get(entityId);
-        if (candidates != null) {
-            candidates.remove(candidateToRemove);
-            return candidates;
-        }
-
-        // Shouldn't happen
-        return Collections.emptyList();
-    }
-
-    private static YangInstanceIdentifier extractEntityPath(YangInstanceIdentifier candidatePath) {
-        List<PathArgument> newPathArgs = new ArrayList<>();
-        for (PathArgument pathArg: candidatePath.getPathArguments()) {
-            newPathArgs.add(pathArg);
-            if (pathArg instanceof NodeIdentifierWithPredicates) {
-                NodeIdentifierWithPredicates nodeKey = (NodeIdentifierWithPredicates) pathArg;
-                Entry<QName, Object> key = nodeKey.getKeyValues().entrySet().iterator().next();
-                if (ENTITY_ID_QNAME.equals(key.getKey())) {
-                    break;
-                }
-            }
-        }
-
-        return YangInstanceIdentifier.create(newPathArgs);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipCandidateRegistration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipCandidateRegistration.java
deleted file mode 100644 (file)
index 4d65c55..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-
-/**
- * Implementation of EntityOwnershipCandidateRegistration.
- *
- * @author Thomas Pantelis
- */
-class DistributedEntityOwnershipCandidateRegistration extends AbstractObjectRegistration<DOMEntity>
-        implements DOMEntityOwnershipCandidateRegistration {
-    private final DistributedEntityOwnershipService service;
-
-    DistributedEntityOwnershipCandidateRegistration(final DOMEntity entity,
-            final DistributedEntityOwnershipService service) {
-        super(entity);
-        this.service = service;
-    }
-
-    @Override
-    protected void removeRegistration() {
-        service.unregisterCandidate(getInstance());
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipListenerRegistration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipListenerRegistration.java
deleted file mode 100644 (file)
index 149551b..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.base.Preconditions;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-
-/**
- * Implementation of EntityOwnershipListenerRegistration.
- *
- * @author Thomas Pantelis
- */
-class DistributedEntityOwnershipListenerRegistration extends AbstractObjectRegistration<DOMEntityOwnershipListener>
-        implements DOMEntityOwnershipListenerRegistration {
-    private final DistributedEntityOwnershipService service;
-    private final String entityType;
-
-    DistributedEntityOwnershipListenerRegistration(final DOMEntityOwnershipListener listener, final String entityType,
-            final DistributedEntityOwnershipService service) {
-        super(listener);
-        this.entityType = Preconditions.checkNotNull(entityType, "entityType cannot be null");
-        this.service = Preconditions.checkNotNull(service, "DOMEntityOwnershipListener cannot be null");
-    }
-
-    @Override
-    protected void removeRegistration() {
-        service.unregisterListener(getEntityType(), getInstance());
-    }
-
-    @Override
-    public String getEntityType() {
-        return entityType;
-    }
-
-    @Override
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("entityType", entityType);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipService.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipService.java
deleted file mode 100644 (file)
index ab4d9b2..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.CANDIDATE_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNER_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityPath;
-
-import akka.actor.ActorRef;
-import akka.dispatch.OnComplete;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeUnit;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
-import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.EntityOwners;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.Duration;
-
-/**
- * The distributed implementation of the EntityOwnershipService.
- *
- * @author Thomas Pantelis
- */
-public class DistributedEntityOwnershipService implements DOMEntityOwnershipService, AutoCloseable {
-    @VisibleForTesting
-    static final String ENTITY_OWNERSHIP_SHARD_NAME = "entity-ownership";
-
-    private static final Logger LOG = LoggerFactory.getLogger(DistributedEntityOwnershipService.class);
-    private static final Timeout MESSAGE_TIMEOUT = new Timeout(1, TimeUnit.MINUTES);
-
-    private final ConcurrentMap<DOMEntity, DOMEntity> registeredEntities = new ConcurrentHashMap<>();
-    private final ActorUtils context;
-
-    private volatile ActorRef localEntityOwnershipShard;
-    private volatile DataTree localEntityOwnershipShardDataTree;
-
-    DistributedEntityOwnershipService(final ActorUtils context) {
-        this.context = Preconditions.checkNotNull(context);
-    }
-
-    public static DistributedEntityOwnershipService start(final ActorUtils context,
-            final EntityOwnerSelectionStrategyConfig strategyConfig) {
-        ActorRef shardManagerActor = context.getShardManager();
-
-        Configuration configuration = context.getConfiguration();
-        Collection<MemberName> entityOwnersMemberNames = configuration.getUniqueMemberNamesForAllShards();
-        CreateShard createShard = new CreateShard(new ModuleShardConfiguration(EntityOwners.QNAME.getNamespace(),
-                "entity-owners", ENTITY_OWNERSHIP_SHARD_NAME, ModuleShardStrategy.NAME, entityOwnersMemberNames),
-                        newShardBuilder(context, strategyConfig), null);
-
-        Future<Object> createFuture = context.executeOperationAsync(shardManagerActor,
-                createShard, MESSAGE_TIMEOUT);
-
-        createFuture.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object response) {
-                if (failure != null) {
-                    LOG.error("Failed to create {} shard", ENTITY_OWNERSHIP_SHARD_NAME, failure);
-                } else {
-                    LOG.info("Successfully created {} shard", ENTITY_OWNERSHIP_SHARD_NAME);
-                }
-            }
-        }, context.getClientDispatcher());
-
-        return new DistributedEntityOwnershipService(context);
-    }
-
-    private void executeEntityOwnershipShardOperation(final ActorRef shardActor, final Object message) {
-        Future<Object> future = context.executeOperationAsync(shardActor, message, MESSAGE_TIMEOUT);
-        future.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object response) {
-                if (failure != null) {
-                    LOG.debug("Error sending message {} to {}", message, shardActor, failure);
-                } else {
-                    LOG.debug("{} message to {} succeeded", message, shardActor);
-                }
-            }
-        }, context.getClientDispatcher());
-    }
-
-    @VisibleForTesting
-    void executeLocalEntityOwnershipShardOperation(final Object message) {
-        if (localEntityOwnershipShard == null) {
-            Future<ActorRef> future = context.findLocalShardAsync(ENTITY_OWNERSHIP_SHARD_NAME);
-            future.onComplete(new OnComplete<ActorRef>() {
-                @Override
-                public void onComplete(final Throwable failure, final ActorRef shardActor) {
-                    if (failure != null) {
-                        LOG.error("Failed to find local {} shard", ENTITY_OWNERSHIP_SHARD_NAME, failure);
-                    } else {
-                        localEntityOwnershipShard = shardActor;
-                        executeEntityOwnershipShardOperation(localEntityOwnershipShard, message);
-                    }
-                }
-            }, context.getClientDispatcher());
-
-        } else {
-            executeEntityOwnershipShardOperation(localEntityOwnershipShard, message);
-        }
-    }
-
-    @Override
-    public DOMEntityOwnershipCandidateRegistration registerCandidate(final DOMEntity entity)
-            throws CandidateAlreadyRegisteredException {
-        Preconditions.checkNotNull(entity, "entity cannot be null");
-
-        if (registeredEntities.putIfAbsent(entity, entity) != null) {
-            throw new CandidateAlreadyRegisteredException(entity);
-        }
-
-        RegisterCandidateLocal registerCandidate = new RegisterCandidateLocal(entity);
-
-        LOG.debug("Registering candidate with message: {}", registerCandidate);
-
-        executeLocalEntityOwnershipShardOperation(registerCandidate);
-        return new DistributedEntityOwnershipCandidateRegistration(entity, this);
-    }
-
-    void unregisterCandidate(final DOMEntity entity) {
-        LOG.debug("Unregistering candidate for {}", entity);
-
-        executeLocalEntityOwnershipShardOperation(new UnregisterCandidateLocal(entity));
-        registeredEntities.remove(entity);
-    }
-
-    @Override
-    public DOMEntityOwnershipListenerRegistration registerListener(final String entityType,
-            final DOMEntityOwnershipListener listener) {
-        Preconditions.checkNotNull(entityType, "entityType cannot be null");
-        Preconditions.checkNotNull(listener, "listener cannot be null");
-
-        RegisterListenerLocal registerListener = new RegisterListenerLocal(listener, entityType);
-
-        LOG.debug("Registering listener with message: {}", registerListener);
-
-        executeLocalEntityOwnershipShardOperation(registerListener);
-        return new DistributedEntityOwnershipListenerRegistration(listener, entityType, this);
-    }
-
-    @Override
-    public Optional<EntityOwnershipState> getOwnershipState(final DOMEntity forEntity) {
-        Preconditions.checkNotNull(forEntity, "forEntity cannot be null");
-
-        DataTree dataTree = getLocalEntityOwnershipShardDataTree();
-        if (dataTree == null) {
-            return Optional.empty();
-        }
-
-        Optional<NormalizedNode<?, ?>> entityNode = dataTree.takeSnapshot().readNode(
-                entityPath(forEntity.getType(), forEntity.getIdentifier()));
-        if (!entityNode.isPresent()) {
-            return Optional.empty();
-        }
-
-        // Check if there are any candidates, if there are none we do not really have ownership state
-        final MapEntryNode entity = (MapEntryNode) entityNode.get();
-        final Optional<DataContainerChild<? extends PathArgument, ?>> optionalCandidates =
-                entity.getChild(CANDIDATE_NODE_ID);
-        final boolean hasCandidates = optionalCandidates.isPresent()
-                && ((MapNode) optionalCandidates.get()).getValue().size() > 0;
-        if (!hasCandidates) {
-            return Optional.empty();
-        }
-
-        MemberName localMemberName = context.getCurrentMemberName();
-        Optional<DataContainerChild<? extends PathArgument, ?>> ownerLeaf = entity.getChild(ENTITY_OWNER_NODE_ID);
-        String owner = ownerLeaf.isPresent() ? ownerLeaf.get().getValue().toString() : null;
-        boolean hasOwner = !Strings.isNullOrEmpty(owner);
-        boolean isOwner = hasOwner && localMemberName.getName().equals(owner);
-
-        return Optional.of(EntityOwnershipState.from(isOwner, hasOwner));
-    }
-
-    @Override
-    public boolean isCandidateRegistered(@Nonnull final DOMEntity entity) {
-        return registeredEntities.get(entity) != null;
-    }
-
-    @VisibleForTesting
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    DataTree getLocalEntityOwnershipShardDataTree() {
-        if (localEntityOwnershipShardDataTree == null) {
-            try {
-                if (localEntityOwnershipShard == null) {
-                    localEntityOwnershipShard = Await.result(context.findLocalShardAsync(
-                            ENTITY_OWNERSHIP_SHARD_NAME), Duration.Inf());
-                }
-
-                localEntityOwnershipShardDataTree = (DataTree) Await.result(Patterns.ask(localEntityOwnershipShard,
-                        GetShardDataTree.INSTANCE, MESSAGE_TIMEOUT), Duration.Inf());
-            } catch (Exception e) {
-                LOG.error("Failed to find local {} shard", ENTITY_OWNERSHIP_SHARD_NAME, e);
-            }
-        }
-
-        return localEntityOwnershipShardDataTree;
-    }
-
-    void unregisterListener(final String entityType, final DOMEntityOwnershipListener listener) {
-        LOG.debug("Unregistering listener {} for entity type {}", listener, entityType);
-
-        executeLocalEntityOwnershipShardOperation(new UnregisterListenerLocal(listener, entityType));
-    }
-
-    @Override
-    public void close() {
-    }
-
-    private static EntityOwnershipShard.Builder newShardBuilder(final ActorUtils context,
-            final EntityOwnerSelectionStrategyConfig strategyConfig) {
-        return EntityOwnershipShard.newBuilder().localMemberName(context.getCurrentMemberName())
-                .ownerSelectionStrategyConfig(strategyConfig);
-    }
-
-    @VisibleForTesting
-    ActorRef getLocalEntityOwnershipShard() {
-        return localEntityOwnershipShard;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnerChangeListener.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnerChangeListener.java
deleted file mode 100644 (file)
index 0201745..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.createEntity;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.base.Verify;
-import java.util.Collection;
-import java.util.Objects;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Listens for entity owner changes and notifies the EntityOwnershipListenerSupport appropriately.
- *
- * @author Thomas Pantelis
- */
-class EntityOwnerChangeListener extends AbstractEntityOwnerChangeListener {
-    private static final Logger LOG = LoggerFactory.getLogger(EntityOwnerChangeListener.class);
-
-    private final String localMemberName;
-    private final EntityOwnershipChangePublisher publisher;
-
-    EntityOwnerChangeListener(final MemberName localMemberName, final EntityOwnershipChangePublisher publisher) {
-        this.localMemberName = Verify.verifyNotNull(localMemberName.getName());
-        this.publisher = Preconditions.checkNotNull(publisher);
-    }
-
-    @Override
-    public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
-        for (DataTreeCandidate change: changes) {
-            DataTreeCandidateNode changeRoot = change.getRootNode();
-            LeafNode<?> ownerLeaf = (LeafNode<?>) changeRoot.getDataAfter().get();
-
-            LOG.debug("{}: Entity node changed: {}, {}", logId(), changeRoot.getModificationType(),
-                    change.getRootPath());
-
-            String newOwner = extractOwner(ownerLeaf);
-
-            String origOwner = null;
-            Optional<NormalizedNode<?, ?>> dataBefore = changeRoot.getDataBefore();
-            if (dataBefore.isPresent()) {
-                origOwner = extractOwner((LeafNode<?>) changeRoot.getDataBefore().get());
-            }
-
-            LOG.debug("{}: New owner: {}, Original owner: {}", logId(), newOwner, origOwner);
-
-            if (!Objects.equals(origOwner, newOwner)) {
-                boolean isOwner = localMemberName.equals(newOwner);
-                boolean wasOwner = localMemberName.equals(origOwner);
-                boolean hasOwner = !Strings.isNullOrEmpty(newOwner);
-
-                DOMEntity entity = createEntity(change.getRootPath());
-
-                LOG.debug(
-                    "{}: Calling notifyEntityOwnershipListeners: entity: {}, wasOwner: {}, isOwner: {}, hasOwner: {}",
-                    logId(), entity, wasOwner, isOwner, hasOwner);
-
-                publisher.notifyEntityOwnershipListeners(entity, wasOwner, isOwner, hasOwner);
-            }
-        }
-    }
-
-    private String logId() {
-        return publisher.getLogId();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnersModel.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnersModel.java
deleted file mode 100644 (file)
index 6451833..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import java.util.Map.Entry;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.EntityOwners;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.Entity;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.util.SharedSingletonMapTemplate;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableOrderedMapNodeBuilder;
-
-/**
- * Utility methods for entity-owners yang model.
- *
- * @author Thomas Pantelis
- */
-public final class EntityOwnersModel {
-    static final QName ENTITY_QNAME = Entity.QNAME;
-    static final QName CANDIDATE_NAME_QNAME = QName.create(Candidate.QNAME, "name");
-    static final QName ENTITY_ID_QNAME = QName.create(ENTITY_QNAME, "id");
-    static final QName ENTITY_OWNER_QNAME = QName.create(ENTITY_QNAME, "owner");
-    static final QName ENTITY_TYPE_QNAME = QName.create(EntityType.QNAME, "type");
-
-    static final NodeIdentifier ENTITY_OWNERS_NODE_ID = new NodeIdentifier(EntityOwners.QNAME);
-    static final NodeIdentifier ENTITY_OWNER_NODE_ID = new NodeIdentifier(ENTITY_OWNER_QNAME);
-    static final NodeIdentifier ENTITY_NODE_ID = new NodeIdentifier(ENTITY_QNAME);
-    static final NodeIdentifier ENTITY_ID_NODE_ID = new NodeIdentifier(ENTITY_ID_QNAME);
-    static final NodeIdentifier ENTITY_TYPE_NODE_ID = new NodeIdentifier(ENTITY_TYPE_QNAME);
-    static final NodeIdentifier CANDIDATE_NODE_ID = new NodeIdentifier(Candidate.QNAME);
-    static final NodeIdentifier CANDIDATE_NAME_NODE_ID = new NodeIdentifier(CANDIDATE_NAME_QNAME);
-    static final YangInstanceIdentifier ENTITY_OWNERS_PATH = YangInstanceIdentifier.of(EntityOwners.QNAME);
-    static final YangInstanceIdentifier ENTITY_TYPES_PATH =
-            YangInstanceIdentifier.of(EntityOwners.QNAME).node(EntityType.QNAME);
-
-    private static final SharedSingletonMapTemplate<QName> NODE_KEY_TEMPLATE = SharedSingletonMapTemplate.ordered(
-        CANDIDATE_NAME_QNAME);
-
-    private EntityOwnersModel() {
-    }
-
-    static YangInstanceIdentifier entityPath(final String entityType, final YangInstanceIdentifier entityId) {
-        return YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH).node(EntityType.QNAME)
-                .nodeWithKey(EntityType.QNAME, ENTITY_TYPE_QNAME, entityType).node(ENTITY_QNAME)
-                        .nodeWithKey(ENTITY_QNAME, ENTITY_ID_QNAME, entityId).build();
-
-    }
-
-    static YangInstanceIdentifier candidatePath(final String entityType, final YangInstanceIdentifier entityId,
-            final String candidateName) {
-        return YangInstanceIdentifier.builder(ENTITY_OWNERS_PATH).node(EntityType.QNAME)
-                .nodeWithKey(EntityType.QNAME, ENTITY_TYPE_QNAME, entityType).node(ENTITY_QNAME)
-                        .nodeWithKey(ENTITY_QNAME, ENTITY_ID_QNAME, entityId).node(Candidate.QNAME)
-                                .nodeWithKey(Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName).build();
-    }
-
-    static YangInstanceIdentifier candidatePath(final YangInstanceIdentifier entityPath, final String candidateName) {
-        return YangInstanceIdentifier.builder(entityPath).node(Candidate.QNAME).nodeWithKey(
-                Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName).build();
-    }
-
-    static NodeIdentifierWithPredicates candidateNodeKey(final String candidateName) {
-        return new NodeIdentifierWithPredicates(Candidate.QNAME, NODE_KEY_TEMPLATE.instantiateWithValue(candidateName));
-    }
-
-    static NormalizedNode<?, ?> entityOwnersWithCandidate(final String entityType,
-            final YangInstanceIdentifier entityId, final String candidateName) {
-        return entityOwnersWithEntityTypeEntry(entityTypeEntryWithEntityEntry(entityType,
-                entityEntryWithCandidateEntry(entityId, candidateName)));
-    }
-
-    static ContainerNode entityOwnersWithEntityTypeEntry(final MapEntryNode entityTypeNode) {
-        return ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                ENTITY_OWNERS_NODE_ID).addChild(ImmutableNodes.mapNodeBuilder(EntityType.QNAME)
-                        .addChild(entityTypeNode).build()).build();
-    }
-
-    static MapEntryNode entityTypeEntryWithEntityEntry(final String entityType, final MapEntryNode entityNode) {
-        return ImmutableNodes.mapEntryBuilder(EntityType.QNAME,
-                ENTITY_TYPE_QNAME, entityType).addChild(ImmutableNodes.mapNodeBuilder(
-                        ENTITY_QNAME).addChild(entityNode).build()).build();
-    }
-
-    static MapEntryNode entityEntryWithCandidateEntry(final YangInstanceIdentifier entityId,
-            final String candidateName) {
-        return ImmutableNodes.mapEntryBuilder(ENTITY_QNAME, ENTITY_ID_QNAME, entityId).addChild(
-                candidateEntry(candidateName)).build();
-    }
-
-    static MapNode candidateEntry(final String candidateName) {
-        return ImmutableOrderedMapNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(Candidate.QNAME))
-                .addChild(candidateMapEntry(candidateName)).build();
-    }
-
-    static MapEntryNode candidateMapEntry(final String candidateName) {
-        return ImmutableNodes.mapEntry(Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName);
-    }
-
-    static MapEntryNode entityEntryWithOwner(final YangInstanceIdentifier entityId, final String owner) {
-        return ImmutableNodes.mapEntryBuilder(ENTITY_QNAME, ENTITY_ID_QNAME, entityId)
-                .addChild(ImmutableNodes.leafNode(ENTITY_OWNER_QNAME, owner != null ? owner : ""))
-                .build();
-    }
-
-    public static String entityTypeFromEntityPath(final YangInstanceIdentifier entityPath) {
-        YangInstanceIdentifier parent = entityPath;
-        while (!parent.isEmpty()) {
-            if (EntityType.QNAME.equals(parent.getLastPathArgument().getNodeType())) {
-                YangInstanceIdentifier.NodeIdentifierWithPredicates entityTypeLastPathArgument =
-                        (YangInstanceIdentifier.NodeIdentifierWithPredicates) parent.getLastPathArgument();
-                return (String) entityTypeLastPathArgument.getKeyValues().get(ENTITY_TYPE_QNAME);
-            }
-            parent = parent.getParent();
-        }
-        return null;
-    }
-
-    static DOMEntity createEntity(final YangInstanceIdentifier entityPath) {
-        String entityType = null;
-        YangInstanceIdentifier entityId = null;
-        for (PathArgument pathArg: entityPath.getPathArguments()) {
-            if (pathArg instanceof NodeIdentifierWithPredicates) {
-                NodeIdentifierWithPredicates nodeKey = (NodeIdentifierWithPredicates) pathArg;
-                Entry<QName, Object> key = nodeKey.getKeyValues().entrySet().iterator().next();
-                if (ENTITY_TYPE_QNAME.equals(key.getKey())) {
-                    entityType = key.getValue().toString();
-                } else if (ENTITY_ID_QNAME.equals(key.getKey())) {
-                    entityId = (YangInstanceIdentifier) key.getValue();
-                }
-            }
-        }
-
-        return new DOMEntity(entityType, entityId);
-    }
-}
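// A minimal, self-contained sketch of the keyed-list layout that entityPath() and
// candidatePath() above assemble with YangInstanceIdentifier:
// /entity-owners/entity-type[type]/entity[id]/candidate[name]. The EntityPathSketch class
// and the sample values below are hypothetical and only show the nesting, not the real API.
final class EntityPathSketch {
    private EntityPathSketch() {
    }

    static String entityPath(final String entityType, final String entityId) {
        return "/entity-owners/entity-type[type='" + entityType + "']/entity[id='" + entityId + "']";
    }

    static String candidatePath(final String entityType, final String entityId, final String candidateName) {
        return entityPath(entityType, entityId) + "/candidate[name='" + candidateName + "']";
    }

    public static void main(final String[] args) {
        // Prints: /entity-owners/entity-type[type='topology']/entity[id='/node[id=1]']/candidate[name='member-1']
        System.out.println(candidatePath("topology", "/node[id=1]", "member-1"));
    }
}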
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipChangePublisher.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipChangePublisher.java
deleted file mode 100644 (file)
index eb1e188..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2017 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-
-/**
- * Abstract base for notifying EntityOwnershipListeners.
- *
- * @author Thomas Pantelis
- */
-abstract class EntityOwnershipChangePublisher {
-    abstract void notifyEntityOwnershipListeners(DOMEntity entity, boolean wasOwner, boolean isOwner, boolean hasOwner);
-
-    abstract String getLogId();
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerActor.java
deleted file mode 100644 (file)
index 42779b0..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import akka.actor.Props;
-import akka.japi.Creator;
-import com.google.common.base.Preconditions;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-
-/**
- * An actor which is responsible for notifying an EntityOwnershipListener of changes.
- *
- * @author Thomas Pantelis
- */
-final class EntityOwnershipListenerActor extends AbstractUntypedActor {
-    private final DOMEntityOwnershipListener listener;
-
-    private EntityOwnershipListenerActor(final DOMEntityOwnershipListener listener) {
-        this.listener = listener;
-    }
-
-    @Override
-    protected void handleReceive(final Object message) {
-        if (message instanceof DOMEntityOwnershipChange) {
-            onEntityOwnershipChanged((DOMEntityOwnershipChange)message);
-        } else {
-            unknownMessage(message);
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void onEntityOwnershipChanged(final DOMEntityOwnershipChange change) {
-        LOG.debug("Notifying EntityOwnershipListener {}: {}", listener, change);
-
-        try {
-            listener.ownershipChanged(change);
-        } catch (Exception e) {
-            LOG.error("Error notifying listener {}", listener, e);
-        }
-    }
-
-    static Props props(final DOMEntityOwnershipListener listener) {
-        return Props.create(new EntityOwnershipListenerCreator(listener));
-    }
-
-    private static final class EntityOwnershipListenerCreator implements Creator<EntityOwnershipListenerActor> {
-        private static final long serialVersionUID = 1L;
-
-        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
-                + "create remote instances of this actor and thus don't need it to be Serializable.")
-        private final DOMEntityOwnershipListener listener;
-
-        EntityOwnershipListenerCreator(final DOMEntityOwnershipListener listener) {
-            this.listener = Preconditions.checkNotNull(listener);
-        }
-
-        @Override
-        public EntityOwnershipListenerActor create() {
-            return new EntityOwnershipListenerActor(listener);
-        }
-    }
-}
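// A minimal sketch of the "never let a listener kill the notifier" pattern used in
// onEntityOwnershipChanged() above: the callback runs inside a broad catch so a misbehaving
// listener only produces an error report. SafeNotifier is a hypothetical name; the real
// actor reports through its SLF4J logger rather than System.err.
import java.util.function.Consumer;

final class SafeNotifier<T> {
    private final Consumer<T> listener;

    SafeNotifier(final Consumer<T> listener) {
        this.listener = listener;
    }

    @SuppressWarnings("checkstyle:IllegalCatch")
    void notifyChanged(final T change) {
        try {
            listener.accept(change);
        } catch (Exception e) {
            System.err.println("Error notifying listener " + listener + ": " + e);
        }
    }
}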
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerSupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerSupport.java
deleted file mode 100644 (file)
index ee786e7..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import akka.actor.ActorContext;
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Multimap;
-import java.util.Collection;
-import java.util.IdentityHashMap;
-import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-import javax.annotation.concurrent.GuardedBy;
-import javax.annotation.concurrent.ThreadSafe;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipChangeState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Manages EntityOwnershipListener registrations and notifications for the EntityOwnershipShard.
- *
- * @author Thomas Pantelis
- */
-@ThreadSafe
-class EntityOwnershipListenerSupport extends EntityOwnershipChangePublisher {
-    private static final Logger LOG = LoggerFactory.getLogger(EntityOwnershipListenerSupport.class);
-
-    private final String logId;
-    private final ActorContext actorContext;
-    private final ReadWriteLock listenerLock = new ReentrantReadWriteLock();
-
-    @GuardedBy("listenerLock")
-    private final Map<DOMEntityOwnershipListener, ListenerActorRefEntry> listenerActorMap = new IdentityHashMap<>();
-
-    @GuardedBy("listenerLock")
-    private final Multimap<String, DOMEntityOwnershipListener> entityTypeListenerMap = HashMultimap.create();
-
-    private volatile boolean inJeopardy = false;
-
-    EntityOwnershipListenerSupport(final ActorContext actorContext, final String logId) {
-        this.actorContext = actorContext;
-        this.logId = logId;
-    }
-
-    @Override
-    String getLogId() {
-        return logId;
-    }
-
-    /**
-     * Set the in-jeopardy flag and indicate its previous state.
-     *
-     * @param inJeopardy new value of the in-jeopardy flag
-     * @return Previous value of the flag.
-     */
-    @SuppressWarnings("checkstyle:hiddenField")
-    boolean setInJeopardy(final boolean inJeopardy) {
-        final boolean wasInJeopardy = this.inJeopardy;
-        this.inJeopardy = inJeopardy;
-        return wasInJeopardy;
-    }
-
-    void addEntityOwnershipListener(final String entityType, final DOMEntityOwnershipListener listener) {
-        LOG.debug("{}: Adding EntityOwnershipListener {} for entity type {}", logId, listener, entityType);
-
-        listenerLock.writeLock().lock();
-        try {
-            if (entityTypeListenerMap.put(entityType, listener)) {
-                ListenerActorRefEntry listenerEntry = listenerActorMap.get(listener);
-                if (listenerEntry == null) {
-                    listenerActorMap.put(listener, new ListenerActorRefEntry(listener));
-                } else {
-                    listenerEntry.referenceCount++;
-                }
-            }
-        } finally {
-            listenerLock.writeLock().unlock();
-        }
-    }
-
-    void removeEntityOwnershipListener(final String entityType, final DOMEntityOwnershipListener listener) {
-        LOG.debug("{}: Removing EntityOwnershipListener {} for entity type {}", logId, listener, entityType);
-
-        listenerLock.writeLock().lock();
-        try {
-            if (entityTypeListenerMap.remove(entityType, listener)) {
-                ListenerActorRefEntry listenerEntry = listenerActorMap.get(listener);
-
-                LOG.debug("{}: Found {}", logId, listenerEntry);
-
-                listenerEntry.referenceCount--;
-                if (listenerEntry.referenceCount <= 0) {
-                    listenerActorMap.remove(listener);
-
-                    if (listenerEntry.actorRef != null) {
-                        LOG.debug("Killing EntityOwnershipListenerActor {}", listenerEntry.actorRef);
-                        listenerEntry.actorRef.tell(PoisonPill.getInstance(), ActorRef.noSender());
-                    }
-                }
-            }
-        } finally {
-            listenerLock.writeLock().unlock();
-        }
-    }
-
-    @Override
-    void notifyEntityOwnershipListeners(final DOMEntity entity, final boolean wasOwner, final boolean isOwner,
-            final boolean hasOwner) {
-        listenerLock.readLock().lock();
-        try {
-            Collection<DOMEntityOwnershipListener> listeners = entityTypeListenerMap.get(entity.getType());
-            if (!listeners.isEmpty()) {
-                notifyListeners(entity, wasOwner, isOwner, hasOwner,
-                        listeners.stream().map(listenerActorMap::get).collect(Collectors.toList()));
-            }
-        } finally {
-            listenerLock.readLock().unlock();
-        }
-    }
-
-    void notifyEntityOwnershipListener(final DOMEntity entity, final boolean wasOwner, final boolean isOwner,
-            final boolean hasOwner, final DOMEntityOwnershipListener listener) {
-        listenerLock.readLock().lock();
-        try {
-            notifyListeners(entity, wasOwner, isOwner, hasOwner, ImmutableList.of(listenerActorMap.get(listener)));
-        } finally {
-            listenerLock.readLock().unlock();
-        }
-    }
-
-    @GuardedBy("listenerLock")
-    private void notifyListeners(final DOMEntity entity, final boolean wasOwner, final boolean isOwner,
-            final boolean hasOwner, final Collection<ListenerActorRefEntry> listenerEntries) {
-        DOMEntityOwnershipChange changed = new DOMEntityOwnershipChange(entity,
-                EntityOwnershipChangeState.from(wasOwner, isOwner, hasOwner), inJeopardy);
-        for (ListenerActorRefEntry entry: listenerEntries) {
-            ActorRef listenerActor = entry.actorFor();
-
-            LOG.debug("{}: Notifying EntityOwnershipListenerActor {} with {}", logId, listenerActor, changed);
-
-            listenerActor.tell(changed, ActorRef.noSender());
-        }
-    }
-
-    private class ListenerActorRefEntry {
-        final DOMEntityOwnershipListener listener;
-
-        @GuardedBy("listenerLock")
-        ActorRef actorRef;
-
-        @GuardedBy("listenerLock")
-        int referenceCount = 1;
-
-        ListenerActorRefEntry(final DOMEntityOwnershipListener listener) {
-            this.listener = listener;
-        }
-
-        ActorRef actorFor() {
-            if (actorRef == null) {
-                actorRef = actorContext.actorOf(EntityOwnershipListenerActor.props(listener));
-
-                LOG.debug("{}: Created EntityOwnershipListenerActor {} for listener {}", logId, actorRef, listener);
-            }
-
-            return actorRef;
-        }
-
-        @Override
-        public String toString() {
-            return "ListenerActorRefEntry [actorRef=" + actorRef + ", referenceCount=" + referenceCount + "]";
-        }
-    }
-}
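// A stripped-down sketch of the reference-counting scheme EntityOwnershipListenerSupport uses
// above: one entry per listener, a count bumped for every (entityType, listener) registration,
// and the entry discarded only when the count drops to zero. RefCountedRegistry is a
// hypothetical name; the real class additionally creates and kills a listener actor.
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class RefCountedRegistry<L> {
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private final Map<L, Integer> refCounts = new IdentityHashMap<>();

    void add(final L listener) {
        lock.writeLock().lock();
        try {
            refCounts.merge(listener, 1, Integer::sum);
        } finally {
            lock.writeLock().unlock();
        }
    }

    // Returns true when the last reference was removed, i.e. cleanup (actor shutdown) should run.
    boolean remove(final L listener) {
        lock.writeLock().lock();
        try {
            final Integer count = refCounts.get(listener);
            if (count == null) {
                return false;
            }
            if (count <= 1) {
                refCounts.remove(listener);
                return true;
            }
            refCounts.put(listener, count - 1);
            return false;
        } finally {
            lock.writeLock().unlock();
        }
    }
}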
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShard.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShard.java
deleted file mode 100644 (file)
index bb00f24..0000000
+++ /dev/null
@@ -1,715 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.CANDIDATE_NAME_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.CANDIDATE_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_ID_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNER_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_TYPES_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_TYPE_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_TYPE_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.candidateNodeKey;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.Cancellable;
-import akka.cluster.Cluster;
-import akka.cluster.ClusterEvent.CurrentClusterState;
-import akka.cluster.Member;
-import akka.cluster.MemberStatus;
-import akka.pattern.Patterns;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.CandidateRemoved;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RemoveAllCandidates;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.SelectOwner;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategy;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
-import org.opendaylight.controller.cluster.datastore.messages.SuccessReply;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.controller.cluster.raft.VotingState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Special Shard for EntityOwnership.
- *
- * @author Thomas Pantelis
- */
-class EntityOwnershipShard extends Shard {
-    private final MemberName localMemberName;
-    private final EntityOwnershipShardCommitCoordinator commitCoordinator;
-    private final EntityOwnershipListenerSupport listenerSupport;
-    private final Set<MemberName> downPeerMemberNames = new HashSet<>();
-    private final EntityOwnerSelectionStrategyConfig strategyConfig;
-    private final Map<YangInstanceIdentifier, Cancellable> entityToScheduledOwnershipTask = new HashMap<>();
-    private final EntityOwnershipStatistics entityOwnershipStatistics;
-    private boolean removeAllInitialCandidates = true;
-
-    protected EntityOwnershipShard(final Builder builder) {
-        super(builder);
-        this.localMemberName = builder.localMemberName;
-        this.commitCoordinator = new EntityOwnershipShardCommitCoordinator(builder.localMemberName, LOG);
-        this.listenerSupport = new EntityOwnershipListenerSupport(getContext(), persistenceId());
-        this.strategyConfig = builder.ownerSelectionStrategyConfig;
-        this.entityOwnershipStatistics = new EntityOwnershipStatistics();
-        this.entityOwnershipStatistics.init(getDataStore());
-    }
-
-    private static DatastoreContext noPersistenceDatastoreContext(final DatastoreContext datastoreContext) {
-        return DatastoreContext.newBuilderFrom(datastoreContext).persistent(false).build();
-    }
-
-    @Override
-    protected void onDatastoreContext(final DatastoreContext context) {
-        super.onDatastoreContext(noPersistenceDatastoreContext(context));
-    }
-
-    @Override
-    protected void onRecoveryComplete() {
-        super.onRecoveryComplete();
-
-        new CandidateListChangeListener(getSelf(), persistenceId()).init(getDataStore());
-        new EntityOwnerChangeListener(localMemberName, listenerSupport).init(getDataStore());
-    }
-
-    @Override
-    public void handleNonRaftCommand(final Object message) {
-        if (message instanceof RegisterCandidateLocal) {
-            onRegisterCandidateLocal((RegisterCandidateLocal) message);
-        } else if (message instanceof UnregisterCandidateLocal) {
-            onUnregisterCandidateLocal((UnregisterCandidateLocal) message);
-        } else if (message instanceof CandidateAdded) {
-            onCandidateAdded((CandidateAdded) message);
-        } else if (message instanceof CandidateRemoved) {
-            onCandidateRemoved((CandidateRemoved) message);
-        } else if (message instanceof PeerDown) {
-            onPeerDown((PeerDown) message);
-        } else if (message instanceof PeerUp) {
-            onPeerUp((PeerUp) message);
-        } else if (message instanceof RegisterListenerLocal) {
-            onRegisterListenerLocal((RegisterListenerLocal) message);
-        } else if (message instanceof UnregisterListenerLocal) {
-            onUnregisterListenerLocal((UnregisterListenerLocal) message);
-        } else if (message instanceof SelectOwner) {
-            onSelectOwner((SelectOwner) message);
-        } else if (message instanceof RemoveAllCandidates) {
-            onRemoveAllCandidates((RemoveAllCandidates) message);
-        } else if (!commitCoordinator.handleMessage(message, this)) {
-            super.handleNonRaftCommand(message);
-        }
-    }
-
-    private void onRemoveAllCandidates(final RemoveAllCandidates message) {
-        LOG.debug("{}: onRemoveAllCandidates: {}", persistenceId(), message);
-
-        removeCandidateFromEntities(message.getMemberName());
-    }
-
-    private void onSelectOwner(final SelectOwner selectOwner) {
-        LOG.debug("{}: onSelectOwner: {}", persistenceId(), selectOwner);
-
-        String currentOwner = getCurrentOwner(selectOwner.getEntityPath());
-        if (Strings.isNullOrEmpty(currentOwner)) {
-            writeNewOwner(selectOwner.getEntityPath(), newOwner(currentOwner, selectOwner.getAllCandidates(),
-                    selectOwner.getOwnerSelectionStrategy()));
-
-            Cancellable cancellable = entityToScheduledOwnershipTask.get(selectOwner.getEntityPath());
-            if (cancellable != null) {
-                if (!cancellable.isCancelled()) {
-                    cancellable.cancel();
-                }
-                entityToScheduledOwnershipTask.remove(selectOwner.getEntityPath());
-            }
-        }
-    }
-
-    private void onRegisterCandidateLocal(final RegisterCandidateLocal registerCandidate) {
-        LOG.debug("{}: onRegisterCandidateLocal: {}", persistenceId(), registerCandidate);
-
-        NormalizedNode<?, ?> entityOwners = entityOwnersWithCandidate(registerCandidate.getEntity().getType(),
-                registerCandidate.getEntity().getIdentifier(), localMemberName.getName());
-        commitCoordinator.commitModification(new MergeModification(ENTITY_OWNERS_PATH, entityOwners), this);
-
-        getSender().tell(SuccessReply.INSTANCE, getSelf());
-    }
-
-    private void onUnregisterCandidateLocal(final UnregisterCandidateLocal unregisterCandidate) {
-        LOG.debug("{}: onUnregisterCandidateLocal: {}", persistenceId(), unregisterCandidate);
-
-        DOMEntity entity = unregisterCandidate.getEntity();
-        YangInstanceIdentifier candidatePath = candidatePath(entity.getType(), entity.getIdentifier(),
-                localMemberName.getName());
-        commitCoordinator.commitModification(new DeleteModification(candidatePath), this);
-
-        getSender().tell(SuccessReply.INSTANCE, getSelf());
-    }
-
-    private void onRegisterListenerLocal(final RegisterListenerLocal registerListener) {
-        LOG.debug("{}: onRegisterListenerLocal: {}", persistenceId(), registerListener);
-
-        listenerSupport.addEntityOwnershipListener(registerListener.getEntityType(), registerListener.getListener());
-
-        getSender().tell(SuccessReply.INSTANCE, getSelf());
-
-        searchForEntities((entityTypeNode, entityNode) -> {
-            java.util.Optional<DataContainerChild<?, ?>> possibleType = entityTypeNode.getChild(ENTITY_TYPE_NODE_ID);
-            String entityType = possibleType.isPresent() ? possibleType.get().getValue().toString() : null;
-            if (registerListener.getEntityType().equals(entityType)) {
-                final boolean hasOwner;
-                final boolean isOwner;
-
-                java.util.Optional<DataContainerChild<?, ?>> possibleOwner = entityNode.getChild(ENTITY_OWNER_NODE_ID);
-                if (possibleOwner.isPresent()) {
-                    isOwner = localMemberName.getName().equals(possibleOwner.get().getValue().toString());
-                    hasOwner = true;
-                } else {
-                    isOwner = false;
-                    hasOwner = false;
-                }
-
-                DOMEntity entity = new DOMEntity(entityType,
-                    (YangInstanceIdentifier) entityNode.getChild(ENTITY_ID_NODE_ID).get().getValue());
-
-                listenerSupport.notifyEntityOwnershipListener(entity, false, isOwner, hasOwner,
-                    registerListener.getListener());
-            }
-        });
-    }
-
-    private void onUnregisterListenerLocal(final UnregisterListenerLocal unregisterListener) {
-        LOG.debug("{}: onUnregisterListenerLocal: {}", persistenceId(), unregisterListener);
-
-        listenerSupport.removeEntityOwnershipListener(unregisterListener.getEntityType(),
-                unregisterListener.getListener());
-
-        getSender().tell(SuccessReply.INSTANCE, getSelf());
-    }
-
-    void tryCommitModifications(final BatchedModifications modifications) {
-        if (isLeader()) {
-            LOG.debug("{}: Committing BatchedModifications {} locally", persistenceId(),
-                    modifications.getTransactionId());
-
-            // Note that it's possible the commit won't get consensus and will time out and not be applied
-            // to the state. However, we don't need to retry it in that case because it will be committed to
-            // the journal first and, once a majority of followers come back online and it is replicated,
-            // it will be applied at that point.
-            handleBatchedModificationsLocal(modifications, self());
-        } else {
-            final ActorSelection leader = getLeader();
-            if (leader != null) {
-                possiblyRemoveAllInitialCandidates(leader);
-
-                LOG.debug("{}: Sending BatchedModifications {} to leader {}", persistenceId(),
-                        modifications.getTransactionId(), leader);
-
-                Future<Object> future = Patterns.ask(leader, modifications, TimeUnit.SECONDS.toMillis(
-                        getDatastoreContext().getShardTransactionCommitTimeoutInSeconds()));
-
-                Patterns.pipe(future, getContext().dispatcher()).pipeTo(getSelf(), ActorRef.noSender());
-            }
-        }
-    }
-
-    void possiblyRemoveAllInitialCandidates(final ActorSelection leader) {
-        // The following handles removing all candidates on startup when re-joining with a remote leader. When a
-        // follower is detected as down, the leader will re-assign new owners to entities that were owned by the
-        // down member but doesn't remove the down member as a candidate, as the down node may actually be isolated
-        // and still running. Therefore on startup we send an initial message to the remote leader to remove any
-        // potential stale candidates we had previously registered, as it's possible a candidate may not be
-        // registered by a client in the new incarnation. We have to send the RemoveAllCandidates message prior to any
-        // pending registrations.
-        if (removeAllInitialCandidates && leader != null) {
-            removeAllInitialCandidates = false;
-            if (!isLeader()) {
-                LOG.debug("{} - got new leader {} on startup - sending RemoveAllCandidates", persistenceId(), leader);
-
-                leader.tell(new RemoveAllCandidates(localMemberName), ActorRef.noSender());
-            }
-        }
-    }
-
-    boolean hasLeader() {
-        return getLeader() != null && (!isLeader() || isLeaderActive());
-    }
-
-    /**
-     * Determine if we are in jeopardy based on observed RAFT state.
-     */
-    private static boolean inJeopardy(final RaftState state) {
-        switch (state) {
-            case Candidate:
-            case Follower:
-            case Leader:
-            case PreLeader:
-                return false;
-            case IsolatedLeader:
-                return true;
-            default:
-                throw new IllegalStateException("Unsupported RAFT state " + state);
-        }
-    }
-
-    private void notifyAllListeners() {
-        searchForEntities((entityTypeNode, entityNode) -> {
-            java.util.Optional<DataContainerChild<?, ?>> possibleType = entityTypeNode.getChild(ENTITY_TYPE_NODE_ID);
-            if (possibleType.isPresent()) {
-                final boolean hasOwner;
-                final boolean isOwner;
-
-                java.util.Optional<DataContainerChild<?, ?>> possibleOwner = entityNode.getChild(ENTITY_OWNER_NODE_ID);
-                if (possibleOwner.isPresent()) {
-                    isOwner = localMemberName.getName().equals(possibleOwner.get().getValue().toString());
-                    hasOwner = true;
-                } else {
-                    isOwner = false;
-                    hasOwner = false;
-                }
-
-                DOMEntity entity = new DOMEntity(possibleType.get().getValue().toString(),
-                    (YangInstanceIdentifier) entityNode.getChild(ENTITY_ID_NODE_ID).get().getValue());
-
-                listenerSupport.notifyEntityOwnershipListeners(entity, isOwner, isOwner, hasOwner);
-            }
-        });
-    }
-
-    @Override
-    protected void onStateChanged() {
-        boolean isLeader = isLeader();
-        LOG.debug("{}: onStateChanged: isLeader: {}, hasLeader: {}", persistenceId(), isLeader, hasLeader());
-
-        // Examine current RAFT state to see if we are in jeopardy, potentially notifying all listeners
-        final boolean inJeopardy = inJeopardy(getRaftState());
-        final boolean wasInJeopardy = listenerSupport.setInJeopardy(inJeopardy);
-        if (inJeopardy != wasInJeopardy) {
-            LOG.debug("{}: {} jeopardy state, notifying all listeners", persistenceId(),
-                inJeopardy ? "entered" : "left");
-            notifyAllListeners();
-        }
-
-        commitCoordinator.onStateChanged(this, isLeader);
-
-        super.onStateChanged();
-    }
-
-    @Override
-    protected void onLeaderChanged(final String oldLeader, final String newLeader) {
-        boolean isLeader = isLeader();
-        LOG.debug("{}: onLeaderChanged: oldLeader: {}, newLeader: {}, isLeader: {}", persistenceId(), oldLeader,
-                newLeader, isLeader);
-
-        if (isLeader) {
-
-            // Re-initialize the downPeerMemberNames from the current akka Cluster state. The previous leader, if any,
-            // is most likely down however it's possible we haven't received the PeerDown message yet.
-            initializeDownPeerMemberNamesFromClusterState();
-
-            // Clear all existing strategies so that they get re-created when we call createStrategy again
-            // This allows the strategies to be re-initialized with existing statistics maintained by
-            // EntityOwnershipStatistics
-            strategyConfig.clearStrategies();
-
-            // Re-assign owners for all members that are known to be down. In a cluster with more than
-            // 3 nodes it is possible for a node other than the leader to be down when the leadership transitions,
-            // so it makes sense to use this event to re-assign owners for those downed nodes.
-            Set<String> ownedBy = new HashSet<>(downPeerMemberNames.size() + 1);
-            for (MemberName downPeerName : downPeerMemberNames) {
-                ownedBy.add(downPeerName.getName());
-            }
-
-            // Also try to assign owners for entities that have no current owner. See explanation in onPeerUp.
-            ownedBy.add("");
-            selectNewOwnerForEntitiesOwnedBy(ownedBy);
-        } else {
-            // The leader changed - notify the coordinator to check if pending modifications need to be sent.
-            // While onStateChanged also does this, this method handles the case where the shard hears from a
-            // leader and stays in the follower state. In that case no behavior state change occurs.
-            commitCoordinator.onStateChanged(this, isLeader);
-        }
-
-        super.onLeaderChanged(oldLeader, newLeader);
-    }
-
-    @Override
-    protected void onVotingStateChangeComplete() {
-        // Re-evaluate ownership for all entities - if a member changed from voting to non-voting it should lose
-        // ownership and vice versa it now is a candidate to become owner.
-        final List<Modification> modifications = new ArrayList<>();
-        searchForEntities((entityTypeNode, entityNode) -> {
-            YangInstanceIdentifier entityPath = YangInstanceIdentifier.builder(ENTITY_TYPES_PATH)
-                    .node(entityTypeNode.getIdentifier()).node(ENTITY_NODE_ID).node(entityNode.getIdentifier())
-                    .node(ENTITY_OWNER_NODE_ID).build();
-
-            java.util.Optional<String> possibleOwner =
-                    entityNode.getChild(ENTITY_OWNER_NODE_ID).map(node -> node.getValue().toString());
-            String newOwner = newOwner(possibleOwner.orElse(null), getCandidateNames(entityNode),
-                    getEntityOwnerElectionStrategy(entityPath));
-
-            if (!newOwner.equals(possibleOwner.orElse(""))) {
-                modifications.add(new WriteModification(entityPath,
-                        ImmutableNodes.leafNode(ENTITY_OWNER_NODE_ID, newOwner)));
-            }
-        });
-
-        commitCoordinator.commitModifications(modifications, this);
-    }
-
-    private void initializeDownPeerMemberNamesFromClusterState() {
-        java.util.Optional<Cluster> cluster = getRaftActorContext().getCluster();
-        if (!cluster.isPresent()) {
-            return;
-        }
-
-        CurrentClusterState state = cluster.get().state();
-        Set<Member> unreachable = state.getUnreachable();
-
-        LOG.debug(
-            "{}: initializeDownPeerMemberNamesFromClusterState - current downPeerMemberNames: {}, unreachable: {}",
-            persistenceId(), downPeerMemberNames, unreachable);
-
-        downPeerMemberNames.clear();
-        for (Member m: unreachable) {
-            downPeerMemberNames.add(MemberName.forName(m.getRoles().iterator().next()));
-        }
-
-        for (Member m: state.getMembers()) {
-            if (m.status() != MemberStatus.up() && m.status() != MemberStatus.weaklyUp()) {
-                LOG.debug("{}: Adding down member with status {}", persistenceId(), m.status());
-                downPeerMemberNames.add(MemberName.forName(m.getRoles().iterator().next()));
-            }
-        }
-
-        LOG.debug("{}: new downPeerMemberNames: {}", persistenceId(), downPeerMemberNames);
-    }
-
-    private void onCandidateRemoved(final CandidateRemoved message) {
-        LOG.debug("{}: onCandidateRemoved: {}", persistenceId(), message);
-
-        if (isLeader()) {
-            String currentOwner = getCurrentOwner(message.getEntityPath());
-            writeNewOwner(message.getEntityPath(),
-                    newOwner(currentOwner, message.getRemainingCandidates(),
-                            getEntityOwnerElectionStrategy(message.getEntityPath())));
-        }
-    }
-
-    private EntityOwnerSelectionStrategy getEntityOwnerElectionStrategy(final YangInstanceIdentifier entityPath) {
-        final String entityType = EntityOwnersModel.entityTypeFromEntityPath(entityPath);
-        return strategyConfig.createStrategy(entityType, entityOwnershipStatistics.byEntityType(entityType));
-    }
-
-    private void onCandidateAdded(final CandidateAdded message) {
-        if (!isLeader()) {
-            return;
-        }
-
-        LOG.debug("{}: onCandidateAdded: {}", persistenceId(), message);
-
-        // Since a node's candidate is only added by the node itself, we can assume the node is up, so
-        // remove it from downPeerMemberNames.
-        downPeerMemberNames.remove(MemberName.forName(message.getNewCandidate()));
-
-        final String currentOwner = getCurrentOwner(message.getEntityPath());
-        final EntityOwnerSelectionStrategy strategy = getEntityOwnerElectionStrategy(message.getEntityPath());
-
-        // Available members is all the known peers - the number of peers that are down + self
-        // So if there are 2 peers and 1 is down then availableMembers will be 2
-        final int availableMembers = getRaftActorContext().getPeerIds().size() - downPeerMemberNames.size() + 1;
-
-        LOG.debug("{}: Using strategy {} to select owner, currentOwner = {}", persistenceId(), strategy, currentOwner);
-
-        if (strategy.getSelectionDelayInMillis() == 0L) {
-            writeNewOwner(message.getEntityPath(), newOwner(currentOwner, message.getAllCandidates(),
-                    strategy));
-        } else if (message.getAllCandidates().size() == availableMembers) {
-            LOG.debug("{}: Received the maximum candidates requests : {} writing new owner",
-                    persistenceId(), availableMembers);
-            cancelOwnerSelectionTask(message.getEntityPath());
-            writeNewOwner(message.getEntityPath(), newOwner(currentOwner, message.getAllCandidates(),
-                    strategy));
-        } else {
-            scheduleOwnerSelection(message.getEntityPath(), message.getAllCandidates(), strategy);
-        }
-    }
-
-    private void onPeerDown(final PeerDown peerDown) {
-        LOG.info("{}: onPeerDown: {}", persistenceId(), peerDown);
-
-        MemberName downMemberName = peerDown.getMemberName();
-        if (downPeerMemberNames.add(downMemberName) && isLeader()) {
-            // Select new owners for entities owned by the down peer and which have other candidates. For an entity for
-            // which the down peer is the only candidate, we leave it as the owner and don't clear it. This is done to
-            // handle the case where the peer member process is actually still running but the node is partitioned.
-            // When the partition is healed, the peer just remains as the owner. If the peer process actually restarted,
-            // it will first remove all its candidates on startup. If another candidate is registered during the time
-            // the peer is down, the new candidate will be selected as the new owner.
-
-            selectNewOwnerForEntitiesOwnedBy(ImmutableSet.of(downMemberName.getName()));
-        }
-    }
-
-    private void selectNewOwnerForEntitiesOwnedBy(final Set<String> ownedBy) {
-        final List<Modification> modifications = new ArrayList<>();
-        searchForEntitiesOwnedBy(ownedBy, (entityTypeNode, entityNode) -> {
-            YangInstanceIdentifier entityPath = YangInstanceIdentifier.builder(ENTITY_TYPES_PATH)
-                    .node(entityTypeNode.getIdentifier()).node(ENTITY_NODE_ID).node(entityNode.getIdentifier())
-                    .node(ENTITY_OWNER_NODE_ID).build();
-            String newOwner = newOwner(getCurrentOwner(entityPath), getCandidateNames(entityNode),
-                    getEntityOwnerElectionStrategy(entityPath));
-
-            if (!newOwner.isEmpty()) {
-                LOG.debug("{}: Found entity {}, writing new owner {}", persistenceId(), entityPath, newOwner);
-
-                modifications.add(new WriteModification(entityPath,
-                    ImmutableNodes.leafNode(ENTITY_OWNER_NODE_ID, newOwner)));
-
-            } else {
-                LOG.debug("{}: Found entity {} but no other candidates - not clearing owner", persistenceId(),
-                        entityPath);
-            }
-        });
-
-        commitCoordinator.commitModifications(modifications, this);
-    }
-
-    private void onPeerUp(final PeerUp peerUp) {
-        LOG.debug("{}: onPeerUp: {}", persistenceId(), peerUp);
-
-        downPeerMemberNames.remove(peerUp.getMemberName());
-
-        // Notify the coordinator to check if pending modifications need to be sent. We do this here
-        // to handle the case where the leader's peer address isn't known yet when a prior state or
-        // leader change occurred.
-        commitCoordinator.onStateChanged(this, isLeader());
-
-        if (isLeader()) {
-            // Try to assign owners for entities that have no current owner. It's possible the peer that is now up
-            // had previously registered as a candidate and was the only candidate but the owner write tx couldn't be
-            // committed due to a leader change. Eg, the leader is able to successfully commit the candidate add tx but
-            // becomes isolated before it can commit the owner change and switches to follower. The majority partition
-            // with a new leader has the candidate but the entity has no owner. When the partition is healed and the
-            // previously isolated leader reconnects, we'll receive onPeerUp and, if there's still no owner, the
-            // previous leader will gain ownership.
-            selectNewOwnerForEntitiesOwnedBy(ImmutableSet.of(""));
-        }
-    }
-
-    private static Collection<String> getCandidateNames(final MapEntryNode entity) {
-        return entity.getChild(CANDIDATE_NODE_ID).map(child -> {
-            Collection<MapEntryNode> candidates = ((MapNode) child).getValue();
-            Collection<String> candidateNames = new ArrayList<>(candidates.size());
-            for (MapEntryNode candidate: candidates) {
-                candidateNames.add(candidate.getChild(CANDIDATE_NAME_NODE_ID).get().getValue().toString());
-            }
-            return candidateNames;
-        }).orElse(ImmutableList.of());
-    }
-
-    private void searchForEntitiesOwnedBy(final Set<String> ownedBy, final EntityWalker walker) {
-        LOG.debug("{}: Searching for entities owned by {}", persistenceId(), ownedBy);
-
-        searchForEntities((entityTypeNode, entityNode) -> {
-            java.util.Optional<DataContainerChild<? extends PathArgument, ?>> possibleOwner =
-                    entityNode.getChild(ENTITY_OWNER_NODE_ID);
-            String currentOwner = possibleOwner.isPresent() ? possibleOwner.get().getValue().toString() : "";
-            if (ownedBy.contains(currentOwner)) {
-                walker.onEntity(entityTypeNode, entityNode);
-            }
-        });
-    }
-
-    private void removeCandidateFromEntities(final MemberName member) {
-        final List<Modification> modifications = new ArrayList<>();
-        searchForEntities((entityTypeNode, entityNode) -> {
-            if (hasCandidate(entityNode, member)) {
-                YangInstanceIdentifier entityId =
-                        (YangInstanceIdentifier) entityNode.getIdentifier().getKeyValues().get(ENTITY_ID_QNAME);
-                YangInstanceIdentifier candidatePath = candidatePath(
-                        entityTypeNode.getIdentifier().getKeyValues().get(ENTITY_TYPE_QNAME).toString(),
-                        entityId, member.getName());
-
-                LOG.info("{}: Found entity {}, removing candidate {}, path {}", persistenceId(), entityId,
-                        member, candidatePath);
-
-                modifications.add(new DeleteModification(candidatePath));
-            }
-        });
-
-        commitCoordinator.commitModifications(modifications, this);
-    }
-
-    private static boolean hasCandidate(final MapEntryNode entity, final MemberName candidateName) {
-        return entity.getChild(CANDIDATE_NODE_ID)
-                .flatMap(child -> ((MapNode)child).getChild(candidateNodeKey(candidateName.getName())))
-                .isPresent();
-    }
-
-    private void searchForEntities(final EntityWalker walker) {
-        Optional<NormalizedNode<?, ?>> possibleEntityTypes = getDataStore().readNode(ENTITY_TYPES_PATH);
-        if (!possibleEntityTypes.isPresent()) {
-            return;
-        }
-
-        for (MapEntryNode entityType:  ((MapNode) possibleEntityTypes.get()).getValue()) {
-            java.util.Optional<DataContainerChild<?, ?>> possibleEntities = entityType.getChild(ENTITY_NODE_ID);
-            if (!possibleEntities.isPresent()) {
-                // shouldn't happen but handle anyway
-                continue;
-            }
-
-            for (MapEntryNode entity:  ((MapNode) possibleEntities.get()).getValue()) {
-                walker.onEntity(entityType, entity);
-            }
-        }
-    }
-
-    private void writeNewOwner(final YangInstanceIdentifier entityPath, final String newOwner) {
-        LOG.debug("{}: Writing new owner {} for entity {}", persistenceId(), newOwner, entityPath);
-
-        commitCoordinator.commitModification(new WriteModification(entityPath.node(ENTITY_OWNER_QNAME),
-                ImmutableNodes.leafNode(ENTITY_OWNER_NODE_ID, newOwner)), this);
-    }
-
-    /**
-     * Schedule a new owner selection job, cancelling any outstanding job that has not already been cancelled.
-     */
-    private void scheduleOwnerSelection(final YangInstanceIdentifier entityPath, final Collection<String> allCandidates,
-                                       final EntityOwnerSelectionStrategy strategy) {
-        cancelOwnerSelectionTask(entityPath);
-
-        LOG.debug("{}: Scheduling owner selection after {} ms", persistenceId(), strategy.getSelectionDelayInMillis());
-
-        final Cancellable lastScheduledTask = context().system().scheduler().scheduleOnce(
-                FiniteDuration.apply(strategy.getSelectionDelayInMillis(), TimeUnit.MILLISECONDS), self(),
-                new SelectOwner(entityPath, allCandidates, strategy), context().system().dispatcher(), self());
-
-        entityToScheduledOwnershipTask.put(entityPath, lastScheduledTask);
-    }
-
-    private void cancelOwnerSelectionTask(final YangInstanceIdentifier entityPath) {
-        final Cancellable lastScheduledTask = entityToScheduledOwnershipTask.get(entityPath);
-        if (lastScheduledTask != null && !lastScheduledTask.isCancelled()) {
-            lastScheduledTask.cancel();
-        }
-    }
-
-    private String newOwner(final String currentOwner, final Collection<String> candidates,
-            final EntityOwnerSelectionStrategy ownerSelectionStrategy) {
-        Collection<String> viableCandidates = getViableCandidates(candidates);
-        if (viableCandidates.isEmpty()) {
-            return "";
-        }
-        return ownerSelectionStrategy.newOwner(currentOwner, viableCandidates);
-    }
-
-    private Collection<String> getViableCandidates(final Collection<String> candidates) {
-        Map<MemberName, VotingState> memberToVotingState = new HashMap<>();
-        getRaftActorContext().getPeers().forEach(peerInfo -> memberToVotingState.put(
-                ShardIdentifier.fromShardIdString(peerInfo.getId()).getMemberName(), peerInfo.getVotingState()));
-
-        Collection<String> viableCandidates = new ArrayList<>();
-
-        for (String candidate : candidates) {
-            MemberName memberName = MemberName.forName(candidate);
-            if (memberToVotingState.get(memberName) != VotingState.NON_VOTING
-                    && !downPeerMemberNames.contains(memberName)) {
-                viableCandidates.add(candidate);
-            }
-        }
-        return viableCandidates;
-    }
-
-    private String getCurrentOwner(final YangInstanceIdentifier entityId) {
-        Optional<NormalizedNode<?, ?>> optionalEntityOwner = getDataStore().readNode(entityId.node(ENTITY_OWNER_QNAME));
-        if (optionalEntityOwner.isPresent()) {
-            return optionalEntityOwner.get().getValue().toString();
-        }
-        return null;
-    }
-
-    @FunctionalInterface
-    private interface EntityWalker {
-        void onEntity(MapEntryNode entityTypeNode, MapEntryNode entityNode);
-    }
-
-    public static Builder newBuilder() {
-        return new Builder();
-    }
-
-    static class Builder extends Shard.AbstractBuilder<Builder, EntityOwnershipShard> {
-        private MemberName localMemberName;
-        private EntityOwnerSelectionStrategyConfig ownerSelectionStrategyConfig;
-
-        protected Builder() {
-            super(EntityOwnershipShard.class);
-        }
-
-        Builder localMemberName(final MemberName newLocalMemberName) {
-            checkSealed();
-            this.localMemberName = newLocalMemberName;
-            return this;
-        }
-
-        Builder ownerSelectionStrategyConfig(final EntityOwnerSelectionStrategyConfig newOwnerSelectionStrategyConfig) {
-            checkSealed();
-            this.ownerSelectionStrategyConfig = newOwnerSelectionStrategyConfig;
-            return this;
-        }
-
-        @Override
-        protected void verify() {
-            super.verify();
-            Preconditions.checkNotNull(localMemberName, "localMemberName should not be null");
-            Preconditions.checkNotNull(ownerSelectionStrategyConfig, "ownerSelectionStrategyConfig should not be null");
-        }
-    }
-}
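// A small sketch of the decision taken in onCandidateAdded() above, using the arithmetic from
// its comments: availableMembers = known peers - down peers + self. The OwnerSelectionSketch
// and Decision names are illustrative only.
final class OwnerSelectionSketch {
    enum Decision {
        WRITE_NOW,
        SCHEDULE_LATER
    }

    private OwnerSelectionSketch() {
    }

    static Decision decide(final int knownPeers, final int downPeers, final int candidateCount,
            final long selectionDelayMillis) {
        final int availableMembers = knownPeers - downPeers + 1;
        if (selectionDelayMillis == 0L || candidateCount == availableMembers) {
            // Either the strategy wants an immediate decision or every reachable member has already
            // registered a candidate, so there is nothing left to wait for.
            return Decision.WRITE_NOW;
        }
        return Decision.SCHEDULE_LATER;
    }

    public static void main(final String[] args) {
        // 2 known peers with 1 down: availableMembers == 2, so a single candidate means we keep waiting.
        System.out.println(decide(2, 1, 1, 500));  // SCHEDULE_LATER
        System.out.println(decide(2, 1, 2, 500));  // WRITE_NOW
    }
}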
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShardCommitCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShardCommitCoordinator.java
deleted file mode 100644 (file)
index f3adfbd..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-
-import akka.actor.ActorRef;
-import akka.actor.Cancellable;
-import akka.actor.Status.Failure;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-import javax.annotation.Nullable;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.slf4j.Logger;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Handles commits and retries for the EntityOwnershipShard.
- *
- * @author Thomas Pantelis
- */
-class EntityOwnershipShardCommitCoordinator {
-    private static final Object COMMIT_RETRY_MESSAGE = new Object() {
-        @Override
-        public String toString() {
-            return "entityCommitRetry";
-        }
-    };
-    private static final FrontendType FRONTEND_TYPE = FrontendType.forName("entity-ownership-internal");
-
-    private final Queue<Modification> pendingModifications = new LinkedList<>();
-    private final LocalHistoryIdentifier historyId;
-    private final Logger log;
-
-    private BatchedModifications inflightCommit;
-    private Cancellable retryCommitSchedule;
-    private long transactionIDCounter = 0;
-
-    EntityOwnershipShardCommitCoordinator(MemberName localMemberName, Logger log) {
-        this.log = Preconditions.checkNotNull(log);
-        historyId = new LocalHistoryIdentifier(
-                ClientIdentifier.create(FrontendIdentifier.create(localMemberName, FRONTEND_TYPE), 0), 0);
-    }
-
-    boolean handleMessage(Object message, EntityOwnershipShard shard) {
-        boolean handled = true;
-        if (CommitTransactionReply.isSerializedType(message)) {
-            // Successful reply from a local commit.
-            inflightCommitSucceeded(shard);
-        } else if (message instanceof akka.actor.Status.Failure) {
-            // Failure reply from a local commit.
-            inflightCommitFailure(((Failure) message).cause(), shard);
-        } else if (COMMIT_RETRY_MESSAGE.equals(message)) {
-            retryInflightCommit(shard);
-        } else {
-            handled = false;
-        }
-
-        return handled;
-    }
-
-    private void retryInflightCommit(EntityOwnershipShard shard) {
-        // Shouldn't be null but verify anyway
-        if (inflightCommit == null) {
-            return;
-        }
-
-        if (shard.hasLeader()) {
-            log.debug("Retrying commit for BatchedModifications {}", inflightCommit.getTransactionId());
-
-            shard.tryCommitModifications(inflightCommit);
-        } else {
-            scheduleInflightCommitRetry(shard);
-        }
-    }
-
-    void inflightCommitFailure(Throwable cause, EntityOwnershipShard shard) {
-        // This should've originated from a failed inflight commit but verify anyway
-        if (inflightCommit == null) {
-            return;
-        }
-
-        log.debug("Inflight BatchedModifications {} commit failed", inflightCommit.getTransactionId(), cause);
-
-        if (!(cause instanceof NoShardLeaderException)) {
-            // If the failure is other than NoShardLeaderException the commit may have been partially
-            // processed so retry with a new transaction ID to be safe.
-            newInflightCommitWithDifferentTransactionID();
-        }
-
-        scheduleInflightCommitRetry(shard);
-    }
-
-    private void scheduleInflightCommitRetry(EntityOwnershipShard shard) {
-        FiniteDuration duration = shard.getDatastoreContext().getShardRaftConfig().getElectionTimeOutInterval();
-
-        log.debug("Scheduling retry for BatchedModifications commit {} in {}",
-                inflightCommit.getTransactionId(), duration);
-
-        retryCommitSchedule = shard.getContext().system().scheduler().scheduleOnce(duration, shard.getSelf(),
-                COMMIT_RETRY_MESSAGE, shard.getContext().dispatcher(), ActorRef.noSender());
-    }
-
-    void inflightCommitSucceeded(EntityOwnershipShard shard) {
-        // Shouldn't be null but verify anyway
-        if (inflightCommit == null) {
-            return;
-        }
-
-        if (retryCommitSchedule != null) {
-            retryCommitSchedule.cancel();
-        }
-
-        log.debug("BatchedModifications commit {} succeeded", inflightCommit.getTransactionId());
-
-        inflightCommit = null;
-        commitNextBatch(shard);
-    }
-
-    void commitNextBatch(EntityOwnershipShard shard) {
-        if (inflightCommit != null || pendingModifications.isEmpty() || !shard.hasLeader()) {
-            return;
-        }
-
-        inflightCommit = newBatchedModifications();
-        Iterator<Modification> iter = pendingModifications.iterator();
-        while (iter.hasNext()) {
-            inflightCommit.addModification(iter.next());
-            iter.remove();
-            if (inflightCommit.getModifications().size()
-                    >= shard.getDatastoreContext().getShardBatchedModificationCount()) {
-                break;
-            }
-        }
-
-        log.debug("Committing next BatchedModifications {}, size {}", inflightCommit.getTransactionId(),
-                inflightCommit.getModifications().size());
-
-        shard.tryCommitModifications(inflightCommit);
-    }
-
-    void commitModification(Modification modification, EntityOwnershipShard shard) {
-        commitModifications(ImmutableList.of(modification), shard);
-    }
-
-    void commitModifications(List<Modification> modifications, EntityOwnershipShard shard) {
-        if (modifications.isEmpty()) {
-            return;
-        }
-
-        boolean hasLeader = shard.hasLeader();
-        if (inflightCommit != null || !hasLeader) {
-            if (log.isDebugEnabled()) {
-                log.debug("{} - adding modifications to pending",
-                        inflightCommit != null ? "A commit is inflight" : "No shard leader");
-            }
-
-            pendingModifications.addAll(modifications);
-        } else {
-            inflightCommit = newBatchedModifications();
-            inflightCommit.addModifications(modifications);
-            shard.tryCommitModifications(inflightCommit);
-        }
-    }
-
-    void onStateChanged(EntityOwnershipShard shard, boolean isLeader) {
-        shard.possiblyRemoveAllInitialCandidates(shard.getLeader());
-
-        possiblyPrunePendingCommits(shard, isLeader);
-
-        if (!isLeader && inflightCommit != null) {
-            // We're no longer the leader but we have an inflight local commit. This likely means we didn't get
-            // consensus for the commit and switched to follower due to another node with a higher term. We
-            // can't be sure if the commit was replicated to any node so we retry it here with a new
-            // transaction ID.
-            if (retryCommitSchedule != null) {
-                retryCommitSchedule.cancel();
-            }
-
-            newInflightCommitWithDifferentTransactionID();
-            retryInflightCommit(shard);
-        } else {
-            commitNextBatch(shard);
-        }
-    }
-
-    private void possiblyPrunePendingCommits(EntityOwnershipShard shard, boolean isLeader) {
-        // If we were the leader and transitioned to follower, we'll try to forward pending commits to the new leader.
-        // However certain commits, e.g. entity owner changes, should only be committed by a valid leader as the
-        // criteria used to determine the commit may be stale. Since we're no longer a valid leader, we should not
-        // forward such commits thus we prune the pending modifications. We still should forward local candidate change
-        // commits.
-        if (shard.hasLeader() && !isLeader) {
-            // We may have already submitted a transaction for replication and commit. We don't need the base Shard to
-            // forward it since we also have it stored in the inflightCommit and handle retries. So we just clear
-            // pending transactions and drop them.
-            shard.convertPendingTransactionsToMessages();
-
-            // Prune the inflightCommit.
-            if (inflightCommit != null) {
-                inflightCommit = pruneModifications(inflightCommit);
-            }
-
-            // Prune the subsequent pending modifications.
-            pendingModifications.removeIf(mod -> !canForwardModificationToNewLeader(mod));
-        }
-    }
-
-    @Nullable
-    private BatchedModifications pruneModifications(BatchedModifications toPrune) {
-        BatchedModifications prunedModifications = new BatchedModifications(toPrune.getTransactionId(),
-                toPrune.getVersion());
-        prunedModifications.setDoCommitOnReady(toPrune.isDoCommitOnReady());
-        if (toPrune.isReady()) {
-            prunedModifications.setReady(toPrune.getParticipatingShardNames());
-        }
-        prunedModifications.setTotalMessagesSent(toPrune.getTotalMessagesSent());
-        for (Modification mod: toPrune.getModifications()) {
-            if (canForwardModificationToNewLeader(mod)) {
-                prunedModifications.addModification(mod);
-            }
-        }
-
-        return !prunedModifications.getModifications().isEmpty() ? prunedModifications : null;
-    }
-
-    private boolean canForwardModificationToNewLeader(Modification mod) {
-        // If this is a WRITE of entity owner we don't want to forward it to a new leader since the criteria used
-        // to determine the new owner might be stale.
-        if (mod instanceof WriteModification) {
-            WriteModification writeMod = (WriteModification)mod;
-            boolean canForward = !writeMod.getPath().getLastPathArgument().getNodeType().equals(ENTITY_OWNER_QNAME);
-
-            if (!canForward) {
-                log.debug("Not forwarding WRITE modification for {} to new leader", writeMod.getPath());
-            }
-
-            return canForward;
-        }
-
-        return true;
-    }
-
-    private void newInflightCommitWithDifferentTransactionID() {
-        BatchedModifications newBatchedModifications = newBatchedModifications();
-        newBatchedModifications.addModifications(inflightCommit.getModifications());
-        inflightCommit = newBatchedModifications;
-    }
-
-    private BatchedModifications newBatchedModifications() {
-        BatchedModifications modifications = new BatchedModifications(
-            new TransactionIdentifier(historyId, ++transactionIDCounter), DataStoreVersions.CURRENT_VERSION);
-        modifications.setDoCommitOnReady(true);
-        modifications.setReady();
-        modifications.setTotalMessagesSent(1);
-        return modifications;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipStatistics.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipStatistics.java
deleted file mode 100644 (file)
index 442aba0..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityTypeFromEntityPath;
-
-import com.google.common.base.Strings;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import javax.annotation.Nonnull;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import tech.pantheon.triemap.TrieMap;
-
-/**
- * EntityOwnershipStatistics is a utility class that keeps track of ownership statistics for the candidates and
- * caches them for quick count queries.
- * <p/>
- * While the entity ownership model does maintain the information about which entity is owned by which candidate,
- * finding out how many entities of a given type are owned by a given candidate is not an efficient query.
- */
-class EntityOwnershipStatistics extends AbstractEntityOwnerChangeListener {
-
-    private final TrieMap<String, TrieMap<String, Long>> statistics = TrieMap.create();
-
-    EntityOwnershipStatistics() {
-    }
-
-    @Override
-    public void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
-        for (DataTreeCandidate change : changes) {
-            DataTreeCandidateNode changeRoot = change.getRootNode();
-            LeafNode<?> ownerLeaf = (LeafNode<?>) changeRoot.getDataAfter().get();
-            String entityType = entityTypeFromEntityPath(change.getRootPath());
-            String newOwner = extractOwner(ownerLeaf);
-            if (!Strings.isNullOrEmpty(newOwner)) {
-                updateStatistics(entityType, newOwner, 1);
-            }
-
-            Optional<NormalizedNode<?, ?>> dataBefore = changeRoot.getDataBefore();
-            if (dataBefore.isPresent()) {
-                String origOwner = extractOwner((LeafNode<?>) changeRoot.getDataBefore().get());
-                if (!Strings.isNullOrEmpty(origOwner)) {
-                    updateStatistics(entityType, origOwner, -1);
-                }
-            }
-        }
-    }
-
-    Map<String, Map<String, Long>> all() {
-        Map<String, Map<String, Long>> snapshot = new HashMap<>();
-        for (String entityType : statistics.immutableSnapshot().keySet()) {
-            snapshot.put(entityType, byEntityType(entityType));
-        }
-        return snapshot;
-    }
-
-    Map<String, Long> byEntityType(final String entityType) {
-        if (statistics.get(entityType) != null) {
-            return statistics.get(entityType).immutableSnapshot();
-        }
-        return new HashMap<>();
-    }
-
-    private void updateStatistics(final String entityType, final String candidateName, final long count) {
-        TrieMap<String, Long> map = statistics.get(entityType);
-        if (map == null) {
-            map = TrieMap.create();
-            map.put(candidateName, count);
-            statistics.put(entityType, map);
-        } else {
-            map.merge(candidateName, count, (ownedEntities, addedEntities) -> ownedEntities + addedEntities);
-        }
-    }
-}
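For context: the removed EntityOwnershipStatistics keeps a two-level map of entity type to candidate to owned-entity count, so that "how many entities of type X does member Y own" is a cheap lookup rather than a walk of the ownership tree. A minimal sketch of that bookkeeping pattern, assuming only tech.pantheon.triemap on the classpath (names and values are illustrative, not part of the removed class):

    import tech.pantheon.triemap.TrieMap;

    final class OwnershipStatsSketch {
        public static void main(final String[] args) {
            final TrieMap<String, TrieMap<String, Long>> stats = TrieMap.create();

            // "member-1" gains ownership of one "topology" entity ...
            stats.computeIfAbsent("topology", ignored -> TrieMap.create())
                    .merge("member-1", 1L, Long::sum);
            // ... and later loses it again.
            stats.get("topology").merge("member-1", -1L, Long::sum);

            // Count queries run against an immutable snapshot, as in the removed byEntityType().
            final long owned = stats.get("topology").immutableSnapshot().getOrDefault("member-1", 0L);
            System.out.println("member-1 owns " + owned + " topology entities");
        }
    }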
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/CandidateAdded.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/CandidateAdded.java
deleted file mode 100644 (file)
index 6340880..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import java.util.Collection;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Message sent when a new candidate is added for an entity.
- *
- * @author Moiz Raja
- * @author Thomas Pantelis
- */
-public class CandidateAdded {
-    private final YangInstanceIdentifier entityPath;
-    private final Collection<String> allCandidates;
-    private final String newCandidate;
-
-    public CandidateAdded(YangInstanceIdentifier entityPath, String newCandidate, Collection<String> allCandidates) {
-        this.entityPath = entityPath;
-        this.newCandidate = newCandidate;
-        this.allCandidates = allCandidates;
-    }
-
-    public YangInstanceIdentifier getEntityPath() {
-        return entityPath;
-    }
-
-    public Collection<String> getAllCandidates() {
-        return allCandidates;
-    }
-
-    public String getNewCandidate() {
-        return newCandidate;
-    }
-
-    @Override
-    public String toString() {
-        return "CandidateAdded [entityPath=" + entityPath + ", newCandidate=" + newCandidate + ", allCandidates="
-                + allCandidates + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/CandidateRemoved.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/CandidateRemoved.java
deleted file mode 100644 (file)
index 64a333f..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import java.util.Collection;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Message sent when a candidate is removed for an entity.
- *
- * @author Moiz Raja
- * @author Thomas Pantelis
- */
-public class CandidateRemoved {
-    private final YangInstanceIdentifier entityPath;
-    private final String removedCandidate;
-    private final Collection<String> remainingCandidates;
-
-    public CandidateRemoved(YangInstanceIdentifier entityPath, String removedCandidate,
-            Collection<String> remainingCandidates) {
-        this.entityPath = entityPath;
-        this.removedCandidate = removedCandidate;
-        this.remainingCandidates = remainingCandidates;
-    }
-
-    public YangInstanceIdentifier getEntityPath() {
-        return entityPath;
-    }
-
-    public String getRemovedCandidate() {
-        return removedCandidate;
-    }
-
-    public Collection<String> getRemainingCandidates() {
-        return remainingCandidates;
-    }
-
-    @Override
-    public String toString() {
-        return "CandidateRemoved [entityPath=" + entityPath + ", removedCandidate=" + removedCandidate
-                + ", remainingCandidates=" + remainingCandidates + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RegisterCandidateLocal.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RegisterCandidateLocal.java
deleted file mode 100644 (file)
index 3450033..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-
-/**
- * Message sent to the local EntityOwnershipShard to register a candidate.
- *
- * @author Thomas Pantelis
- */
-public class RegisterCandidateLocal {
-    private final DOMEntity entity;
-
-    public RegisterCandidateLocal(DOMEntity entity) {
-        this.entity = entity;
-    }
-
-    public DOMEntity getEntity() {
-        return entity;
-    }
-
-    @Override
-    public String toString() {
-        return "RegisterCandidateLocal [entity=" + entity + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RegisterListenerLocal.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RegisterListenerLocal.java
deleted file mode 100644 (file)
index dd52cda..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-
-/**
- * Message sent to the local EntityOwnershipShard to register an EntityOwnershipListener.
- *
- * @author Thomas Pantelis
- */
-public class RegisterListenerLocal {
-    private final DOMEntityOwnershipListener listener;
-    private final String entityType;
-
-    public RegisterListenerLocal(DOMEntityOwnershipListener listener, String entityType) {
-        this.listener = listener;
-        this.entityType = entityType;
-    }
-
-    public DOMEntityOwnershipListener getListener() {
-        return listener;
-    }
-
-    public String getEntityType() {
-        return entityType;
-    }
-
-    @Override
-    public String toString() {
-        return "RegisterListenerLocal [entityType=" + entityType + ", listener=" + listener + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RemoveAllCandidates.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/RemoveAllCandidates.java
deleted file mode 100644 (file)
index 17f80cd..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015, 2016 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Message sent by an EntityOwnershipShard to its leader on startup to remove all its candidates.
- *
- * @author Thomas Pantelis
- */
-public class RemoveAllCandidates implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    private final MemberName memberName;
-
-    public RemoveAllCandidates(MemberName memberName) {
-        this.memberName = memberName;
-    }
-
-    public MemberName getMemberName() {
-        return memberName;
-    }
-
-    @Override
-    public String toString() {
-        return "RemoveAllCandidates [memberName=" + memberName + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/SelectOwner.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/SelectOwner.java
deleted file mode 100644 (file)
index 8cd4f78..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategy;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Message sent when a new owner needs to be selected.
- */
-public class SelectOwner {
-    private final YangInstanceIdentifier entityPath;
-    private final Collection<String> allCandidates;
-    private final EntityOwnerSelectionStrategy ownerSelectionStrategy;
-
-    public SelectOwner(YangInstanceIdentifier entityPath, Collection<String> allCandidates,
-                       EntityOwnerSelectionStrategy ownerSelectionStrategy) {
-
-        this.entityPath = Preconditions.checkNotNull(entityPath, "entityPath should not be null");
-        this.allCandidates = Preconditions.checkNotNull(allCandidates, "allCandidates should not be null");
-        this.ownerSelectionStrategy = Preconditions.checkNotNull(ownerSelectionStrategy,
-                "ownerSelectionStrategy should not be null");
-    }
-
-    public YangInstanceIdentifier getEntityPath() {
-        return entityPath;
-    }
-
-    public Collection<String> getAllCandidates() {
-        return allCandidates;
-    }
-
-    public EntityOwnerSelectionStrategy getOwnerSelectionStrategy() {
-        return ownerSelectionStrategy;
-    }
-
-    @Override
-    public String toString() {
-        return "SelectOwner [entityPath=" + entityPath + ", allCandidates=" + allCandidates
-                + ", ownerSelectionStrategy=" + ownerSelectionStrategy + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/UnregisterCandidateLocal.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/UnregisterCandidateLocal.java
deleted file mode 100644 (file)
index 8a94fab..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-
-/**
- * Message sent to the local EntityOwnershipShard to unregister a candidate.
- *
- * @author Thomas Pantelis
- */
-public class UnregisterCandidateLocal {
-    private final DOMEntity entity;
-
-    public UnregisterCandidateLocal(DOMEntity entity) {
-        this.entity = entity;
-    }
-
-    public DOMEntity getEntity() {
-        return entity;
-    }
-
-    @Override
-    public String toString() {
-        return "UnregisterCandidateLocal [entity=" + entity + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/UnregisterListenerLocal.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/messages/UnregisterListenerLocal.java
deleted file mode 100644 (file)
index 03db369..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership.messages;
-
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-
-/**
- * Message sent to the local EntityOwnershipShard to unregister an EntityOwnershipListener.
- *
- * @author Thomas Pantelis
- */
-public class UnregisterListenerLocal {
-    private final DOMEntityOwnershipListener listener;
-    private final String entityType;
-
-    public UnregisterListenerLocal(DOMEntityOwnershipListener listener, String entityType) {
-        this.listener = listener;
-        this.entityType = entityType;
-    }
-
-    public DOMEntityOwnershipListener getListener() {
-        return listener;
-    }
-
-    public String getEntityType() {
-        return entityType;
-    }
-
-    @Override
-    public String toString() {
-        return "UnregisterListenerLocal [entityType=" + entityType + ", listener=" + listener + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/AbstractEntityOwnerSelectionStrategy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/AbstractEntityOwnerSelectionStrategy.java
deleted file mode 100644 (file)
index a757f49..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import java.util.Map;
-
-public abstract class AbstractEntityOwnerSelectionStrategy implements EntityOwnerSelectionStrategy {
-
-    private final long selectionDelayInMillis;
-    private final Map<String, Long> initialStatistics;
-
-    protected AbstractEntityOwnerSelectionStrategy(long selectionDelayInMillis, Map<String, Long> initialStatistics) {
-        this.selectionDelayInMillis = selectionDelayInMillis;
-        this.initialStatistics = initialStatistics;
-    }
-
-    @Override
-    public long getSelectionDelayInMillis() {
-        return selectionDelayInMillis;
-    }
-
-    public Map<String, Long> getInitialStatistics() {
-        return initialStatistics;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategy.java
deleted file mode 100644 (file)
index 38899ec..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import java.util.Collection;
-import javax.annotation.Nullable;
-
-/**
- * An EntityOwnerSelectionStrategy is to be used by the EntityOwnershipShard to select a new owner from a collection
- * of candidates.
- */
-public interface EntityOwnerSelectionStrategy {
-    /**
-     * Returns the time in millis owner selection should be delayed.
-     *
-     * @return the time in millis owner selection should be delayed
-     */
-    long getSelectionDelayInMillis();
-
-
-    /**
-     * Selects a new owner from the list of viable candidates.
-     *
-     * @param currentOwner the current owner of the entity if any, null otherwise
-     * @param viableCandidates the available candidates from which to choose the new owner
-     * @return the new owner
-     */
-    String newOwner(@Nullable String currentOwner, Collection<String> viableCandidates);
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfig.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfig.java
deleted file mode 100644 (file)
index a5ffc49..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import java.lang.reflect.InvocationTargetException;
-import java.util.HashMap;
-import java.util.Map;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * FIXME: this is a simple registry service, except it also loads classes.
- */
-public final class EntityOwnerSelectionStrategyConfig {
-    private static final Logger LOG = LoggerFactory.getLogger(EntityOwnerSelectionStrategyConfig.class);
-    private final Map<String, StrategyInfo> entityTypeToStrategyInfo = new HashMap<>();
-    private final Map<String, EntityOwnerSelectionStrategy> entityTypeToOwnerSelectionStrategy = new HashMap<>();
-
-    private EntityOwnerSelectionStrategyConfig() {
-
-    }
-
-    public boolean isStrategyConfigured(final String entityType) {
-        return entityTypeToStrategyInfo.get(entityType) != null;
-    }
-
-    public EntityOwnerSelectionStrategy createStrategy(final String entityType,
-            final Map<String, Long> initialStatistics) {
-        final EntityOwnerSelectionStrategy strategy;
-        final EntityOwnerSelectionStrategy existingStrategy = entityTypeToOwnerSelectionStrategy.get(entityType);
-        if (existingStrategy != null) {
-            strategy = existingStrategy;
-        } else {
-            EntityOwnerSelectionStrategyConfig.StrategyInfo strategyInfo = entityTypeToStrategyInfo.get(entityType);
-            if (strategyInfo == null) {
-                strategy = FirstCandidateSelectionStrategy.INSTANCE;
-            } else {
-                strategy = strategyInfo.createStrategy(initialStatistics);
-            }
-            entityTypeToOwnerSelectionStrategy.put(entityType, strategy);
-        }
-        return strategy;
-    }
-
-    /**
-     * This class should not exist. It contains a single long, which is passed to the constructor (via reflection).
-     * We are getting that information from a BundleContext. We are running in OSGi environment, hence this class
-     * needs to be deployed in its own bundle, with its own configuration.
-     * If this is used internally, it needs to be relocated into a separate package along with the implementation
-     * using it.
-     *
-     * @deprecated FIXME: THIS IS CONFIGURATION FOR A CUSTOM-LOADED CLASS CONSTRUCTOR
-     */
-    @Deprecated
-    public void clearStrategies() {
-        entityTypeToOwnerSelectionStrategy.clear();
-    }
-
-    private static final class StrategyInfo {
-        private final Class<? extends EntityOwnerSelectionStrategy> strategyClass;
-        private final long delay;
-
-        private StrategyInfo(final Class<? extends EntityOwnerSelectionStrategy> strategyClass, final long delay) {
-            this.strategyClass = strategyClass;
-            this.delay = delay;
-        }
-
-        public EntityOwnerSelectionStrategy createStrategy(final Map<String, Long> initialStatistics) {
-            try {
-                return strategyClass.getDeclaredConstructor(long.class, Map.class)
-                        .newInstance(delay, initialStatistics);
-            } catch (InstantiationException | IllegalAccessException | InvocationTargetException
-                    | NoSuchMethodException e) {
-                LOG.warn("could not create custom strategy", e);
-            }
-            return FirstCandidateSelectionStrategy.INSTANCE;
-        }
-    }
-
-    public static Builder newBuilder() {
-        return new Builder(new EntityOwnerSelectionStrategyConfig());
-    }
-
-    public static final class Builder {
-        private final EntityOwnerSelectionStrategyConfig config;
-
-        Builder(final EntityOwnerSelectionStrategyConfig config) {
-            this.config = config;
-        }
-
-        public Builder addStrategy(final String entityType,
-                final Class<? extends EntityOwnerSelectionStrategy> strategy, final long delay) {
-            config.entityTypeToStrategyInfo.put(entityType, new StrategyInfo(strategy, delay));
-            return this;
-        }
-
-        public EntityOwnerSelectionStrategyConfig build() {
-            return this.config;
-        }
-    }
-}
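For context: callers configured the removed registry through its builder; entity types without an explicit entry fall back to FirstCandidateSelectionStrategy. A minimal sketch using only the public methods visible above (the "topology" type and the 5000 ms delay are made-up values):

    import java.util.Map;
    import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategy;
    import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
    import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.LeastLoadedCandidateSelectionStrategy;

    final class StrategyConfigSketch {
        public static void main(final String[] args) {
            final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfig.newBuilder()
                    .addStrategy("topology", LeastLoadedCandidateSelectionStrategy.class, 5000L)
                    .build();

            // The strategy is instantiated reflectively with (delay, initialStatistics) and cached per type.
            final EntityOwnerSelectionStrategy strategy = config.createStrategy("topology", Map.of());
            System.out.println(strategy.getSelectionDelayInMillis());   // 5000
        }
    }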
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfigReader.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfigReader.java
deleted file mode 100644 (file)
index 4ac28a6..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import com.google.common.base.Preconditions;
-import java.util.Map;
-import java.util.Map.Entry;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig.Builder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Reads the entity owner selection strategy config.
- *
- */
-public final class EntityOwnerSelectionStrategyConfigReader {
-
-    private static final Logger LOG = LoggerFactory.getLogger(EntityOwnerSelectionStrategyConfigReader.class);
-    private static final String ENTITY_TYPE_PREFIX = "entity.type.";
-
-    private EntityOwnerSelectionStrategyConfigReader() {
-        // Hidden on purpose
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public static EntityOwnerSelectionStrategyConfig loadStrategyWithConfig(final Map<Object, Object> props) {
-        final EntityOwnerSelectionStrategyConfig.Builder builder = EntityOwnerSelectionStrategyConfig.newBuilder();
-
-        if (props != null && !props.isEmpty()) {
-            parseConfiguration(builder, props);
-        } else {
-            if (props == null) {
-                LOG.debug("Could not read strategy configuration file, will use default configuration.");
-            } else {
-                LOG.debug("Configuration file is empty, will use default configuration.");
-            }
-        }
-        return builder.build();
-    }
-
-    private static EntityOwnerSelectionStrategyConfig parseConfiguration(final Builder builder,
-            final Map<Object, Object> properties) {
-
-        for (final Entry<Object, Object> entry : properties.entrySet()) {
-            final String key = (String) entry.getKey();
-            if (!key.startsWith(ENTITY_TYPE_PREFIX)) {
-                LOG.debug("Ignoring non-conforming property key : {}");
-                continue;
-            }
-
-            final String[] strategyClassAndDelay = ((String) properties.get(key)).split(",");
-            final Class<? extends EntityOwnerSelectionStrategy> aClass = loadClass(strategyClassAndDelay[0]);
-
-            final long delay;
-            if (strategyClassAndDelay.length > 1) {
-                delay = Long.parseLong(strategyClassAndDelay[1]);
-            } else {
-                delay = 0;
-            }
-
-            final String entityType = key.substring(key.lastIndexOf(".") + 1);
-            builder.addStrategy(entityType, aClass, delay);
-            LOG.debug("Entity Type '{}' using strategy {} delay {}", entityType, aClass, delay);
-        }
-
-        return builder.build();
-    }
-
-    @SuppressWarnings("unchecked")
-    private static Class<? extends EntityOwnerSelectionStrategy> loadClass(final String strategyClassAndDelay) {
-        final Class<?> clazz;
-        try {
-            clazz = EntityOwnerSelectionStrategyConfigReader.class.getClassLoader().loadClass(strategyClassAndDelay);
-        } catch (final ClassNotFoundException e) {
-            throw new IllegalArgumentException("Failed to load strategy " + strategyClassAndDelay, e);
-        }
-
-        Preconditions.checkArgument(EntityOwnerSelectionStrategy.class.isAssignableFrom(clazz),
-            "Selected implementation %s must implement EntityOwnerSelectionStrategy, clazz");
-
-        return (Class<? extends EntityOwnerSelectionStrategy>) clazz;
-    }
-}
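For context: the removed reader only looks at property keys starting with "entity.type." and expects values of the form "<strategy class name>[,<selection delay millis>]", taking the entity type from the text after the last dot of the key. A minimal sketch under those assumptions (the property value below is illustrative):

    import java.util.HashMap;
    import java.util.Map;
    import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
    import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfigReader;

    final class ConfigReaderSketch {
        public static void main(final String[] args) {
            final Map<Object, Object> props = new HashMap<>();
            // Key: entity.type.<entityType>   Value: <strategy class>[,<delay millis>]
            props.put("entity.type.topology",
                "org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy."
                    + "LeastLoadedCandidateSelectionStrategy,2500");

            final EntityOwnerSelectionStrategyConfig config =
                EntityOwnerSelectionStrategyConfigReader.loadStrategyWithConfig(props);
            System.out.println(config.isStrategyConfigured("topology"));   // true
        }
    }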
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/FirstCandidateSelectionStrategy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/FirstCandidateSelectionStrategy.java
deleted file mode 100644 (file)
index c4c93b7..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-
-/**
- * The FirstCandidateSelectionStrategy always selects the first viable candidate from the list of candidates.
- */
-public class FirstCandidateSelectionStrategy extends AbstractEntityOwnerSelectionStrategy {
-
-    public static final FirstCandidateSelectionStrategy INSTANCE =
-            new FirstCandidateSelectionStrategy(0L, Collections.emptyMap());
-
-    public FirstCandidateSelectionStrategy(long selectionDelayInMillis, Map<String, Long> initialStatistics) {
-        super(selectionDelayInMillis, initialStatistics);
-    }
-
-    @Override
-    public String newOwner(String currentOwner, Collection<String> viableCandidates) {
-        Preconditions.checkArgument(viableCandidates.size() > 0, "No viable candidates provided");
-        return viableCandidates.iterator().next();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LeastLoadedCandidateSelectionStrategy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LeastLoadedCandidateSelectionStrategy.java
deleted file mode 100644 (file)
index 7102cbb..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import javax.annotation.Nullable;
-
-/**
- * The LeastLoadedCandidateSelectionStrategy assigns ownership for an entity to the candidate which owns the least
- * number of entities.
- */
-public class LeastLoadedCandidateSelectionStrategy extends AbstractEntityOwnerSelectionStrategy {
-    private final Map<String, Long> localStatistics = new HashMap<>();
-
-    protected LeastLoadedCandidateSelectionStrategy(long selectionDelayInMillis, Map<String, Long> initialStatistics) {
-        super(selectionDelayInMillis, initialStatistics);
-
-        localStatistics.putAll(initialStatistics);
-    }
-
-    @Override
-    public String newOwner(@Nullable String currentOwner, Collection<String> viableCandidates) {
-        Preconditions.checkArgument(viableCandidates.size() > 0);
-        String leastLoadedCandidate = null;
-        long leastLoadedCount = Long.MAX_VALUE;
-
-        if (!Strings.isNullOrEmpty(currentOwner)) {
-            long localVal = MoreObjects.firstNonNull(localStatistics.get(currentOwner), 0L);
-            localStatistics.put(currentOwner, localVal - 1);
-        }
-
-        for (String candidateName : viableCandidates) {
-            long val = MoreObjects.firstNonNull(localStatistics.get(candidateName), 0L);
-            if (val < leastLoadedCount) {
-                leastLoadedCount = val;
-                leastLoadedCandidate = candidateName;
-            }
-        }
-
-        if (leastLoadedCandidate == null) {
-            leastLoadedCandidate = viableCandidates.iterator().next();
-        }
-
-        localStatistics.put(leastLoadedCandidate, leastLoadedCount + 1);
-        return leastLoadedCandidate;
-    }
-
-    @VisibleForTesting
-    Map<String, Long> getLocalStatistics() {
-        return localStatistics;
-    }
-}
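For context: the removed least-loaded strategy keeps a local copy of the per-candidate counts, decrements the outgoing owner (if any) and hands ownership to the candidate with the lowest count, treating unknown candidates as owning nothing. A minimal sketch; it has to live in the same package because the constructor is protected, and the member names and counts are made up:

    package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;

    import java.util.List;
    import java.util.Map;

    final class LeastLoadedSketch {
        public static void main(final String[] args) {
            // member-1 owns 5 entities, member-2 owns 2, member-3 is absent from the statistics (counts as 0).
            final LeastLoadedCandidateSelectionStrategy strategy =
                new LeastLoadedCandidateSelectionStrategy(0L, Map.of("member-1", 5L, "member-2", 2L));

            // No current owner, three viable candidates: the least loaded one wins.
            System.out.println(strategy.newOwner(null, List.of("member-1", "member-2", "member-3")));   // member-3
        }
    }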
index 0ea2e3c8436ba399c089e0998fef1fa8b8d82801..205de939559caaa684a2c21a7034489f5e40746d 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.exceptions;
 
 import com.google.common.base.Strings;
-import javax.annotation.Nullable;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 
 /**
  * Exception indicating a shard has no current leader.
@@ -18,12 +18,16 @@ import javax.annotation.Nullable;
 public class NoShardLeaderException extends RuntimeException {
     private static final long serialVersionUID = 1L;
 
-    public NoShardLeaderException(String message) {
+    public NoShardLeaderException(final String message) {
         super(message);
     }
 
-    public NoShardLeaderException(@Nullable String message, String shardName) {
+    public NoShardLeaderException(final String message, final String shardName) {
         super(String.format("%sShard %s currently has no leader. Try again later.",
-                (Strings.isNullOrEmpty(message) ? "" : message + ". "), shardName));
+                Strings.isNullOrEmpty(message) ? "" : message + ". ", shardName));
+    }
+
+    public NoShardLeaderException(final ShardIdentifier shardId) {
+        this("Shard " + shardId + " currently has no leader. Try again later.");
     }
 }
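For context: the added constructor builds the standard "currently has no leader" message directly from a ShardIdentifier. A minimal sketch, assuming the identifier's string form is the usual "<member>-shard-<name>-<type>" full name (the shard id below is made up):

    import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
    import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;

    final class NoLeaderSketch {
        public static void main(final String[] args) {
            final ShardIdentifier shardId = ShardIdentifier.fromShardIdString("member-1-shard-inventory-config");
            // Message: "Shard member-1-shard-inventory-config currently has no leader. Try again later."
            throw new NoShardLeaderException(shardId);
        }
    }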
index 956fa36d102e0af0437d8a35286a2bb3f2ace891..74d100540a1f807807db62578a1ab3f397b269e2 100644 (file)
@@ -5,10 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.identifiers;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
@@ -24,9 +25,9 @@ public class ShardIdentifier {
     private final String fullName;
 
     ShardIdentifier(final String shardName, final MemberName memberName, final String type) {
-        this.shardName = Preconditions.checkNotNull(shardName, "shardName should not be null");
-        this.memberName = Preconditions.checkNotNull(memberName, "memberName should not be null");
-        this.type = Preconditions.checkNotNull(type, "type should not be null");
+        this.shardName = requireNonNull(shardName, "shardName should not be null");
+        this.memberName = requireNonNull(memberName, "memberName should not be null");
+        this.type = requireNonNull(type, "type should not be null");
 
         fullName = memberName.getName() + "-shard-" + shardName + "-" + type;
     }
@@ -37,7 +38,7 @@ public class ShardIdentifier {
 
     public static ShardIdentifier fromShardIdString(final String shardIdString) {
         final Matcher matcher = PATTERN.matcher(shardIdString);
-        Preconditions.checkArgument(matcher.matches(), "Invalid shard id \"%s\"", shardIdString);
+        checkArgument(matcher.matches(), "Invalid shard id \"%s\"", shardIdString);
 
         return new ShardIdentifier(matcher.group(2), MemberName.forName(matcher.group(1)), matcher.group(3));
     }
@@ -51,19 +52,8 @@ public class ShardIdentifier {
             return false;
         }
 
-        ShardIdentifier that = (ShardIdentifier) obj;
-
-        if (!memberName.equals(that.memberName)) {
-            return false;
-        }
-        if (!shardName.equals(that.shardName)) {
-            return false;
-        }
-        if (!type.equals(that.type)) {
-            return false;
-        }
-
-        return true;
+        final var that = (ShardIdentifier) obj;
+        return memberName.equals(that.memberName) && shardName.equals(that.shardName) && type.equals(that.type);
     }
 
     @Override
@@ -102,17 +92,17 @@ public class ShardIdentifier {
         }
 
         public Builder shardName(final String newShardName) {
-            this.shardName = newShardName;
+            shardName = newShardName;
             return this;
         }
 
         public Builder memberName(final MemberName newMemberName) {
-            this.memberName = newMemberName;
+            memberName = newMemberName;
             return this;
         }
 
         public Builder type(final String newType) {
-            this.type = newType;
+            type = newType;
             return this;
         }
 
index 880ba99dbd6f1dcf1be16abd106b1d0dfb37d530..bb47e7c838b58da54a0962d57e147322ec855d00 100644 (file)
@@ -11,26 +11,19 @@ package org.opendaylight.controller.cluster.datastore.identifiers;
 public class ShardManagerIdentifier {
     private final String type;
 
-    public ShardManagerIdentifier(String type) {
+    public ShardManagerIdentifier(final String type) {
         this.type = type;
     }
 
     @Override
-    public boolean equals(Object obj) {
+    public boolean equals(final Object obj) {
         if (this == obj) {
             return true;
         }
         if (obj == null || getClass() != obj.getClass()) {
             return false;
         }
-
-        ShardManagerIdentifier that = (ShardManagerIdentifier) obj;
-
-        if (!type.equals(that.type)) {
-            return false;
-        }
-
-        return true;
+        return type.equals(((ShardManagerIdentifier) obj).type);
     }
 
     @Override
@@ -49,14 +42,13 @@ public class ShardManagerIdentifier {
     public static class Builder {
         private String type;
 
-        public Builder type(String newType) {
-            this.type = newType;
+        public Builder type(final String newType) {
+            type = newType;
             return this;
         }
 
         public ShardManagerIdentifier build() {
-            return new ShardManagerIdentifier(this.type);
+            return new ShardManagerIdentifier(type);
         }
-
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardMBeanFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardMBeanFactory.java
deleted file mode 100644 (file)
index bdf125e..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
-
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.datastore.Shard;
-
-/**
- * Factory for creating ShardStats mbeans.
- *
- * @author Basheeruddin syedbahm@cisco.com
- */
-public final class ShardMBeanFactory {
-
-    private ShardMBeanFactory() {
-    }
-
-    public static ShardStats getShardStatsMBean(final String shardName, final String mxBeanType,
-            @Nonnull final Shard shard) {
-        String finalMXBeanType = mxBeanType != null ? mxBeanType : "DistDataStore";
-        ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType, shard);
-        shardStatsMBeanImpl.registerMBean();
-        return shardStatsMBeanImpl;
-    }
-}
index 25c13989d599a153a3680d78336f4662984a894f..d0f1d3e7e1096cbb9496d789a153bfe0326e8076 100644 (file)
@@ -5,28 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class AbortTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class AbortTransaction extends AbstractThreePhaseCommitMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public AbortTransaction() {
     }
 
-    public AbortTransaction(TransactionIdentifier transactionID, final short version) {
+    public AbortTransaction(final TransactionIdentifier transactionID, final short version) {
         super(transactionID, version);
     }
 
-    public static AbortTransaction fromSerializable(Object serializable) {
+    public static AbortTransaction fromSerializable(final Object serializable) {
         Preconditions.checkArgument(serializable instanceof AbortTransaction);
         return (AbortTransaction)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof AbortTransaction;
     }
 }
index 3b58458e1a3dca93fd37ba97d01a05a64c26cc65..911d8cf0589f4fe9e4c5a3e7ff16d239d2f3131c 100644 (file)
@@ -5,26 +5,28 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
-public class AbortTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class AbortTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
+    private static final long serialVersionUID = 7251132353204199793L;
     private static final AbortTransactionReply INSTANCE = new AbortTransactionReply();
 
     public AbortTransactionReply() {
     }
 
-    private AbortTransactionReply(short version) {
+    private AbortTransactionReply(final short version) {
         super(version);
     }
 
-    public static AbortTransactionReply instance(short version) {
+    public static AbortTransactionReply instance(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? INSTANCE : new AbortTransactionReply(version);
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof AbortTransactionReply;
     }
 }
index 94e1a3779c5fba68a6e304ef8ff899d2bedab1e8..00aa7fa64b819f9662679e25d27b6954b6a630f3 100644 (file)
@@ -23,6 +23,7 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
  * @author gwu
  *
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractRead<T> extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
@@ -41,18 +42,18 @@ public abstract class AbstractRead<T> extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public final void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
-        path = SerializationUtils.deserializePath(in);
+        path = SerializationUtils.readPath(in);
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
-        SerializationUtils.serializePath(path, out);
+        SerializationUtils.writePath(out, getStreamVersion(), path);
     }
 
-    public AbstractRead<T> asVersion(short version) {
+    public AbstractRead<T> asVersion(final short version) {
         return version == getVersion() ? this : newInstance(version);
     }
 
index ba6b64f4c8ffffc3711a570e6e06ce8d60725895..6296c280a2a292af329018eed47282cb17e46930 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
@@ -18,6 +19,7 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractThreePhaseCommitMessage extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
@@ -28,7 +30,7 @@ public abstract class AbstractThreePhaseCommitMessage extends VersionedExternali
 
     protected AbstractThreePhaseCommitMessage(final TransactionIdentifier transactionId, final short version) {
         super(version);
-        this.transactionId = Preconditions.checkNotNull(transactionId);
+        this.transactionId = requireNonNull(transactionId);
     }
 
     public TransactionIdentifier getTransactionId() {
@@ -36,13 +38,13 @@ public abstract class AbstractThreePhaseCommitMessage extends VersionedExternali
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         transactionId = TransactionIdentifier.readFrom(in);
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         transactionId.writeTo(out);
     }
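
The @Deprecated(since = "9.0.0", forRemoval = true) marker added to AbstractRead, AbstractThreePhaseCommitMessage and the other legacy messages is the standard java.lang.Deprecated annotation with the elements introduced in Java 9; nothing project-specific is involved. A hypothetical illustration of what callers see:

    @Deprecated(since = "9.0.0", forRemoval = true)
    class LegacyMessage {
        // Every use site now gets a "deprecated for removal" warning, which is
        // stronger than the plain @Deprecated warning.
    }

    class Caller {
        @SuppressWarnings("removal")    // acknowledge the terminal deprecation explicitly
        LegacyMessage build() {
            return new LegacyMessage();
        }
    }
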
index 09c5b739cf014791b099eab69bc7c73c314ec561..cd6e0d8cfa91da756f616e8c5244465feb7e1e6b 100644 (file)
@@ -7,11 +7,14 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import java.io.Serializable;
+import static java.util.Objects.requireNonNull;
 
-public class ActorInitialized implements Serializable {
-    private static final long serialVersionUID = 1L;
+import akka.actor.ActorRef;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 
-    public ActorInitialized() {
+@NonNullByDefault
+public record ActorInitialized(ActorRef actorRef) {
+    public ActorInitialized {
+        requireNonNull(actorRef);
     }
 }
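
The ActorInitialized change above turns a marker class into a record whose compact constructor rejects null. The same shape in isolation, with illustrative names in place of the Akka ActorRef type:

    import static java.util.Objects.requireNonNull;

    record Registration(Object owner) {
        Registration {                  // compact constructor: runs before the field is assigned
            requireNonNull(owner);
        }
    }

Records also supply equals, hashCode and toString, which is why the hand-written boilerplate can disappear from the message class.
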
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AddPrefixShardReplica.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AddPrefixShardReplica.java
deleted file mode 100644 (file)
index 05e1271..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * A message sent to the ShardManager to dynamically add a new local shard
- *  that is a replica for an existing prefix shard that is already available
- *  in the cluster.
- */
-public class AddPrefixShardReplica {
-
-    private final YangInstanceIdentifier prefix;
-
-    /**
-     * Constructor.
-     *
-     * @param prefix prefix of the shard that is to be locally replicated.
-     */
-
-    public AddPrefixShardReplica(@Nonnull final YangInstanceIdentifier prefix) {
-        this.prefix = Preconditions.checkNotNull(prefix, "prefix should not be null");
-    }
-
-    public YangInstanceIdentifier getShardPrefix() {
-        return this.prefix;
-    }
-
-    @Override
-    public String toString() {
-        return "AddPrefixShardReplica[prefix=" + prefix + "]";
-    }
-}
index c9028466d46632de7945a7d7368b885ab8ae8bfe..d8121bc0351a6a119009651e333f27d787addd5d 100644 (file)
@@ -5,11 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * A message sent to the ShardManager to dynamically add a new local shard
@@ -26,9 +26,8 @@ public class AddShardReplica {
      *
      * @param shardName name of the shard that is to be locally replicated.
      */
-
-    public AddShardReplica(@Nonnull String shardName) {
-        this.shardName = Preconditions.checkNotNull(shardName, "ShardName should not be null");
+    public AddShardReplica(@NonNull String shardName) {
+        this.shardName = requireNonNull(shardName, "ShardName should not be null");
     }
 
     public String getShardName() {
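
The Preconditions.checkNotNull to requireNonNull swap repeated throughout these hunks is behaviour-preserving for the two-argument form: both throw a NullPointerException carrying the supplied message. A quick standalone illustration with a made-up class name:

    import static java.util.Objects.requireNonNull;

    class ReplicaRequest {
        private final String shardName;

        ReplicaRequest(final String shardName) {
            // Same effect as Guava's Preconditions.checkNotNull(shardName, "ShardName should not be null").
            this.shardName = requireNonNull(shardName, "ShardName should not be null");
        }

        String getShardName() {
            return shardName;
        }
    }
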
index b38cd873dfc777e23db5ff8f87f8b8b7ecf6c56c..4e7b40ab1f1393cc762ee636479cce2168969c3b 100644 (file)
@@ -16,7 +16,7 @@ import java.io.ObjectOutput;
 import java.util.Optional;
 import java.util.SortedSet;
 import java.util.TreeSet;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
@@ -26,20 +26,22 @@ import org.opendaylight.controller.cluster.datastore.modification.MutableComposi
  *
  * @author Thomas Pantelis
  */
-public class BatchedModifications extends MutableCompositeModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class BatchedModifications extends MutableCompositeModification {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private boolean ready;
     private boolean doCommitOnReady;
     private int totalMessagesSent;
     private TransactionIdentifier transactionId;
-    @Nullable
-    private SortedSet<String> participatingShardNames;
+
+    private @Nullable SortedSet<String> participatingShardNames;
 
     public BatchedModifications() {
     }
 
-    public BatchedModifications(TransactionIdentifier transactionId, short version) {
+    public BatchedModifications(final TransactionIdentifier transactionId, final short version) {
         super(version);
         this.transactionId = requireNonNull(transactionId, "transactionID can't be null");
     }
@@ -48,10 +50,10 @@ public class BatchedModifications extends MutableCompositeModification {
         return ready;
     }
 
-    public void setReady(Optional<SortedSet<String>> possibleParticipatingShardNames) {
-        this.ready = true;
-        this.participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null);
-        Preconditions.checkArgument(this.participatingShardNames == null || this.participatingShardNames.size() > 1);
+    public void setReady(final Optional<SortedSet<String>> possibleParticipatingShardNames) {
+        ready = true;
+        participatingShardNames = requireNonNull(possibleParticipatingShardNames).orElse(null);
+        Preconditions.checkArgument(participatingShardNames == null || participatingShardNames.size() > 1);
     }
 
     public void setReady() {
@@ -66,7 +68,7 @@ public class BatchedModifications extends MutableCompositeModification {
         return doCommitOnReady;
     }
 
-    public void setDoCommitOnReady(boolean doCommitOnReady) {
+    public void setDoCommitOnReady(final boolean doCommitOnReady) {
         this.doCommitOnReady = doCommitOnReady;
     }
 
@@ -74,7 +76,7 @@ public class BatchedModifications extends MutableCompositeModification {
         return totalMessagesSent;
     }
 
-    public void setTotalMessagesSent(int totalMessagesSent) {
+    public void setTotalMessagesSent(final int totalMessagesSent) {
         this.totalMessagesSent = totalMessagesSent;
     }
 
@@ -83,7 +85,7 @@ public class BatchedModifications extends MutableCompositeModification {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         transactionId = TransactionIdentifier.readFrom(in);
         ready = in.readBoolean();
@@ -104,7 +106,7 @@ public class BatchedModifications extends MutableCompositeModification {
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         transactionId.writeTo(out);
         out.writeBoolean(ready);
@@ -114,7 +116,7 @@ public class BatchedModifications extends MutableCompositeModification {
         if (getVersion() >= DataStoreVersions.FLUORINE_VERSION) {
             if (participatingShardNames != null) {
                 out.writeInt(participatingShardNames.size());
-                for (String shardName: participatingShardNames) {
+                for (String shardName : participatingShardNames) {
                     out.writeObject(shardName);
                 }
             } else {
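
The BatchedModifications.writeExternal hunk above only emits the participating shard names when the negotiated stream version supports them. A compressed sketch of that wire layout; VERSION_WITH_SHARD_NAMES is an invented stand-in for DataStoreVersions.FLUORINE_VERSION:

    import java.io.IOException;
    import java.io.ObjectOutput;
    import java.util.SortedSet;

    final class ShardNamesCodec {
        static final short VERSION_WITH_SHARD_NAMES = 9;   // stand-in constant, not the real value

        static void write(final ObjectOutput out, final short version,
                final SortedSet<String> names) throws IOException {
            if (version < VERSION_WITH_SHARD_NAMES) {
                return;                                     // older peers never see this field
            }
            if (names != null) {
                out.writeInt(names.size());
                for (String name : names) {
                    out.writeObject(name);
                }
            } else {
                out.writeInt(0);                            // zero count encodes "no participating shards"
            }
        }
    }
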
index 29bb3e9ea623b77f6bc2dc7559af02828a883ae6..0cca8d03ffa7f4cc5f9d78fbed7698c6fab23446 100644 (file)
@@ -16,7 +16,9 @@ import java.io.ObjectOutput;
  *
  * @author Thomas Pantelis
  */
-public class BatchedModificationsReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class BatchedModificationsReply extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private int numBatched;
@@ -24,7 +26,7 @@ public class BatchedModificationsReply extends VersionedExternalizableMessage {
     public BatchedModificationsReply() {
     }
 
-    public BatchedModificationsReply(int numBatched) {
+    public BatchedModificationsReply(final int numBatched) {
         this.numBatched = numBatched;
     }
 
@@ -33,13 +35,13 @@ public class BatchedModificationsReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         numBatched = in.readInt();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeInt(numBatched);
     }
index 087c7b6376bd44760b8486804b99cc157581d594..f50412fc0e3b3765e279dc06aeffa3da31178782 100644 (file)
@@ -5,28 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CanCommitTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CanCommitTransaction extends AbstractThreePhaseCommitMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CanCommitTransaction() {
     }
 
-    public CanCommitTransaction(TransactionIdentifier transactionID, final short version) {
+    public CanCommitTransaction(final TransactionIdentifier transactionID, final short version) {
         super(transactionID, version);
     }
 
-    public static CanCommitTransaction fromSerializable(Object serializable) {
+    public static CanCommitTransaction fromSerializable(final Object serializable) {
         Preconditions.checkArgument(serializable instanceof CanCommitTransaction);
         return (CanCommitTransaction)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CanCommitTransaction;
     }
 }
index f346cba4334ffce9b93f1ff53e33b9e0401d94fa..5c8fae94b8ee005444f44fbaa3d61d8057dd0e94 100644 (file)
@@ -14,7 +14,11 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
-public class CanCommitTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CanCommitTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
+    private static final long serialVersionUID = 4355566635423934872L;
+
     private static final CanCommitTransactionReply YES =
             new CanCommitTransactionReply(true, DataStoreVersions.CURRENT_VERSION);
     private static final CanCommitTransactionReply NO =
@@ -35,13 +39,13 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         canCommit = in.readBoolean();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeBoolean(canCommit);
     }
@@ -51,11 +55,11 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage {
         return "CanCommitTransactionReply [canCommit=" + canCommit + ", version=" + getVersion() + "]";
     }
 
-    public static CanCommitTransactionReply yes(short version) {
+    public static CanCommitTransactionReply yes(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? YES : new CanCommitTransactionReply(true, version);
     }
 
-    public static CanCommitTransactionReply no(short version) {
+    public static CanCommitTransactionReply no(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? NO : new CanCommitTransactionReply(false, version);
     }
 
@@ -64,7 +68,7 @@ public class CanCommitTransactionReply extends VersionedExternalizableMessage {
         return (CanCommitTransactionReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CanCommitTransactionReply;
     }
 }
index 0846f7b1c92d35bf6129f6276110dbbf1ab9f75d..92d186a99f66c67966f5fe28548f0051dc89af9d 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.ImmutableMap;
 import java.util.Map;
 
@@ -20,8 +21,8 @@ public class ChangeShardMembersVotingStatus {
     private final String shardName;
     private final Map<String, Boolean> meberVotingStatusMap;
 
-    public ChangeShardMembersVotingStatus(String shardName, Map<String, Boolean> meberVotingStatusMap) {
-        this.shardName = Preconditions.checkNotNull(shardName);
+    public ChangeShardMembersVotingStatus(final String shardName, final Map<String, Boolean> meberVotingStatusMap) {
+        this.shardName = requireNonNull(shardName);
         this.meberVotingStatusMap = ImmutableMap.copyOf(meberVotingStatusMap);
     }
 
index 3b5c6b3b8c89e81e0df5d0aac2c283f17ce893b9..327dca0e801ef001cc10643b39e61ea6843c582a 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import java.io.Serializable;
 
 public final class CloseDataTreeNotificationListenerRegistration implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
     private static final CloseDataTreeNotificationListenerRegistration INSTANCE =
             new CloseDataTreeNotificationListenerRegistration();
@@ -21,6 +22,7 @@ public final class CloseDataTreeNotificationListenerRegistration implements Seri
         return INSTANCE;
     }
 
+    @java.io.Serial
     private Object readResolve() {
         return INSTANCE;
     }
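
The @java.io.Serial annotations added to these singleton messages tag the serialization hooks (serialVersionUID, readResolve) so tooling can check they are well-formed. An illustrative standalone equivalent of the singleton-with-readResolve pattern:

    import java.io.Serializable;

    final class SingletonMessage implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        static final SingletonMessage INSTANCE = new SingletonMessage();

        private SingletonMessage() {
        }

        @java.io.Serial
        private Object readResolve() {
            // Deserialization collapses back to the canonical instance.
            return INSTANCE;
        }
    }
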
index 0bc5254c1459a47dc19bb2e505738558168d5e69..ae825106ad36e56cbb677c78c089d621559d79cb 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import java.io.Serializable;
 
 public final class CloseDataTreeNotificationListenerRegistrationReply implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
     private static final CloseDataTreeNotificationListenerRegistrationReply INSTANCE =
             new CloseDataTreeNotificationListenerRegistrationReply();
@@ -22,6 +23,7 @@ public final class CloseDataTreeNotificationListenerRegistrationReply implements
         return INSTANCE;
     }
 
+    @java.io.Serial
     private Object readResolve() {
         return INSTANCE;
     }
index 1a3567cafa0c2989490b78f037555347bd6701b1..5b3c050e4b17a755da8e44ecab7ab9833891b0db 100644 (file)
@@ -5,20 +5,21 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-public class CloseTransaction extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CloseTransaction extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CloseTransaction() {
     }
 
-    public CloseTransaction(short version) {
+    public CloseTransaction(final short version) {
         super(version);
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CloseTransaction;
     }
 }
index 2fe96cbe67eddf945dbb41e5bca1eae6aad554b1..d06b7319b4433bbf326cfc5e1ace2602c96335e3 100644 (file)
@@ -5,18 +5,21 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.opendaylight.yangtools.concepts.Identifiable;
 
-public class CloseTransactionChain extends VersionedExternalizableMessage
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CloseTransactionChain extends VersionedExternalizableMessage
         implements Identifiable<LocalHistoryIdentifier> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private LocalHistoryIdentifier transactionChainId;
@@ -26,7 +29,7 @@ public class CloseTransactionChain extends VersionedExternalizableMessage
 
     public CloseTransactionChain(final LocalHistoryIdentifier transactionChainId, final short version) {
         super(version);
-        this.transactionChainId = Preconditions.checkNotNull(transactionChainId);
+        this.transactionChainId = requireNonNull(transactionChainId);
     }
 
     @Override
@@ -35,23 +38,23 @@ public class CloseTransactionChain extends VersionedExternalizableMessage
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         transactionChainId = LocalHistoryIdentifier.readFrom(in);
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         transactionChainId.writeTo(out);
     }
 
     public static CloseTransactionChain fromSerializable(final Object serializable) {
-        Preconditions.checkArgument(serializable instanceof CloseTransactionChain);
+        checkArgument(serializable instanceof CloseTransactionChain);
         return (CloseTransactionChain)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CloseTransactionChain;
     }
 }
index 0e21b578ca91f59cc814f62f7c1ffb7d02e0c028..a746580516e76f12fb91f6cb57291f6f135332f5 100644 (file)
@@ -5,9 +5,9 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CloseTransactionReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
index fe13e5d8b177439cc31e1e72520a3bfb2e5868ef..bd80287ae9c187bdb8e2d30127cd0b74adac8654 100644 (file)
@@ -5,28 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CommitTransaction extends AbstractThreePhaseCommitMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CommitTransaction extends AbstractThreePhaseCommitMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public CommitTransaction() {
     }
 
-    public CommitTransaction(TransactionIdentifier transactionID, final short version) {
+    public CommitTransaction(final TransactionIdentifier transactionID, final short version) {
         super(transactionID, version);
     }
 
-    public static CommitTransaction fromSerializable(Object serializable) {
+    public static CommitTransaction fromSerializable(final Object serializable) {
         Preconditions.checkArgument(serializable instanceof CommitTransaction);
         return (CommitTransaction)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CommitTransaction;
     }
 }
index cd3a13a9f7a8d8a4948cc5d0b1d3395c45b4e44b..167124c6fe672258632e1ebe84ca36cf1b513832 100644 (file)
@@ -5,26 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
-public class CommitTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CommitTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
+    private static final long serialVersionUID = -8342450250867395000L;
+
     public static final CommitTransactionReply INSTANCE = new CommitTransactionReply();
 
     public CommitTransactionReply() {
     }
 
-    private CommitTransactionReply(short version) {
+    private CommitTransactionReply(final short version) {
         super(version);
     }
 
-    public static CommitTransactionReply instance(short version) {
+    public static CommitTransactionReply instance(final short version) {
         return version == DataStoreVersions.CURRENT_VERSION ? INSTANCE : new CommitTransactionReply(version);
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CommitTransactionReply;
     }
 }
index 372155107d54b105521d8774f47d5931e098ea3b..8c5cbad33088c3beceb929fa5851fe29a882dee8 100644 (file)
@@ -7,11 +7,13 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.Shard.AbstractBuilder;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
 
 /**
@@ -31,25 +33,22 @@ public class CreateShard {
      * @param shardBuilder used to obtain the Props for creating the shard actor instance.
      * @param datastoreContext the DatastoreContext for the new shard. If null, the default is used.
      */
-    public CreateShard(@Nonnull ModuleShardConfiguration moduleShardConfig,
-            @Nonnull Shard.AbstractBuilder<?, ?> shardBuilder, @Nullable DatastoreContext datastoreContext) {
-        this.moduleShardConfig = Preconditions.checkNotNull(moduleShardConfig);
-        this.shardBuilder = Preconditions.checkNotNull(shardBuilder);
+    public CreateShard(@NonNull ModuleShardConfiguration moduleShardConfig, @NonNull AbstractBuilder<?, ?> shardBuilder,
+            @Nullable DatastoreContext datastoreContext) {
+        this.moduleShardConfig = requireNonNull(moduleShardConfig);
+        this.shardBuilder = requireNonNull(shardBuilder);
         this.datastoreContext = datastoreContext;
     }
 
-    @Nonnull
-    public ModuleShardConfiguration getModuleShardConfig() {
+    public @NonNull ModuleShardConfiguration getModuleShardConfig() {
         return moduleShardConfig;
     }
 
-    @Nonnull
-    public Shard.AbstractBuilder<?, ?> getShardBuilder() {
+    public @NonNull AbstractBuilder<?, ?> getShardBuilder() {
         return shardBuilder;
     }
 
-    @Nullable
-    public DatastoreContext getDatastoreContext() {
+    public @Nullable DatastoreContext getDatastoreContext() {
         return datastoreContext;
     }
 
index 52e8824343fe1cafcf9398e58a340787ce2ea1e2..5ef056e8a0cb7127db01c680230564abc216e6bd 100644 (file)
@@ -5,16 +5,19 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CreateTransaction extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CreateTransaction extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private TransactionIdentifier transactionId;
@@ -23,9 +26,10 @@ public class CreateTransaction extends VersionedExternalizableMessage {
     public CreateTransaction() {
     }
 
-    public CreateTransaction(TransactionIdentifier transactionId, int transactionType, short version) {
+    public CreateTransaction(final TransactionIdentifier transactionId, final int transactionType,
+            final short version) {
         super(version);
-        this.transactionId = Preconditions.checkNotNull(transactionId);
+        this.transactionId = requireNonNull(transactionId);
         this.transactionType = transactionType;
     }
 
@@ -38,14 +42,14 @@ public class CreateTransaction extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         transactionId = TransactionIdentifier.readFrom(in);
         transactionType = in.readInt();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         transactionId.writeTo(out);
         out.writeInt(transactionType);
@@ -56,12 +60,12 @@ public class CreateTransaction extends VersionedExternalizableMessage {
         return "CreateTransaction [transactionId=" + transactionId + ", transactionType=" + transactionType + "]";
     }
 
-    public static CreateTransaction fromSerializable(Object message) {
-        Preconditions.checkArgument(message instanceof CreateTransaction);
+    public static CreateTransaction fromSerializable(final Object message) {
+        checkArgument(message instanceof CreateTransaction);
         return (CreateTransaction)message;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CreateTransaction;
     }
 }
index 87dd7c57fb5d8c64cea1d950bc75ec976ab989ab..644daf21fbdc4b158946f9785f1f4f96dcf28b7b 100644 (file)
@@ -15,7 +15,9 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 
-public class CreateTransactionReply extends VersionedExternalizableMessage {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class CreateTransactionReply extends VersionedExternalizableMessage {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private String transactionPath;
@@ -40,14 +42,14 @@ public class CreateTransactionReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         transactionId = TransactionIdentifier.readFrom(in);
         transactionPath = in.readUTF();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         transactionId.writeTo(out);
         out.writeUTF(transactionPath);
@@ -60,12 +62,12 @@ public class CreateTransactionReply extends VersionedExternalizableMessage {
                 + ", version=" + getVersion() + "]";
     }
 
-    public static CreateTransactionReply fromSerializable(Object serializable) {
+    public static CreateTransactionReply fromSerializable(final Object serializable) {
         checkArgument(serializable instanceof CreateTransactionReply);
         return (CreateTransactionReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof CreateTransactionReply;
     }
 }
index b7e38d50824095a4f0e31c094272593ef704441c..6c646f7cc3dd630223287d5cef2dfca6ba959e27 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
@@ -15,6 +14,7 @@ import org.opendaylight.mdsal.common.api.ReadFailedException;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExists extends AbstractRead<Boolean> {
     private static final long serialVersionUID = 1L;
 
@@ -26,12 +26,12 @@ public class DataExists extends AbstractRead<Boolean> {
     }
 
     @Override
-    public FluentFuture<Boolean> apply(DOMStoreReadTransaction readDelegate) {
+    public FluentFuture<Boolean> apply(final DOMStoreReadTransaction readDelegate) {
         return readDelegate.exists(getPath());
     }
 
     @Override
-    public void processResponse(Object response, SettableFuture<Boolean> returnFuture) {
+    public void processResponse(final Object response, final SettableFuture<Boolean> returnFuture) {
         if (DataExistsReply.isSerializedType(response)) {
             returnFuture.set(Boolean.valueOf(DataExistsReply.fromSerializable(response).exists()));
         } else {
@@ -41,7 +41,7 @@ public class DataExists extends AbstractRead<Boolean> {
     }
 
     @Override
-    protected AbstractRead<Boolean> newInstance(short withVersion) {
+    protected AbstractRead<Boolean> newInstance(final short withVersion) {
         return new DataExists(getPath(), withVersion);
     }
 
@@ -50,7 +50,7 @@ public class DataExists extends AbstractRead<Boolean> {
         return (DataExists)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof DataExists;
     }
 }
index a57df0ecdab14a3bf3400a14c5efaef6d29b34b1..799cd8b86e3d8d2f1903a753dcdbd1e3586b9131 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
@@ -13,6 +12,7 @@ import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExistsReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
@@ -31,13 +31,13 @@ public class DataExistsReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         exists = in.readBoolean();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeBoolean(exists);
     }
@@ -47,7 +47,7 @@ public class DataExistsReply extends VersionedExternalizableMessage {
         return (DataExistsReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof DataExistsReply;
     }
 }
index ff2c89c8ddf72d6bcc2ebb280e639791ec6a0c3e..63f39519f5a35e7b2d63f528c8bc7f157782d90d 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import static java.util.Objects.requireNonNull;
+
+import java.util.List;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 /**
  * A message about a DataTree having been changed. The message is not
@@ -18,10 +19,10 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
  * candidate.
  */
 public final class DataTreeChanged {
-    private final Collection<DataTreeCandidate> changes;
+    private final List<DataTreeCandidate> changes;
 
-    public DataTreeChanged(final Collection<DataTreeCandidate> changes) {
-        this.changes = Preconditions.checkNotNull(changes);
+    public DataTreeChanged(final List<DataTreeCandidate> changes) {
+        this.changes = requireNonNull(changes);
     }
 
     /**
@@ -29,7 +30,7 @@ public final class DataTreeChanged {
      *
      * @return Change events
      */
-    public Collection<DataTreeCandidate> getChanges() {
+    public List<DataTreeCandidate> getChanges() {
         return changes;
     }
 }
index d50079e6a12f7bb3f702c3092df5ee1a3ec13711..06e898e09a66062cd59bd0be81bcc388813f11ee 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import java.io.Serializable;
 
 public final class DataTreeChangedReply implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
     private static final DataTreeChangedReply INSTANCE = new DataTreeChangedReply();
 
@@ -21,6 +22,7 @@ public final class DataTreeChangedReply implements Serializable {
         return INSTANCE;
     }
 
+    @java.io.Serial
     private Object readResolve() {
         return INSTANCE;
     }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/EmptyExternalizable.java
deleted file mode 100644 (file)
index c7ee83a..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import java.io.Externalizable;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-
-/**
- * Externalizable with no data.
- *
- * @author Thomas Pantelis
- */
-public class EmptyExternalizable implements Externalizable {
-
-    @Override
-    public void readExternal(ObjectInput in) {
-    }
-
-    @Override
-    public void writeExternal(ObjectOutput out) {
-    }
-}
index 9eba5c66928297a7d54ff38a097fe45a12f11bac..52709dd705828d2080dbd588052f7a973279b4c9 100644 (file)
@@ -5,10 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.io.Serializable;
 
 /**
@@ -20,11 +20,8 @@ public class FindPrimary implements Serializable {
     private final String shardName;
     private final boolean waitUntilReady;
 
-    public FindPrimary(String shardName, boolean waitUntilReady) {
-
-        Preconditions.checkNotNull(shardName, "shardName should not be null");
-
-        this.shardName = shardName;
+    public FindPrimary(final String shardName, final boolean waitUntilReady) {
+        this.shardName = requireNonNull(shardName, "shardName should not be null");
         this.waitUntilReady = waitUntilReady;
     }
 
index 24859a4d16998de4c8dbf92cf75cf5f59ed9bd84..a4c8d2e1be7c543e6aa635c7bc130a9fe961d05a 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
 
 /**
  * A local message sent to the ShardManager to flip the raft voting states for members of a shard.
@@ -17,8 +17,8 @@ import com.google.common.base.Preconditions;
 public class FlipShardMembersVotingStatus {
     private final String shardName;
 
-    public FlipShardMembersVotingStatus(String shardName) {
-        this.shardName = Preconditions.checkNotNull(shardName);
+    public FlipShardMembersVotingStatus(final String shardName) {
+        this.shardName = requireNonNull(shardName);
     }
 
     public String getShardName() {
index 529b7e2153cb63888387b9a86dee36fd85aa3c95..fbc66a4d057099c22ca7c77ae79eb261ccf3e562 100644 (file)
@@ -11,7 +11,7 @@ import static java.util.Objects.requireNonNull;
 
 import java.util.Optional;
 import java.util.SortedSet;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.ReadWriteShardDataTreeTransaction;
 
@@ -20,17 +20,17 @@ import org.opendaylight.controller.cluster.datastore.ReadWriteShardDataTreeTrans
  *
  * @author Thomas Pantelis
  */
-public class ForwardedReadyTransaction {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class ForwardedReadyTransaction {
     private final TransactionIdentifier transactionId;
     private final ReadWriteShardDataTreeTransaction transaction;
     private final boolean doImmediateCommit;
     private final short txnClientVersion;
-    @Nullable
-    private final SortedSet<String> participatingShardNames;
+    private final @Nullable SortedSet<String> participatingShardNames;
 
-    public ForwardedReadyTransaction(TransactionIdentifier transactionId, short txnClientVersion,
-            ReadWriteShardDataTreeTransaction transaction, boolean doImmediateCommit,
-            Optional<SortedSet<String>> participatingShardNames) {
+    public ForwardedReadyTransaction(final TransactionIdentifier transactionId, final short txnClientVersion,
+            final ReadWriteShardDataTreeTransaction transaction, final boolean doImmediateCommit,
+            final Optional<SortedSet<String>> participatingShardNames) {
         this.transactionId = requireNonNull(transactionId);
         this.transaction = requireNonNull(transaction);
         this.txnClientVersion = txnClientVersion;
index 2026bd843fbbb5a6cb81e74a9ec11365e3a64d85..443574795d37be78213f4e974554af00ce2415d7 100644 (file)
@@ -7,8 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
+import org.opendaylight.controller.cluster.mgmt.api.DataTreeListenerInfo;
+
 /**
- * Local message sent to an actor to retrieve internal information for reporting.
+ * Local message sent to an actor to retrieve {@link DataTreeListenerInfo} for reporting.
  *
  * @author Thomas Pantelis
  */
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClients.java
new file mode 100644 (file)
index 0000000..30ac1a9
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+
+/**
+ * Request a shard to report the clients it knows about. Shard is required to respond with {@link GetKnownClientsReply}.
+ */
+public final class GetKnownClients implements Serializable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    public static final @NonNull GetKnownClients INSTANCE = new GetKnownClients();
+
+    private GetKnownClients() {
+
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return INSTANCE;
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClientsReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/GetKnownClientsReply.java
new file mode 100644 (file)
index 0000000..2864141
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableSet;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+
+/**
+ * Reply to {@link GetKnownClients}.
+ */
+public final class GetKnownClientsReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final @NonNull ImmutableSet<ClientIdentifier> clients;
+
+    public GetKnownClientsReply(final ImmutableSet<ClientIdentifier> clients) {
+        this.clients = requireNonNull(clients);
+    }
+
+    public @NonNull ImmutableSet<ClientIdentifier> getClients() {
+        return clients;
+    }
+}
index 0ca4f6444da1861f382ba5dfa626fbe72f2674e1..fc0df4a951bcba4bb80c266c1cf8b3946abbe621 100644 (file)
@@ -7,10 +7,11 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
+import static java.util.Objects.requireNonNull;
+
 import org.apache.commons.lang3.ObjectUtils;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Local message sent in reply to FindPrimaryShard to indicate the primary shard is local to the caller.
@@ -18,22 +19,20 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
  * @author Thomas Pantelis
  */
 public class LocalPrimaryShardFound {
-
     private final String primaryPath;
-    private final DataTree localShardDataTree;
+    private final ReadOnlyDataTree localShardDataTree;
 
-    public LocalPrimaryShardFound(@Nonnull String primaryPath, @Nonnull DataTree localShardDataTree) {
-        this.primaryPath = Preconditions.checkNotNull(primaryPath);
-        this.localShardDataTree = Preconditions.checkNotNull(localShardDataTree);
+    public LocalPrimaryShardFound(final @NonNull String primaryPath,
+            final @NonNull ReadOnlyDataTree localShardDataTree) {
+        this.primaryPath = requireNonNull(primaryPath);
+        this.localShardDataTree = requireNonNull(localShardDataTree);
     }
 
-    @Nonnull
-    public String getPrimaryPath() {
+    public @NonNull String getPrimaryPath() {
         return primaryPath;
     }
 
-    @Nonnull
-    public DataTree getLocalShardDataTree() {
+    public @NonNull ReadOnlyDataTree getLocalShardDataTree() {
         return localShardDataTree;
     }
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PeerDown.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PeerDown.java
deleted file mode 100644 (file)
index bfe7230..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Message sent to a shard actor indicating one of its peers is down.
- *
- * @author Thomas Pantelis
- */
-public class PeerDown {
-    private final MemberName memberName;
-    private final String peerId;
-
-    public PeerDown(MemberName memberName, String peerId) {
-        this.memberName = memberName;
-        this.peerId = peerId;
-    }
-
-    public MemberName getMemberName() {
-        return memberName;
-    }
-
-
-    public String getPeerId() {
-        return peerId;
-    }
-
-    @Override
-    public String toString() {
-        return "PeerDown [memberName=" + memberName.getName() + ", peerId=" + peerId + "]";
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PeerUp.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PeerUp.java
deleted file mode 100644 (file)
index 053e515..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Message sent to a shard actor indicating one of its peers is up.
- *
- * @author Thomas Pantelis
- */
-public class PeerUp {
-    private final MemberName memberName;
-    private final String peerId;
-
-    public PeerUp(MemberName memberName, String peerId) {
-        this.memberName = memberName;
-        this.peerId = peerId;
-    }
-
-    public MemberName getMemberName() {
-        return memberName;
-    }
-
-    public String getPeerId() {
-        return peerId;
-    }
-
-    @Override
-    public String toString() {
-        return "PeerUp [memberName=" + memberName.getName() + ", peerId=" + peerId + "]";
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PersistAbortTransactionPayload.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PersistAbortTransactionPayload.java
deleted file mode 100644 (file)
index b581ddb..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-
-/**
- * A request sent from {@link org.opendaylight.controller.cluster.datastore.ShardTransaction} to
- * {@link org.opendaylight.controller.cluster.datastore.Shard} to persist an
- * {@link org.opendaylight.controller.cluster.datastore.persisted.AbortTransactionPayload} after the transaction has
- * been closed by the frontend and internal backend state has been updated.
- *
- * <p>
- * Since the two are actors, we cannot perform a direct upcall, as that breaks actor containment and wreaks havoc into
- * Akka itself. This class does not need to be serializable, as both actors are guaranteed to be co-located.
- *
- * @author Robert Varga
- */
-public final class PersistAbortTransactionPayload {
-    private final TransactionIdentifier txId;
-
-    public PersistAbortTransactionPayload(final TransactionIdentifier txId) {
-        this.txId = Preconditions.checkNotNull(txId);
-    }
-
-    public TransactionIdentifier getTransactionId() {
-        return txId;
-    }
-}
index f739e1e6c4ab66de3f958f5c39d98a241e8b9f04..c9d10a62e60a4061b5d5dc3eb4891415907adce6 100644 (file)
@@ -7,11 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorSelection;
-import com.google.common.base.Preconditions;
 import java.util.Optional;
-import javax.annotation.Nonnull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * Local message DTO that contains information about the primary shard.
@@ -21,26 +22,25 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
 public class PrimaryShardInfo {
     private final ActorSelection primaryShardActor;
     private final short primaryShardVersion;
-    private final DataTree localShardDataTree;
+    private final ReadOnlyDataTree localShardDataTree;
 
-    public PrimaryShardInfo(@Nonnull ActorSelection primaryShardActor, short primaryShardVersion,
-            @Nonnull DataTree localShardDataTree) {
-        this.primaryShardActor = Preconditions.checkNotNull(primaryShardActor);
+    public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion,
+            final @NonNull ReadOnlyDataTree localShardDataTree) {
+        this.primaryShardActor = requireNonNull(primaryShardActor);
         this.primaryShardVersion = primaryShardVersion;
-        this.localShardDataTree = Preconditions.checkNotNull(localShardDataTree);
+        this.localShardDataTree = requireNonNull(localShardDataTree);
     }
 
-    public PrimaryShardInfo(@Nonnull ActorSelection primaryShardActor, short primaryShardVersion) {
-        this.primaryShardActor = Preconditions.checkNotNull(primaryShardActor);
+    public PrimaryShardInfo(final @NonNull ActorSelection primaryShardActor, final short primaryShardVersion) {
+        this.primaryShardActor = requireNonNull(primaryShardActor);
         this.primaryShardVersion = primaryShardVersion;
-        this.localShardDataTree = null;
+        localShardDataTree = null;
     }
 
     /**
      * Returns an ActorSelection representing the primary shard actor.
      */
-    @Nonnull
-    public ActorSelection getPrimaryShardActor() {
+    public @NonNull ActorSelection getPrimaryShardActor() {
         return primaryShardActor;
     }
 
@@ -55,8 +55,7 @@ public class PrimaryShardInfo {
      * Returns an Optional whose value contains the primary shard's DataTree if the primary shard is local
      * to the caller. Otherwise the Optional value is absent.
      */
-    @Nonnull
-    public Optional<DataTree> getLocalShardDataTree() {
+    public @NonNull Optional<ReadOnlyDataTree> getLocalShardDataTree() {
         return Optional.ofNullable(localShardDataTree);
     }
 }
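
The CreateShard, LocalPrimaryShardFound and PrimaryShardInfo hunks all show the javax.annotation to org.eclipse.jdt.annotation migration: the JDT annotations are type-use annotations, so they sit next to the type they qualify, and a nullable field is exposed through Optional.ofNullable rather than returned as a bare null. A minimal sketch with made-up names, assuming the JDT annotations are on the classpath:

    import static java.util.Objects.requireNonNull;

    import java.util.Optional;
    import org.eclipse.jdt.annotation.NonNull;
    import org.eclipse.jdt.annotation.Nullable;

    class ShardInfoSketch {
        private final @NonNull String primaryPath;
        private final @Nullable String localDetail;     // null when the shard is not local

        ShardInfoSketch(final @NonNull String primaryPath, final @Nullable String localDetail) {
            this.primaryPath = requireNonNull(primaryPath);
            this.localDetail = localDetail;
        }

        public @NonNull String getPrimaryPath() {        // the annotation attaches to the return type
            return primaryPath;
        }

        public @NonNull Optional<String> getLocalDetail() {
            return Optional.ofNullable(localDetail);     // absent rather than null for callers
        }
    }
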
index 6352b5734b9dae632f187792c5119d1ce815fb10..8172d64d52ccba8fa75a1b48054d057702e95355 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
@@ -17,33 +16,35 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
-public class ReadData extends AbstractRead<Optional<NormalizedNode<?, ?>>> {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public class ReadData extends AbstractRead<Optional<NormalizedNode>> {
     private static final long serialVersionUID = 1L;
 
     public ReadData() {
     }
 
-    public ReadData(final YangInstanceIdentifier path, short version) {
+    public ReadData(final YangInstanceIdentifier path, final short version) {
         super(path, version);
     }
 
     @Override
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> apply(DOMStoreReadTransaction readDelegate) {
+    public FluentFuture<Optional<NormalizedNode>> apply(final DOMStoreReadTransaction readDelegate) {
         return readDelegate.read(getPath());
     }
 
     @Override
-    public void processResponse(Object readResponse, SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
+    public void processResponse(final Object readResponse,
+            final SettableFuture<Optional<NormalizedNode>> returnFuture) {
         if (ReadDataReply.isSerializedType(readResponse)) {
             ReadDataReply reply = ReadDataReply.fromSerializable(readResponse);
-            returnFuture.set(Optional.<NormalizedNode<?, ?>>ofNullable(reply.getNormalizedNode()));
+            returnFuture.set(Optional.ofNullable(reply.getNormalizedNode()));
         } else {
             returnFuture.setException(new ReadFailedException("Invalid response reading data for path " + getPath()));
         }
     }
 
     @Override
-    protected AbstractRead<Optional<NormalizedNode<?, ?>>> newInstance(short withVersion) {
+    protected AbstractRead<Optional<NormalizedNode>> newInstance(final short withVersion) {
         return new ReadData(getPath(), withVersion);
     }
 
@@ -52,7 +53,7 @@ public class ReadData extends AbstractRead<Optional<NormalizedNode<?, ?>>> {
         return (ReadData)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof ReadData;
     }
 }
index eda5c262802669bfcc9eca9aed4f398187728edd..2ed53ad0501a81e6ce9615fa318f4fa7a3fbb1c3 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import java.io.IOException;
@@ -14,40 +13,41 @@ import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadDataReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
-    private NormalizedNode<?, ?> normalizedNode;
+    private NormalizedNode normalizedNode;
 
     public ReadDataReply() {
     }
 
-    public ReadDataReply(NormalizedNode<?, ?> normalizedNode, short version) {
+    public ReadDataReply(final NormalizedNode normalizedNode, final short version) {
         super(version);
         this.normalizedNode = normalizedNode;
     }
 
-    public NormalizedNode<?, ?> getNormalizedNode() {
+    public NormalizedNode getNormalizedNode() {
         return normalizedNode;
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
-        normalizedNode = SerializationUtils.deserializeNormalizedNode(in);
+        normalizedNode = SerializationUtils.readNormalizedNode(in).orElse(null);
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
-        SerializationUtils.serializeNormalizedNode(normalizedNode, out);
+        SerializationUtils.writeNormalizedNode(out, getStreamVersion(), normalizedNode);
     }
 
-    public static ReadDataReply fromSerializable(Object serializable) {
+    public static ReadDataReply fromSerializable(final Object serializable) {
         return (ReadDataReply) serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof ReadDataReply;
     }
 }
index bff6ea862029a647ef96205931022491342fa76e..55ab132db699e276cc0370196fa099a10ec6cea4 100644 (file)
@@ -11,10 +11,10 @@ import static java.util.Objects.requireNonNull;
 
 import java.util.Optional;
 import java.util.SortedSet;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Message notifying the shard leader to apply modifications which have been
@@ -23,18 +23,18 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
  * to be sent out to a remote system, it needs to be intercepted by {@link ReadyLocalTransactionSerializer}
  * and turned into {@link BatchedModifications}.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public final class ReadyLocalTransaction {
     private final DataTreeModification modification;
     private final TransactionIdentifier transactionId;
     private final boolean doCommitOnReady;
-    @Nullable
-    private final SortedSet<String> participatingShardNames;
+    private final @Nullable SortedSet<String> participatingShardNames;
 
     // The version of the remote system used only when needing to convert to BatchedModifications.
     private short remoteVersion = DataStoreVersions.CURRENT_VERSION;
 
     public ReadyLocalTransaction(final TransactionIdentifier transactionId, final DataTreeModification modification,
-            final boolean doCommitOnReady, Optional<SortedSet<String>> participatingShardNames) {
+            final boolean doCommitOnReady, final Optional<SortedSet<String>> participatingShardNames) {
         this.transactionId = requireNonNull(transactionId);
         this.modification = requireNonNull(modification);
         this.doCommitOnReady = doCommitOnReady;
@@ -57,7 +57,7 @@ public final class ReadyLocalTransaction {
         return remoteVersion;
     }
 
-    public void setRemoteVersion(short remoteVersion) {
+    public void setRemoteVersion(final short remoteVersion) {
         this.remoteVersion = remoteVersion;
     }
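
A construction sketch for the constructor above. Illustrative only: the helper class, the "default" shard name and the semantics noted in the comment are assumptions, not part of this patch.

    import java.util.Optional;
    import java.util.SortedSet;
    import java.util.TreeSet;
    import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;

    final class ReadyLocalTransactionExample {
        static ReadyLocalTransaction readyFor(final TransactionIdentifier txId, final DataTreeModification mod) {
            final SortedSet<String> shards = new TreeSet<>();
            shards.add("default");
            // doCommitOnReady = true presumably asks the leader to commit right away
            // instead of waiting for a separate commit message.
            return new ReadyLocalTransaction(txId, mod, true, Optional.of(shards));
        }
    }
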
 
index 53731fa49170ffd22aa39512deebd825f7e12c08..7346a8c07e5d3401610c1f5a237cd6c026af33e6 100644 (file)
@@ -7,10 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ExtendedActorSystem;
 import akka.serialization.JSerializer;
 import akka.util.ClassLoaderObjectInputStream;
-import com.google.common.base.Preconditions;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import org.apache.commons.lang3.SerializationUtils;
@@ -22,12 +24,12 @@ import org.opendaylight.controller.cluster.datastore.utils.AbstractBatchedModifi
  * into akka serialization to allow forwarding of ReadyLocalTransaction to remote
  * shards.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public final class ReadyLocalTransactionSerializer extends JSerializer {
-
     private final ExtendedActorSystem system;
 
     public ReadyLocalTransactionSerializer(final ExtendedActorSystem system) {
-        this.system = Preconditions.checkNotNull(system);
+        this.system = requireNonNull(system);
     }
 
     @Override
@@ -42,7 +44,7 @@ public final class ReadyLocalTransactionSerializer extends JSerializer {
 
     @Override
     public byte[] toBinary(final Object obj) {
-        Preconditions.checkArgument(obj instanceof ReadyLocalTransaction, "Unsupported object type %s", obj.getClass());
+        checkArgument(obj instanceof ReadyLocalTransaction, "Unsupported object type %s", obj.getClass());
         final ReadyLocalTransaction readyLocal = (ReadyLocalTransaction) obj;
         final BatchedModifications batched = new BatchedModifications(readyLocal.getTransactionId(),
                 readyLocal.getRemoteVersion());
@@ -69,7 +71,7 @@ public final class ReadyLocalTransactionSerializer extends JSerializer {
         private final BatchedModifications message;
 
         BatchedCursor(final BatchedModifications message) {
-            this.message = Preconditions.checkNotNull(message);
+            this.message = requireNonNull(message);
         }
 
         @Override
index 5ddc77f8f624c15f2e75dcebf3adbc693cebfde4..a341c72333e3ca92c123ef749bd9a9ab9f6226d5 100644 (file)
@@ -13,6 +13,7 @@ import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadyTransactionReply extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
@@ -21,11 +22,11 @@ public class ReadyTransactionReply extends VersionedExternalizableMessage {
     public ReadyTransactionReply() {
     }
 
-    public ReadyTransactionReply(String cohortPath) {
+    public ReadyTransactionReply(final String cohortPath) {
         this(cohortPath, DataStoreVersions.CURRENT_VERSION);
     }
 
-    public ReadyTransactionReply(String cohortPath, short version) {
+    public ReadyTransactionReply(final String cohortPath, final short version) {
         super(version);
         this.cohortPath = cohortPath;
     }
@@ -35,22 +36,22 @@ public class ReadyTransactionReply extends VersionedExternalizableMessage {
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
         cohortPath = in.readUTF();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
         out.writeUTF(cohortPath);
     }
 
-    public static ReadyTransactionReply fromSerializable(Object serializable) {
+    public static ReadyTransactionReply fromSerializable(final Object serializable) {
         return (ReadyTransactionReply)serializable;
     }
 
-    public static boolean isSerializedType(Object message) {
+    public static boolean isSerializedType(final Object message) {
         return message instanceof ReadyTransactionReply;
     }
 }
index f790d1d4b2ada70a7938ed5316850d6ac22ae95c..0e9fae42432b8d2b98802142e74d51b38818c0e0 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorPath;
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
@@ -18,11 +19,11 @@ import org.opendaylight.controller.cluster.datastore.node.utils.stream.Serializa
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 /**
- * Request a {@link org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener} registration be made on the
- * shard leader.
+ * Request a {@link org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener} registration be made on the shard leader.
  */
 public final class RegisterDataTreeChangeListener implements Externalizable {
     private static final long serialVersionUID = 1L;
+
     private ActorRef dataTreeChangeListenerPath;
     private YangInstanceIdentifier path;
     private boolean registerOnAllInstances;
@@ -33,8 +34,8 @@ public final class RegisterDataTreeChangeListener implements Externalizable {
 
     public RegisterDataTreeChangeListener(final YangInstanceIdentifier path, final ActorRef dataTreeChangeListenerPath,
             final boolean registerOnAllInstances) {
-        this.path = Preconditions.checkNotNull(path);
-        this.dataTreeChangeListenerPath = Preconditions.checkNotNull(dataTreeChangeListenerPath);
+        this.path = requireNonNull(path);
+        this.dataTreeChangeListenerPath = requireNonNull(dataTreeChangeListenerPath);
         this.registerOnAllInstances = registerOnAllInstances;
     }
 
@@ -53,14 +54,14 @@ public final class RegisterDataTreeChangeListener implements Externalizable {
     @Override
     public void writeExternal(final ObjectOutput out) throws IOException {
         out.writeObject(dataTreeChangeListenerPath);
-        SerializationUtils.serializePath(path, out);
+        SerializationUtils.writePath(out, path);
         out.writeBoolean(registerOnAllInstances);
     }
 
     @Override
     public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         dataTreeChangeListenerPath = (ActorRef) in.readObject();
-        path = SerializationUtils.deserializePath(in);
+        path = SerializationUtils.readPath(in);
         registerOnAllInstances = in.readBoolean();
     }
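
The hunk above swaps serializePath()/deserializePath() for the writePath()/readPath() pair. A self-contained sketch of the same pairing in a hypothetical path-only message; the class itself is an assumption, only the two SerializationUtils calls come from this patch.

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    final class PathOnlyMessage implements Externalizable {
        private YangInstanceIdentifier path;

        public PathOnlyMessage() {
            // For Externalizable
        }

        PathOnlyMessage(final YangInstanceIdentifier path) {
            this.path = path;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            // Symmetric with readExternal(): the path is the only payload.
            SerializationUtils.writePath(out, path);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            path = SerializationUtils.readPath(in);
        }
    }
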
 
index 7642a90c5f4efd6518e6f1145cb9bfe5f2f62a3b..7feb6c46e631f9a89a309e34cdc562358b88a4c1 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorPath;
 import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
 import java.io.Serializable;
 
 /**
@@ -23,7 +24,7 @@ public class RegisterDataTreeNotificationListenerReply implements Serializable {
     private final ActorRef listenerRegistrationPath;
 
     public RegisterDataTreeNotificationListenerReply(final ActorRef listenerRegistrationPath) {
-        this.listenerRegistrationPath = Preconditions.checkNotNull(listenerRegistrationPath);
+        this.listenerRegistrationPath = requireNonNull(listenerRegistrationPath);
     }
 
     public ActorPath getListenerRegistrationPath() {
index 041085fe159154685d5b412c537591a61a02a984..20b85d7ad1d19903868f7037b968f75330145f86 100644 (file)
@@ -7,11 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * A remote message sent to locate the primary shard.
@@ -23,13 +24,12 @@ public class RemoteFindPrimary extends FindPrimary {
 
     private final Set<String> visitedAddresses;
 
-    public RemoteFindPrimary(String shardName, boolean waitUntilReady, @Nonnull Collection<String> visitedAddresses) {
+    public RemoteFindPrimary(String shardName, boolean waitUntilReady, @NonNull Collection<String> visitedAddresses) {
         super(shardName, waitUntilReady);
-        this.visitedAddresses = new HashSet<>(Preconditions.checkNotNull(visitedAddresses));
+        this.visitedAddresses = new HashSet<>(requireNonNull(visitedAddresses));
     }
 
-    @Nonnull
-    public Set<String> getVisitedAddresses() {
+    public @NonNull Set<String> getVisitedAddresses() {
         return visitedAddresses;
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RemovePrefixShardReplica.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RemovePrefixShardReplica.java
deleted file mode 100644 (file)
index 9c33cf0..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * A message sent to the ShardManager to dynamically remove a local prefix shard
- *  replica available in this node.
- */
-public class RemovePrefixShardReplica {
-
-    private final YangInstanceIdentifier prefix;
-    private final MemberName memberName;
-
-    /**
-     * Constructor.
-     *
-     * @param prefix prefix of the local shard that is to be dynamically removed.
-     */
-    public RemovePrefixShardReplica(@Nonnull final YangInstanceIdentifier prefix,
-                                    @Nonnull final MemberName memberName) {
-        this.prefix = Preconditions.checkNotNull(prefix, "prefix should not be null");
-        this.memberName = Preconditions.checkNotNull(memberName, "memberName should not be null");
-    }
-
-    public YangInstanceIdentifier getShardPrefix() {
-        return prefix;
-    }
-
-    public MemberName getMemberName() {
-        return memberName;
-    }
-
-    @Override
-    public String toString() {
-        return "RemovePrefixShardReplica [prefix=" + prefix + ", memberName=" + memberName + "]";
-    }
-}
index 0325ee9f2baa38af39d658efcca9c6a29b7d6e22..5da3a201e88a7d71b5800eb44eed63618be89497 100644 (file)
@@ -5,11 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 
 /**
@@ -26,9 +26,9 @@ public class RemoveShardReplica {
      *
      * @param shardName name of the local shard that is to be dynamically removed.
      */
-    public RemoveShardReplica(@Nonnull String shardName, @Nonnull MemberName memberName) {
-        this.shardName = Preconditions.checkNotNull(shardName, "shardName should not be null");
-        this.memberName = Preconditions.checkNotNull(memberName, "memberName should not be null");
+    public RemoveShardReplica(@NonNull String shardName, @NonNull MemberName memberName) {
+        this.shardName = requireNonNull(shardName, "shardName should not be null");
+        this.memberName = requireNonNull(memberName, "memberName should not be null");
     }
 
     public String getShardName() {
index 0b3b6b89186e415dba9c6e45c72c0d601fff41b3..c92670c97138c66060be1d149a0f15a213f4828c 100644 (file)
@@ -7,12 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import com.google.common.base.Preconditions;
-import java.util.Optional;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
 
 /**
  * A local message derived from LeaderStateChanged containing additional Shard-specific info that is sent
@@ -21,24 +21,22 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
  *
  * @author Thomas Pantelis
  */
-public class ShardLeaderStateChanged extends LeaderStateChanged {
-
-    private final DataTree localShardDataTree;
+public final class ShardLeaderStateChanged extends LeaderStateChanged {
+    private final @Nullable ReadOnlyDataTree localShardDataTree;
 
-    public ShardLeaderStateChanged(@Nonnull String memberId, @Nullable String leaderId,
-            @Nonnull DataTree localShardDataTree, short leaderPayloadVersion) {
+    public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+            final @NonNull ReadOnlyDataTree localShardDataTree, final short leaderPayloadVersion) {
         super(memberId, leaderId, leaderPayloadVersion);
-        this.localShardDataTree = Preconditions.checkNotNull(localShardDataTree);
+        this.localShardDataTree = requireNonNull(localShardDataTree);
     }
 
-    public ShardLeaderStateChanged(@Nonnull String memberId, @Nullable String leaderId,
-            short leaderPayloadVersion) {
+    public ShardLeaderStateChanged(final @NonNull String memberId, final @Nullable String leaderId,
+            final short leaderPayloadVersion) {
         super(memberId, leaderId, leaderPayloadVersion);
-        this.localShardDataTree = null;
+        localShardDataTree = null;
     }
 
-    @Nonnull
-    public Optional<DataTree> getLocalShardDataTree() {
-        return Optional.ofNullable(localShardDataTree);
+    public @Nullable ReadOnlyDataTree localShardDataTree() {
+        return localShardDataTree;
     }
 }
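
Since the Optional-returning getter is replaced by a nullable accessor, callers that still want an Optional can wrap it themselves. A minimal sketch; the helper class is an assumption.

    import java.util.Optional;
    import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;

    final class ShardLeaderStateChangedExample {
        // Caller-side equivalent of the removed Optional-returning accessor.
        static Optional<ReadOnlyDataTree> localDataTree(final ShardLeaderStateChanged changed) {
            return Optional.ofNullable(changed.localShardDataTree());
        }
    }
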
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/SuccessReply.java
deleted file mode 100644 (file)
index 4f74bda..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.messages;
-
-import java.io.Serializable;
-
-/**
- * A reply message indicating success.
- *
- * @author Thomas Pantelis
- */
-public final class SuccessReply implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    public static final SuccessReply INSTANCE = new SuccessReply();
-
-    private SuccessReply() {
-    }
-}
index 16f49e278a90ef65671874e4d34defd896d96357..16e59cc2338f5f98c7714c1aca325a9c3711070c 100644 (file)
@@ -5,19 +5,16 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.messages;
 
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class UpdateSchemaContext {
-    private final SchemaContext schemaContext;
+import static java.util.Objects.requireNonNull;
 
-    public UpdateSchemaContext(SchemaContext schemaContext) {
-        this.schemaContext = schemaContext;
-    }
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
-    public SchemaContext getSchemaContext() {
-        return schemaContext;
+@NonNullByDefault
+public record UpdateSchemaContext(EffectiveModelContext modelContext) {
+    public UpdateSchemaContext {
+        requireNonNull(modelContext);
     }
 }
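
Usage of the new record form, as a small illustrative helper (the helper itself is an assumption): the compact constructor fails fast on null and the accessor follows the record component name.

    import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;

    final class UpdateSchemaContextExample {
        static UpdateSchemaContext of(final EffectiveModelContext ctx) {
            // requireNonNull() in the compact constructor rejects a null context here.
            final UpdateSchemaContext message = new UpdateSchemaContext(ctx);
            // modelContext() is the accessor generated from the record component.
            assert message.modelContext() == ctx;
            return message;
        }
    }
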
index 781580a643ec3e03a98e26327fb2a5374b41d319..9548a7ebdc7d4432c6e162915f8e353c04dc4477 100644 (file)
@@ -11,44 +11,59 @@ import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 
 /**
  * Abstract base class for a versioned Externalizable message.
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class VersionedExternalizableMessage implements Externalizable, SerializableMessage {
     private static final long serialVersionUID = 1L;
 
     private short version = DataStoreVersions.CURRENT_VERSION;
 
     public VersionedExternalizableMessage() {
+        // Required for externalizable
     }
 
-    public VersionedExternalizableMessage(short version) {
+    public VersionedExternalizableMessage(final short version) {
         this.version = version <= DataStoreVersions.CURRENT_VERSION ? version : DataStoreVersions.CURRENT_VERSION;
     }
 
-    public short getVersion() {
+    public final short getVersion() {
         return version;
     }
 
+    protected final @NonNull NormalizedNodeStreamVersion getStreamVersion() {
+        if (version >= DataStoreVersions.POTASSIUM_VERSION) {
+            return NormalizedNodeStreamVersion.POTASSIUM;
+        } else if (version >= DataStoreVersions.PHOSPHORUS_VERSION) {
+            return NormalizedNodeStreamVersion.MAGNESIUM;
+        } else {
+            throw new IllegalStateException("Unsupported version " + version);
+        }
+    }
+
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         version = in.readShort();
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         out.writeShort(version);
     }
 
     @Override
     public final Object toSerializable() {
-        if (getVersion() < DataStoreVersions.BORON_VERSION) {
-            throw new UnsupportedOperationException("Versions prior to " + DataStoreVersions.BORON_VERSION
-                    + " are not supported");
+        final short ver = getVersion();
+        if (ver < DataStoreVersions.SODIUM_SR1_VERSION) {
+            throw new UnsupportedOperationException("Version " + ver
+                + " is older than the oldest version supported version " + DataStoreVersions.SODIUM_SR1_VERSION);
         }
 
         return this;
index c81468d6617a9a2302a17a2218c9e613764ebe2a..3eec9a4a46469ad17a86e9fbe281bbebcccf0ebe 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -13,20 +12,28 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 /**
  * Base class to be used for all simple modifications that can be applied to a DOMStoreTransaction.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractModification implements Modification {
+    @java.io.Serial
+    private static final long serialVersionUID = 2647778426312509718L;
 
     private YangInstanceIdentifier path;
     private short version;
 
-    protected AbstractModification(short version) {
+    protected AbstractModification(final short version) {
+        this.version = version;
+    }
+
+    protected AbstractModification(final short version, final YangInstanceIdentifier path) {
+        this.path = path;
         this.version = version;
     }
 
-    protected AbstractModification(YangInstanceIdentifier path) {
+    protected AbstractModification(final YangInstanceIdentifier path) {
         this.path = path;
     }
 
-    protected void setPath(YangInstanceIdentifier path) {
+    protected void setPath(final YangInstanceIdentifier path) {
         this.path = path;
     }
 
index b59132fe874471eb7689459788849368ef31190a..3705707de296aac34104683243d81bcc1b0d39ba 100644 (file)
@@ -16,6 +16,7 @@ import java.util.List;
  * A CompositeModification gets stored in the transaction log for a Shard. During recovery when the transaction log
  * is being replayed a DOMStoreWriteTransaction could be created and a CompositeModification could be applied to it.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public interface CompositeModification extends Modification {
     /**
      * Get a list of modifications contained by this composite.
index 347cde9f3fed6f727a96350e386d952a4bd513a9..267dfa8368255955ea4a549f73103259d4d990e2 100644 (file)
@@ -5,42 +5,50 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
+import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * DeleteModification store all the parameters required to delete a path from the data tree.
  */
-public class DeleteModification extends AbstractModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class DeleteModification extends AbstractModification {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     public DeleteModification() {
         this(DataStoreVersions.CURRENT_VERSION);
     }
 
-    public DeleteModification(short version) {
+    public DeleteModification(final short version) {
         super(version);
     }
 
-    public DeleteModification(YangInstanceIdentifier path) {
+    public DeleteModification(final YangInstanceIdentifier path) {
         super(path);
     }
 
+    DeleteModification(final short version, final YangInstanceIdentifier path) {
+        super(version, path);
+    }
+
     @Override
-    public void apply(DOMStoreWriteTransaction transaction) {
+    public void apply(final DOMStoreWriteTransaction transaction) {
         transaction.delete(getPath());
     }
 
     @Override
-    public void apply(DataTreeModification transaction) {
+    public void apply(final DataTreeModification transaction) {
         transaction.delete(getPath());
     }
 
@@ -50,18 +58,22 @@ public class DeleteModification extends AbstractModification {
     }
 
     @Override
-    public void readExternal(ObjectInput in) {
-        setPath(SerializationUtils.deserializePath(in));
+    public void readExternal(final ObjectInput in) throws IOException {
+        setPath(SerializationUtils.readPath(in));
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        SerializationUtils.writePath(out, getPath());
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) {
-        SerializationUtils.serializePath(getPath(), out);
+    public void writeTo(final NormalizedNodeDataOutput out) throws IOException {
+        out.writeYangInstanceIdentifier(getPath());
     }
 
-    public static DeleteModification fromStream(ObjectInput in, short version) {
-        DeleteModification mod = new DeleteModification(version);
-        mod.readExternal(in);
-        return mod;
+    public static DeleteModification fromStream(final NormalizedNodeDataInput in, final short version)
+            throws IOException {
+        return new DeleteModification(version, in.readYangInstanceIdentifier());
     }
 }
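
A rough round-trip sketch for the new writeTo()/fromStream() pair, assuming the binfmt streams pair up the same way they do in MutableCompositeModification below; everything apart from the methods shown in this patch is an assumption.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
    import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
    import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;

    final class DeleteModificationRoundTrip {
        static DeleteModification roundTrip(final YangInstanceIdentifier path) throws IOException {
            final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (NormalizedNodeDataOutput out =
                    NormalizedNodeStreamVersion.POTASSIUM.newDataOutput(new DataOutputStream(bytes))) {
                // writeTo() emits just the instance identifier into the versioned stream.
                new DeleteModification(path).writeTo(out);
            }

            final NormalizedNodeDataInput in = NormalizedNodeDataInput.newDataInput(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            return DeleteModification.fromStream(in, DataStoreVersions.CURRENT_VERSION);
        }
    }
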
index 465372736bb398769d811fd217319a1f1b694328..9244a38c80e92b3014b5376ce3a1e4684d28bac3 100644 (file)
@@ -5,34 +5,40 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
-import java.io.ObjectInput;
+import java.io.IOException;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * MergeModification stores all the parameters required to merge data into the specified path.
  */
-public class MergeModification extends WriteModification {
+@Deprecated(since = "9.0.0", forRemoval = true)
+public final class MergeModification extends WriteModification {
     private static final long serialVersionUID = 1L;
 
     public MergeModification() {
         this(DataStoreVersions.CURRENT_VERSION);
     }
 
-    public MergeModification(short version) {
+    public MergeModification(final short version) {
         super(version);
     }
 
-    public MergeModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    public MergeModification(final YangInstanceIdentifier path, final NormalizedNode data) {
         super(path, data);
     }
 
+    MergeModification(final short version, final YangInstanceIdentifier path, final NormalizedNode data) {
+        super(version, path, data);
+    }
+
     @Override
     public void apply(final DOMStoreWriteTransaction transaction) {
         transaction.merge(getPath(), getData());
@@ -48,9 +54,10 @@ public class MergeModification extends WriteModification {
         return MERGE;
     }
 
-    public static MergeModification fromStream(ObjectInput in, short version) {
-        MergeModification mod = new MergeModification(version);
-        mod.readExternal(in);
-        return mod;
+    public static MergeModification fromStream(final NormalizedNodeDataInput in, final short version,
+            final ReusableStreamReceiver receiver) throws IOException {
+        final NormalizedNode node = in.readNormalizedNode(receiver);
+        final YangInstanceIdentifier path = in.readYangInstanceIdentifier();
+        return new MergeModification(version, path, node);
     }
 }
index 626de70ebb3140c37914befe6add92821f8120b4..e7757f36fcdbe214d5a8a793e0057d878c6a03e3 100644 (file)
@@ -5,12 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
 import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * Represents a modification to the data store.
@@ -50,4 +53,12 @@ public interface Modification extends Externalizable {
     void apply(DataTreeModification transaction);
 
     byte getType();
+
+    @Override
+    void writeExternal(ObjectOutput out) throws IOException;
+
+    @Override
+    void readExternal(ObjectInput in) throws IOException, ClassNotFoundException;
+
+    void writeTo(NormalizedNodeDataOutput out) throws IOException;
 }
index 485bb42a94c5d0fc1bf70e66b58ce8f3444d51b5..26e049089ed32782c9f35f600cdc1bf70cff509d 100644 (file)
@@ -5,10 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
@@ -17,14 +18,17 @@ import java.util.Collections;
 import java.util.List;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.messages.VersionedExternalizableMessage;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputOutput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * MutableCompositeModification is just a mutable version of a CompositeModification.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class MutableCompositeModification extends VersionedExternalizableMessage implements CompositeModification {
     private static final long serialVersionUID = 1L;
 
@@ -35,19 +39,19 @@ public class MutableCompositeModification extends VersionedExternalizableMessage
         this(DataStoreVersions.CURRENT_VERSION);
     }
 
-    public MutableCompositeModification(short version) {
+    public MutableCompositeModification(final short version) {
         super(version);
     }
 
     @Override
-    public void apply(DOMStoreWriteTransaction transaction) {
+    public void apply(final DOMStoreWriteTransaction transaction) {
         for (Modification modification : modifications) {
             modification.apply(transaction);
         }
     }
 
     @Override
-    public void apply(DataTreeModification transaction) {
+    public void apply(final DataTreeModification transaction) {
         for (Modification modification : modifications) {
             modification.apply(transaction);
         }
@@ -63,12 +67,11 @@ public class MutableCompositeModification extends VersionedExternalizableMessage
      *
      * @param modification the modification to add.
      */
-    public void addModification(Modification modification) {
-        Preconditions.checkNotNull(modification);
-        modifications.add(modification);
+    public void addModification(final Modification modification) {
+        modifications.add(requireNonNull(modification));
     }
 
-    public void addModifications(Iterable<Modification> newMods) {
+    public void addModifications(final Iterable<Modification> newMods) {
         for (Modification mod : newMods) {
             addModification(mod);
         }
@@ -84,61 +87,98 @@ public class MutableCompositeModification extends VersionedExternalizableMessage
     }
 
     @Override
-    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
-
-        int size = in.readInt();
-
-        if (size > 1) {
-            SerializationUtils.REUSABLE_READER_TL.set(NormalizedNodeInputOutput.newDataInputWithoutValidation(in));
-        }
-
-        try {
-            for (int i = 0; i < size; i++) {
-                byte type = in.readByte();
-                switch (type) {
-                    case Modification.WRITE:
-                        modifications.add(WriteModification.fromStream(in, getVersion()));
-                        break;
-
-                    case Modification.MERGE:
-                        modifications.add(MergeModification.fromStream(in, getVersion()));
-                        break;
-
-                    case Modification.DELETE:
-                        modifications.add(DeleteModification.fromStream(in, getVersion()));
-                        break;
-                    default:
-                        break;
-                }
+        final int size = in.readInt();
+        if (size > 0) {
+            if (getVersion() >= DataStoreVersions.PHOSPHORUS_VERSION) {
+                readExternalModern(NormalizedNodeDataInput.newDataInput(in), size);
+            } else {
+                readExternalLegacy(in, size);
             }
-        } finally {
-            SerializationUtils.REUSABLE_READER_TL.remove();
         }
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) throws IOException {
+    public void writeExternal(final ObjectOutput out) throws IOException {
         super.writeExternal(out);
+        final int size = modifications.size();
+        out.writeInt(size);
+        if (size > 0) {
+            if (getVersion() >= DataStoreVersions.PHOSPHORUS_VERSION) {
+                writeExternalModern(out);
+            } else {
+                writeExternalLegacy(out);
+            }
+        }
+    }
 
-        out.writeInt(modifications.size());
-
-        if (modifications.size() > 1) {
-            SerializationUtils.REUSABLE_WRITER_TL.set(NormalizedNodeInputOutput.newDataOutput(out));
+    private void readExternalLegacy(final ObjectInput in, final int size) throws IOException {
+        final NormalizedNodeDataInput input = NormalizedNodeDataInput.newDataInputWithoutValidation(in);
+        final ReusableStreamReceiver receiver = ReusableImmutableNormalizedNodeStreamWriter.create();
+        for (int i = 0; i < size; i++) {
+            final byte type = in.readByte();
+            switch (type) {
+                case Modification.WRITE:
+                    modifications.add(WriteModification.fromStream(input, getVersion(), receiver));
+                    break;
+                case Modification.MERGE:
+                    modifications.add(MergeModification.fromStream(input, getVersion(), receiver));
+                    break;
+                case Modification.DELETE:
+                    modifications.add(DeleteModification.fromStream(input, getVersion()));
+                    break;
+                default:
+                    break;
+            }
         }
+    }
 
-        try {
-            for (Modification mod: modifications) {
+    private void writeExternalLegacy(final ObjectOutput out) throws IOException {
+        try (NormalizedNodeDataOutput stream = getStreamVersion().newDataOutput(out)) {
+            for (Modification mod : modifications) {
                 out.writeByte(mod.getType());
-                mod.writeExternal(out);
+                mod.writeTo(stream);
             }
-        } finally {
-            SerializationUtils.REUSABLE_WRITER_TL.remove();
         }
     }
 
-    public static MutableCompositeModification fromSerializable(Object serializable) {
-        Preconditions.checkArgument(serializable instanceof MutableCompositeModification);
+    private void readExternalModern(final NormalizedNodeDataInput in, final int size) throws IOException {
+        final ReusableStreamReceiver receiver = ReusableImmutableNormalizedNodeStreamWriter.create();
+        for (int i = 0; i < size; i++) {
+            final byte type = in.readByte();
+            switch (type) {
+                case Modification.WRITE:
+                    modifications.add(WriteModification.fromStream(in, getVersion(), receiver));
+                    break;
+                case Modification.MERGE:
+                    modifications.add(MergeModification.fromStream(in, getVersion(), receiver));
+                    break;
+                case Modification.DELETE:
+                    modifications.add(DeleteModification.fromStream(in, getVersion()));
+                    break;
+                default:
+                    break;
+            }
+        }
+    }
+
+    private void writeExternalModern(final ObjectOutput out) throws IOException {
+        try (NormalizedNodeDataOutput stream = getStreamVersion().newDataOutput(out)) {
+            for (Modification mod : modifications) {
+                stream.writeByte(mod.getType());
+                mod.writeTo(stream);
+            }
+        }
+    }
+
+    public static MutableCompositeModification fromSerializable(final Object serializable) {
+        checkArgument(serializable instanceof MutableCompositeModification);
         return (MutableCompositeModification)serializable;
     }
+
+    @Override
+    public void writeTo(final NormalizedNodeDataOutput out) throws IOException {
+        throw new UnsupportedOperationException();
+    }
 }
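
A small usage sketch for the reworked composite, assumed to sit in the modification package next to the classes above; the clone-based round trip is only an assumption about one way to exercise the legacy/modern framing.

    import org.apache.commons.lang3.SerializationUtils;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class CompositeModificationExample {
        static MutableCompositeModification cloneViaJavaSerialization(final YangInstanceIdentifier path,
                final NormalizedNode data) {
            final MutableCompositeModification composite = new MutableCompositeModification();
            composite.addModification(new WriteModification(path, data));
            composite.addModification(new DeleteModification(path));
            // writeExternal()/readExternal() choose legacy or modern framing from getVersion(), so a
            // plain Java-serialization round trip exercises those paths end to end.
            return SerializationUtils.clone(composite);
        }
    }
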
index 70125e29e52a063f9e5d0e8cbe2db30d79f5d0bb..dc2d3fff4310564f3cf38e8a0b5f91b34786a1a3 100644 (file)
@@ -5,36 +5,44 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
+import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils.Applier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 /**
  * WriteModification stores all the parameters required to write data to the specified path.
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class WriteModification extends AbstractModification {
     private static final long serialVersionUID = 1L;
 
-    private NormalizedNode<?, ?> data;
+    private NormalizedNode data;
 
     public WriteModification() {
         this(DataStoreVersions.CURRENT_VERSION);
     }
 
-    public WriteModification(short version) {
+    public WriteModification(final short version) {
         super(version);
     }
 
-    public WriteModification(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
+    WriteModification(final short version, final YangInstanceIdentifier path, final NormalizedNode data) {
+        super(version, path);
+        this.data = data;
+    }
+
+    public WriteModification(final YangInstanceIdentifier path, final NormalizedNode data) {
         super(path);
         this.data = data;
     }
@@ -49,7 +57,7 @@ public class WriteModification extends AbstractModification {
         transaction.write(getPath(), data);
     }
 
-    public NormalizedNode<?, ?> getData() {
+    public NormalizedNode getData() {
         return data;
     }
 
@@ -59,23 +67,29 @@ public class WriteModification extends AbstractModification {
     }
 
     @Override
-    public void readExternal(ObjectInput in) {
-        SerializationUtils.deserializePathAndNode(in, this, APPLIER);
+    public void readExternal(final ObjectInput in) throws IOException {
+        SerializationUtils.readNodeAndPath(in, this, (instance, path, node) -> {
+            instance.setPath(path);
+            instance.data = node;
+        });
     }
 
     @Override
-    public void writeExternal(ObjectOutput out) {
-        SerializationUtils.serializePathAndNode(getPath(), data, out);
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        SerializationUtils.writeNodeAndPath(out, getPath(), data);
     }
 
-    public static WriteModification fromStream(ObjectInput in, short version) {
-        WriteModification mod = new WriteModification(version);
-        mod.readExternal(in);
-        return mod;
+    public static WriteModification fromStream(final NormalizedNodeDataInput in, final short version,
+            final ReusableStreamReceiver receiver) throws IOException {
+        final NormalizedNode node = in.readNormalizedNode(receiver);
+        final YangInstanceIdentifier path = in.readYangInstanceIdentifier();
+        return new WriteModification(version, path, node);
     }
 
-    private static final Applier<WriteModification> APPLIER = (instance, path, node) -> {
-        instance.setPath(path);
-        instance.data = node;
-    };
+    @Override
+    public void writeTo(final NormalizedNodeDataOutput out) throws IOException {
+        // FIXME: this should be inverted, as the path helps receivers in establishment of context
+        out.writeNormalizedNode(data);
+        out.writeYangInstanceIdentifier(getPath());
+    }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/AT.java
new file mode 100644 (file)
index 0000000..8002815
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link AbortTransactionPayload}.
+ */
+final class AT implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public AT() {
+        // For Externalizable
+    }
+
+    AT(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(TransactionIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new AbortTransactionPayload(identifier, bytes);
+    }
+}
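
AT follows the classic serialization-proxy shape: only the proxy travels on the wire and readResolve() swaps the real payload back in. A generic, self-contained illustration of that shape; ExamplePayload and ExampleProxy are made up and not part of this code base.

    import java.io.Externalizable;
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;
    import java.io.Serializable;

    final class ExamplePayload implements Serializable {
        private static final long serialVersionUID = 1L;

        final byte[] bytes;

        ExamplePayload(final byte[] bytes) {
            this.bytes = bytes;
        }

        @java.io.Serial
        private Object writeReplace() {
            // Serialize the compact proxy instead of this object.
            return new ExampleProxy(bytes);
        }
    }

    final class ExampleProxy implements Externalizable {
        private static final long serialVersionUID = 1L;

        private byte[] bytes;

        public ExampleProxy() {
            // For Externalizable
        }

        ExampleProxy(final byte[] bytes) {
            this.bytes = bytes;
        }

        @Override
        public void writeExternal(final ObjectOutput out) throws IOException {
            out.writeInt(bytes.length);
            out.write(bytes);
        }

        @Override
        public void readExternal(final ObjectInput in) throws IOException {
            bytes = new byte[in.readInt()];
            in.readFully(bytes);
        }

        @java.io.Serial
        private Object readResolve() {
            // Replace the proxy with the real object after deserialization.
            return new ExamplePayload(bytes);
        }
    }
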
index 0e34756cede02dfef04496f4f2e1f4066391b220..3c765be6152284aa8f50b17a1d3b3a223fa4fe06 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class AbortTransactionPayload extends AbstractIdentifiablePayload<TransactionIdentifier> {
-    private static final class Proxy extends AbstractProxy<TransactionIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected TransactionIdentifier readIdentifier(final DataInput in) throws IOException {
-            return TransactionIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected AbortTransactionPayload createObject(final TransactionIdentifier identifier,
-                final byte[] serialized) {
-            return new AbortTransactionPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(AbortTransactionPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(AT::new);
 
     AbortTransactionPayload(final TransactionIdentifier transactionId, final byte[] serialized) {
         super(transactionId, serialized);
@@ -62,13 +37,18 @@ public final class AbortTransactionPayload extends AbstractIdentifiablePayload<T
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", transactionId, e);
-            throw new RuntimeException("Failed to serialized " + transactionId, e);
+            throw new IllegalStateException("Failed to serialize " + transactionId, e);
         }
         return new AbortTransactionPayload(transactionId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected AT externalizableProxy(final byte[] serialized) {
+        return new AT(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
index 6c6a5af7545225cdfc86a42ab7e251786e64e515..9d1bb9a3464c31aabc0e3191dc9060a14daf62f8 100644 (file)
@@ -7,57 +7,57 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collection;
-import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
 
 /**
- * Abstract base class for our internal implementation of {@link DataTreeCandidateNode},
- * which we instantiate from a serialized stream. We do not retain the before-image and
- * do not implement {@link #getModifiedChild(PathArgument)}, as that method is only
- * useful for end users. Instances based on this class should never be leaked outside of
- * this component.
+ * Abstract base class for our internal implementation of {@link DataTreeCandidateNode}, which we instantiate from a
+ * serialized stream. We do not retain the before-image and do not implement {@link #modifiedChild(PathArgument)}, as
+ * that method is only useful for end users. Instances based on this class should never be leaked outside of this
+ * component.
  */
 abstract class AbstractDataTreeCandidateNode implements DataTreeCandidateNode {
-    private final ModificationType type;
+    private final @NonNull ModificationType type;
 
     protected AbstractDataTreeCandidateNode(final ModificationType type) {
-        this.type = Preconditions.checkNotNull(type);
+        this.type = requireNonNull(type);
     }
 
     @Override
-    public final DataTreeCandidateNode getModifiedChild(final PathArgument identifier) {
+    public final DataTreeCandidateNode modifiedChild(final PathArgument identifier) {
         throw new UnsupportedOperationException("Not implemented");
     }
 
     @Override
-    public final ModificationType getModificationType() {
+    public final ModificationType modificationType() {
         return type;
     }
 
     @Override
-    public final Optional<NormalizedNode<?, ?>> getDataBefore() {
+    public final NormalizedNode dataBefore() {
         throw new UnsupportedOperationException("Before-image not available after serialization");
     }
 
     static DataTreeCandidateNode createUnmodified() {
         return new AbstractDataTreeCandidateNode(ModificationType.UNMODIFIED) {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 throw new UnsupportedOperationException("Root node does not have an identifier");
             }
 
             @Override
-            public Optional<NormalizedNode<?, ?>> getDataAfter() {
+            public NormalizedNode dataAfter() {
                 throw new UnsupportedOperationException("After-image not available after serialization");
             }
 
             @Override
-            public Collection<DataTreeCandidateNode> getChildNodes() {
+            public Collection<DataTreeCandidateNode> childNodes() {
                 throw new UnsupportedOperationException("Children not available after serialization");
             }
         };
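
The candidate-node accessors are renamed here (name(), modificationType(), dataBefore()/dataAfter(), childNodes()). A small traversal sketch against the general DataTreeCandidateNode contract; illustrative only, since the serialized-form nodes in this file deliberately refuse some of these calls.

    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
    import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;

    final class CandidateNodeDump {
        // Recursively print every modified node using the renamed accessors.
        static void dump(final DataTreeCandidateNode node, final String indent) {
            final ModificationType type = node.modificationType();
            if (type != ModificationType.UNMODIFIED) {
                System.out.println(indent + node.name() + " -> " + type);
                for (DataTreeCandidateNode child : node.childNodes()) {
                    dump(child, indent + "  ");
                }
            }
        }
    }
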
index f5a210159fdcde3603e752b2550acb985ce4657c..885b6c5336dc4999075f33b01bf4e19bfbe1af4a 100644 (file)
@@ -7,73 +7,89 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
-import com.google.common.io.ByteStreams;
-import java.io.DataInput;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.io.Serializable;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import java.util.function.Function;
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
 import org.opendaylight.yangtools.concepts.Identifier;
 
 /**
- * Abstract base class for {@link Payload}s which hold a single {@link Identifier}.
- *
- * @author Robert Varga
+ * Abstract base class for {@link IdentifiablePayload}s which hold a single {@link Identifier}.
  */
-public abstract class AbstractIdentifiablePayload<T extends Identifier>
-        extends Payload implements Identifiable<T>, Serializable {
-    protected abstract static class AbstractProxy<T extends Identifier> implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private byte[] serialized;
-        private T identifier;
-
-        public AbstractProxy() {
-            // For Externalizable
-        }
+public abstract class AbstractIdentifiablePayload<T extends Identifier> extends IdentifiablePayload<T> {
+    /**
+     * An {@link Externalizable} with default implementations we expect our implementations to comply with. On-wire
+     * serialization format is defined by {@link #bytes()}.
+     */
+    protected interface SerialForm extends Externalizable {
+        /**
+         * Return the serial form of this object's contents, corresponding to
+         * {@link AbstractIdentifiablePayload#serialized}.
+         *
+         * @return Serialized form
+         */
+        byte[] bytes();
 
-        protected AbstractProxy(final byte[] serialized) {
-            this.serialized = Preconditions.checkNotNull(serialized);
-        }
+        /**
+         * Resolve this proxy to an actual {@link AbstractIdentifiablePayload}.
+         *
+         * @return A payload.
+         */
+        @java.io.Serial
+        Object readResolve();
 
-        @Override
-        public final void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(serialized.length);
-            out.write(serialized);
-        }
+        /**
+         * Restore state from specified serialized form.
+         *
+         * @param newBytes Serialized form, as returned by {@link #bytes()}
+         * @throws IOException when a deserialization problem occurs
+         */
+        void readExternal(byte[] newBytes) throws IOException;
 
+        /**
+         * {@inheritDoc}
+         *
+         * <p>
+         * The default implementation is canonical and should never be overridden.
+         */
         @Override
-        public final void readExternal(final ObjectInput in) throws IOException {
-            final int length = in.readInt();
-            serialized = new byte[length];
-            in.readFully(serialized);
-            identifier = Verify.verifyNotNull(readIdentifier(ByteStreams.newDataInput(serialized)));
+        default void readExternal(final ObjectInput in) throws IOException {
+            final var bytes = new byte[in.readInt()];
+            in.readFully(bytes);
+            readExternal(bytes);
         }
 
-        protected final Object readResolve() {
-            return Verify.verifyNotNull(createObject(identifier, serialized));
+        /**
+         * {@inheritDoc}
+         *
+         * <p>
+         * The default implementation is canonical and should never be overridden.
+         */
+        @Override
+        default void writeExternal(final ObjectOutput out) throws IOException {
+            final var bytes = bytes();
+            out.writeInt(bytes.length);
+            out.write(bytes);
         }
-
-        @Nonnull
-        protected abstract T readIdentifier(@Nonnull DataInput in) throws IOException;
-
-        @Nonnull
-        @SuppressWarnings("checkstyle:hiddenField")
-        protected abstract Identifiable<T> createObject(@Nonnull T identifier, @Nonnull byte[] serialized);
     }
 
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
-    private final byte[] serialized;
-    private final T identifier;
 
-    AbstractIdentifiablePayload(@Nonnull final T identifier, @Nonnull final byte[] serialized) {
-        this.identifier = Preconditions.checkNotNull(identifier);
-        this.serialized = Preconditions.checkNotNull(serialized);
+    private final byte @NonNull [] serialized;
+    private final @NonNull T identifier;
+
+    AbstractIdentifiablePayload(final @NonNull T identifier, final byte @NonNull[] serialized) {
+        this.identifier = requireNonNull(identifier);
+        this.serialized = requireNonNull(serialized);
     }
 
     @Override
@@ -86,11 +102,34 @@ public abstract class AbstractIdentifiablePayload<T extends Identifier>
         return serialized.length;
     }
 
-    protected final Object writeReplace() {
-        return Verify.verifyNotNull(externalizableProxy(serialized));
+    protected final byte @NonNull [] serialized() {
+        return serialized;
+    }
+
+    @Override
+    public final int serializedSize() {
+        // TODO: this is not entirely accurate, as the serialization stream has additional overheads:
+        //       - 3 bytes for each block of data <256 bytes
+        //       - 5 bytes for each block of data >=256 bytes
+        //       - each block of data is limited to 1024 bytes as per serialization spec
+        return size() + externalizableProxySize();
+    }
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("identifier", identifier).add("size", size()).toString();
     }
 
-    @Nonnull
-    @SuppressWarnings("checkstyle:hiddenField")
-    protected abstract AbstractProxy<T> externalizableProxy(@Nonnull byte[] serialized);
+    @Override
+    public final Object writeReplace() {
+        return verifyNotNull(externalizableProxy(serialized));
+    }
+
+    protected abstract @NonNull SerialForm externalizableProxy(byte @NonNull[] serialized);
+
+    protected abstract int externalizableProxySize();
+
+    protected static final int externalizableProxySize(final Function<byte[], ? extends SerialForm> constructor) {
+        return SerializationUtils.serialize(constructor.apply(new byte[0])).length;
+    }
 }
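
The SerialForm contract above settles on a very small wire format: the default writeExternal() emits a 4-byte length followed by the raw bytes(), and the default readExternal() reads that frame back before delegating to readExternal(byte[]). A rough stand-alone equivalent of that framing, sketched with plain streams (class and method names here are illustrative only, not part of this change):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class SerialFormFraming {
        private SerialFormFraming() {
            // static sketch only
        }

        // Mirrors SerialForm.writeExternal(): [int length][bytes]
        static byte[] frame(final byte[] bytes) throws IOException {
            final var baos = new ByteArrayOutputStream(Integer.BYTES + bytes.length);
            try (var dos = new DataOutputStream(baos)) {
                dos.writeInt(bytes.length);
                dos.write(bytes);
            }
            return baos.toByteArray();
        }
    }
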
index 997fa45c6b1e0bc01466c47897d2e52ad1780862..e634a2de7a029270ada915be0915e2f54baeecd9 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Abstract base exception used for reporting version mismatches from {@link PayloadVersion}.
@@ -19,12 +20,13 @@ import javax.annotation.Nonnull;
 @Beta
 public abstract class AbstractVersionException extends Exception {
     private static final long serialVersionUID = 1L;
-    private final PayloadVersion closestVersion;
+
+    private final @NonNull PayloadVersion closestVersion;
     private final int version;
 
     AbstractVersionException(final String message, final short version, final PayloadVersion closestVersion) {
         super(message);
-        this.closestVersion = Preconditions.checkNotNull(closestVersion);
+        this.closestVersion = requireNonNull(closestVersion);
         this.version = Short.toUnsignedInt(version);
     }
 
@@ -42,9 +44,7 @@ public abstract class AbstractVersionException extends Exception {
      *
      * @return Closest supported {@link PayloadVersion}
      */
-    @Nonnull
-    public final PayloadVersion getClosestVersion() {
+    public final @NonNull PayloadVersion getClosestVersion() {
         return closestVersion;
     }
-
 }
index 7ebb0055f2da2c1fcbbd9f8843ca2abc0c65e560..e87ce58a8b6b25574bfd11759df0468ecc3d30aa 100644 (file)
@@ -7,12 +7,13 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.util.Optional;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -27,32 +28,35 @@ abstract class AbstractVersionedShardDataTreeSnapshot extends ShardDataTreeSnaps
     private static final Logger LOG = LoggerFactory.getLogger(AbstractVersionedShardDataTreeSnapshot.class);
 
     @SuppressWarnings("checkstyle:FallThrough")
-    static ShardDataTreeSnapshot versionedDeserialize(final ObjectInput in) throws IOException {
+    static @NonNull ShardSnapshotState versionedDeserialize(final ObjectInput in) throws IOException {
         final PayloadVersion version = PayloadVersion.readFrom(in);
         switch (version) {
-            case BORON:
-                // Boron snapshots use Java Serialization
-                try {
-                    return (ShardDataTreeSnapshot) in.readObject();
-                } catch (ClassNotFoundException e) {
-                    LOG.error("Failed to serialize data tree snapshot", e);
-                    throw new IOException("Snapshot failed to deserialize", e);
-                }
+            case CHLORINE_SR2:
+                return new ShardSnapshotState(readSnapshot(in), true);
+            case POTASSIUM:
+                return new ShardSnapshotState(readSnapshot(in), false);
             case TEST_FUTURE_VERSION:
             case TEST_PAST_VERSION:
                 // These versions are never returned and this code is effectively dead
-                break;
             default:
-                throw new IOException("Invalid payload version in snapshot");
+                // Not included as default in above switch to ensure we get warnings when new versions are added
+                throw new IOException("Encountered unhandled version " + version);
         }
+    }
 
-        // Not included as default in above switch to ensure we get warnings when new versions are added
-        throw new IOException("Encountered unhandled version" + version);
+    // Both CHLORINE_SR2 and POTASSIUM snapshots use Java Serialization, but differ in stream format
+    private static @NonNull ShardDataTreeSnapshot readSnapshot(final ObjectInput in) throws IOException {
+        try {
+            return (ShardDataTreeSnapshot) in.readObject();
+        } catch (ClassNotFoundException e) {
+            LOG.error("Failed to serialize data tree snapshot", e);
+            throw new IOException("Snapshot failed to deserialize", e);
+        }
     }
 
     @Override
-    public final Optional<NormalizedNode<?, ?>> getRootNode() {
-        return Optional.of(Verify.verifyNotNull(rootNode(), "Snapshot %s returned non-present root node", getClass()));
+    public final Optional<NormalizedNode> getRootNode() {
+        return Optional.of(verifyNotNull(rootNode(), "Snapshot %s returned non-present root node", getClass()));
     }
 
     /**
@@ -60,21 +64,20 @@ abstract class AbstractVersionedShardDataTreeSnapshot extends ShardDataTreeSnaps
      *
      * @return The root node.
      */
-    @Nonnull
-    abstract NormalizedNode<?, ?> rootNode();
+    abstract @NonNull NormalizedNode rootNode();
 
     /**
      * Return the snapshot payload version. Implementations of this method should return a constant.
      *
      * @return Snapshot payload version
      */
-    @Nonnull
-    abstract PayloadVersion version();
+    abstract @NonNull PayloadVersion version();
 
     private void versionedSerialize(final ObjectOutput out, final PayloadVersion version) throws IOException {
         switch (version) {
-            case BORON:
-                // Boron snapshots use Java Serialization
+            case CHLORINE_SR2:
+            case POTASSIUM:
+                // Sodium onwards snapshots use Java Serialization, but differ in stream format
                 out.writeObject(this);
                 return;
             case TEST_FUTURE_VERSION:
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CH.java
new file mode 100644 (file)
index 0000000..a0af841
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link CreateLocalHistoryPayload}.
+ */
+final class CH implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CH() {
+        // For Externalizable
+    }
+
+    CH(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new CreateLocalHistoryPayload(identifier, bytes);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/CT.java
new file mode 100644 (file)
index 0000000..2530d33
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.io.StreamCorruptedException;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Chunked;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.Simple;
+import org.opendaylight.controller.cluster.io.ChunkedByteArray;
+
+/**
+ * Serialization proxy for {@link CommitTransactionPayload}.
+ */
+final class CT implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private CommitTransactionPayload payload;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public CT() {
+        // For Externalizable
+    }
+
+    CT(final CommitTransactionPayload payload) {
+        this.payload = requireNonNull(payload);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(payload.size());
+        payload.writeBytes(out);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final int length = in.readInt();
+        if (length < 0) {
+            throw new StreamCorruptedException("Invalid payload length " + length);
+        } else if (length < CommitTransactionPayload.MAX_ARRAY_SIZE) {
+            final byte[] serialized = new byte[length];
+            in.readFully(serialized);
+            payload = new Simple(serialized);
+        } else {
+            payload = new Chunked(ChunkedByteArray.readFrom(in, length, CommitTransactionPayload.MAX_ARRAY_SIZE));
+        }
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(payload);
+    }
+}
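
CT.readExternal() is the single place that decides between the Simple and Chunked representations: non-negative lengths below CommitTransactionPayload.MAX_ARRAY_SIZE land in a single byte[], anything larger is rebuilt as a ChunkedByteArray. The same decision extracted as a pure function for illustration (useChunked is a hypothetical helper, not part of this change):

    // Mirrors the branch in CT.readExternal(); maxArraySize corresponds to CommitTransactionPayload.MAX_ARRAY_SIZE,
    // i.e. the org.opendaylight.controller.cluster.datastore.persisted.max-array-size system property
    // (default 256 KiB) rounded up to a power of two.
    static boolean useChunked(final int payloadLength, final int maxArraySize) {
        if (payloadLength < 0) {
            throw new IllegalArgumentException("Invalid payload length " + payloadLength);
        }
        return payloadLength >= maxArraySize;
    }
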
index 9acc113a422b9b962e122a5ad11c94c43c69be3b..9d6f526616156da4d3def73f9905ac1a7496419c 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class CloseLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
-    private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
-            return LocalHistoryIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected CloseLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
-                final byte[] serialized) {
-            return new CloseLocalHistoryPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(CloseLocalHistoryPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(DH::new);
 
     CloseLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
         super(historyId, serialized);
@@ -62,13 +37,18 @@ public final class CloseLocalHistoryPayload extends AbstractIdentifiablePayload<
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", historyId, e);
-            throw new RuntimeException("Failed to serialize " + historyId, e);
+            throw new IllegalStateException("Failed to serialize " + historyId, e);
         }
         return new CloseLocalHistoryPayload(historyId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected DH externalizableProxy(final byte[] serialized) {
+        return new DH(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
index ea5fb532557517e568abcf41fb28118b6a0f648f..45cbcc851a80ead2bf0508d024a61f2cd6cc3a09 100644 (file)
@@ -7,22 +7,31 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static com.google.common.math.IntMath.ceilingPowerOfTwo;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.io.ByteArrayDataOutput;
+import com.google.common.base.MoreObjects;
 import com.google.common.io.ByteStreams;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.DataInput;
-import java.io.Externalizable;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.Map.Entry;
+import org.apache.commons.lang3.SerializationUtils;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.controller.cluster.io.ChunkedByteArray;
+import org.opendaylight.controller.cluster.io.ChunkedOutputStream;
+import org.opendaylight.controller.cluster.raft.messages.IdentifiablePayload;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,81 +42,183 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 @Beta
-public final class CommitTransactionPayload extends Payload implements Serializable {
+public abstract sealed class CommitTransactionPayload extends IdentifiablePayload<TransactionIdentifier>
+        implements Serializable {
+    @NonNullByDefault
+    public record CandidateTransaction(
+            TransactionIdentifier transactionId,
+            DataTreeCandidate candidate,
+            NormalizedNodeStreamVersion streamVersion) {
+        public CandidateTransaction {
+            requireNonNull(transactionId);
+            requireNonNull(candidate);
+            requireNonNull(streamVersion);
+        }
+    }
+
     private static final Logger LOG = LoggerFactory.getLogger(CommitTransactionPayload.class);
+    private static final long serialVersionUID = 1L;
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private byte[] serialized;
+    static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger(
+        "org.opendaylight.controller.cluster.datastore.persisted.max-array-size", 256 * 1024));
 
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
+    private volatile CandidateTransaction candidate = null;
 
-        Proxy(final byte[] serialized) {
-            this.serialized = Preconditions.checkNotNull(serialized);
-        }
+    private CommitTransactionPayload() {
+        // hidden on purpose
+    }
 
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(serialized.length);
-            out.write(serialized);
+    public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
+            final DataTreeCandidate candidate, final PayloadVersion version, final int initialSerializedBufferCapacity)
+                    throws IOException {
+        final var cos = new ChunkedOutputStream(initialSerializedBufferCapacity, MAX_ARRAY_SIZE);
+        try (var dos = new DataOutputStream(cos)) {
+            transactionId.writeTo(dos);
+            DataTreeCandidateInputOutput.writeDataTreeCandidate(dos, version, candidate);
         }
 
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            final int length = in.readInt();
-            serialized = new byte[length];
-            in.readFully(serialized);
-        }
+        final var source = cos.toVariant();
+        LOG.debug("Initial buffer capacity {}, actual serialized size {}", initialSerializedBufferCapacity, cos.size());
+        return source.isFirst() ? new Simple(source.getFirst()) : new Chunked(source.getSecond());
+    }
+
+    @VisibleForTesting
+    public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
+            final DataTreeCandidate candidate, final PayloadVersion version) throws IOException {
+        return create(transactionId, candidate, version, 512);
+    }
 
-        private Object readResolve() {
-            return new CommitTransactionPayload(serialized);
+    @VisibleForTesting
+    public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
+            final DataTreeCandidate candidate) throws IOException {
+        return create(transactionId, candidate, PayloadVersion.current());
+    }
+
+    public @NonNull CandidateTransaction getCandidate() throws IOException {
+        var localCandidate = candidate;
+        if (localCandidate == null) {
+            synchronized (this) {
+                localCandidate = candidate;
+                if (localCandidate == null) {
+                    candidate = localCandidate = getCandidate(ReusableImmutableNormalizedNodeStreamWriter.create());
+                }
+            }
         }
+        return localCandidate;
     }
 
-    private static final long serialVersionUID = 1L;
+    public final @NonNull CandidateTransaction getCandidate(final ReusableStreamReceiver receiver) throws IOException {
+        final var in = newDataInput();
+        final var transactionId = TransactionIdentifier.readFrom(in);
+        final var readCandidate = DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver);
 
-    private final byte[] serialized;
+        return new CandidateTransaction(transactionId, readCandidate.candidate(), readCandidate.version());
+    }
 
-    CommitTransactionPayload(final byte[] serialized) {
-        this.serialized = Preconditions.checkNotNull(serialized);
+    @Override
+    public TransactionIdentifier getIdentifier() {
+        try {
+            return getCandidate().transactionId();
+        } catch (IOException e) {
+            throw new IllegalStateException("Candidate deserialization failed.", e);
+        }
     }
 
-    public static CommitTransactionPayload create(final TransactionIdentifier transactionId,
-            final DataTreeCandidate candidate, final int initialSerializedBufferCapacity) throws IOException {
-        final ByteArrayDataOutput out = ByteStreams.newDataOutput(initialSerializedBufferCapacity);
-        transactionId.writeTo(out);
-        DataTreeCandidateInputOutput.writeDataTreeCandidate(out, candidate);
-        final byte[] serialized = out.toByteArray();
+    @Override
+    public final int serializedSize() {
+        // TODO: this is not entirely accurate, as the byte[] can be chunked by the serialization stream
+        return ProxySizeHolder.PROXY_SIZE + size();
+    }
 
-        LOG.debug("Initial buffer capacity {}, actual serialized size {}",
-                initialSerializedBufferCapacity, serialized.length);
+    /**
+     * The cached candidate needs to be cleared after it has been applied to the DataTree, otherwise the deserialized
+     * data would be kept in memory even though it is no longer needed, wasting memory. Calling this method signals
+     * that this is the last time the candidate is needed and that it is safe to clear it.
+     */
+    public @NonNull CandidateTransaction acquireCandidate() throws IOException {
+        final var localCandidate = getCandidate();
+        candidate = null;
+        return localCandidate;
+    }
 
-        return new CommitTransactionPayload(serialized);
+    @Override
+    public final String toString() {
+        final var helper = MoreObjects.toStringHelper(this);
+        final var localCandidate = candidate;
+        if (localCandidate != null) {
+            helper.add("identifier", candidate.transactionId());
+        }
+        return helper.add("size", size()).toString();
     }
 
-    @VisibleForTesting
-    public static CommitTransactionPayload create(final TransactionIdentifier transactionId,
-            final DataTreeCandidate candidate) throws IOException {
-        return create(transactionId, candidate, 512);
+    abstract void writeBytes(ObjectOutput out) throws IOException;
+
+    abstract DataInput newDataInput();
+
+    @Override
+    public final Object writeReplace() {
+        return new CT(this);
     }
 
-    public Entry<TransactionIdentifier, DataTreeCandidate> getCandidate() throws IOException {
-        final DataInput in = ByteStreams.newDataInput(serialized);
-        return new SimpleImmutableEntry<>(TransactionIdentifier.readFrom(in),
-                DataTreeCandidateInputOutput.readDataTreeCandidate(in));
+    static final class Simple extends CommitTransactionPayload {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        private final byte[] serialized;
+
+        Simple(final byte[] serialized) {
+            this.serialized = requireNonNull(serialized);
+        }
+
+        @Override
+        public int size() {
+            return serialized.length;
+        }
+
+        @Override
+        DataInput newDataInput() {
+            return ByteStreams.newDataInput(serialized);
+        }
+
+        @Override
+        void writeBytes(final ObjectOutput out) throws IOException {
+            out.write(serialized);
+        }
     }
 
-    @Override
-    public int size() {
-        return serialized.length;
+    static final class Chunked extends CommitTransactionPayload {
+        @java.io.Serial
+        private static final long serialVersionUID = 1L;
+
+        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via serialization proxy")
+        private final ChunkedByteArray source;
+
+        Chunked(final ChunkedByteArray source) {
+            this.source = requireNonNull(source);
+        }
+
+        @Override
+        void writeBytes(final ObjectOutput out) throws IOException {
+            source.copyTo(out);
+        }
+
+        @Override
+        public int size() {
+            return source.size();
+        }
+
+        @Override
+        DataInput newDataInput() {
+            return new DataInputStream(source.openStream());
+        }
     }
 
-    private Object writeReplace() {
-        return new Proxy(serialized);
+    // Exists to break initialization dependency between CommitTransactionPayload/Simple/CT
+    private static final class ProxySizeHolder {
+        static final int PROXY_SIZE = SerializationUtils.serialize(new CT(new Simple(new byte[0]))).length;
+
+        private ProxySizeHolder() {
+            // Hidden on purpose
+        }
     }
 }
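
getCandidate() above lazily deserializes the candidate and caches it in a volatile field using the classic double-checked locking idiom, while acquireCandidate() hands the candidate out one last time and drops the cache so the deserialized tree can be garbage-collected. The same idiom in isolation, for any expensive-to-decode value (LazyDecoded is an illustrative name, not part of this change):

    import static java.util.Objects.requireNonNull;

    import java.util.concurrent.Callable;

    final class LazyDecoded<T> {
        private final Callable<T> decoder;
        private volatile T cached;

        LazyDecoded(final Callable<T> decoder) {
            this.decoder = requireNonNull(decoder);
        }

        // Decode at most once; concurrent callers observe the cached value via the volatile read.
        T get() throws Exception {
            var local = cached;
            if (local == null) {
                synchronized (this) {
                    local = cached;
                    if (local == null) {
                        cached = local = decoder.call();
                    }
                }
            }
            return local;
        }

        // Mirrors acquireCandidate(): last use, drop the cache afterwards.
        T acquire() throws Exception {
            final var local = get();
            cached = null;
            return local;
        }
    }
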
index dbf72f38d8de0f0016a4651c3ef46fcebe5e4760..928503a9fc78ce7f62cfd71a1b903800e56cc1a9 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class CreateLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
-    private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
-            return LocalHistoryIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected CreateLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
-                final byte[] serialized) {
-            return new CreateLocalHistoryPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(CreateLocalHistoryPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(CH::new);
 
     CreateLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
         super(historyId, serialized);
@@ -62,13 +37,18 @@ public final class CreateLocalHistoryPayload extends AbstractIdentifiablePayload
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", historyId, e);
-            throw new RuntimeException("Failed to serialize " + historyId, e);
+            throw new IllegalStateException("Failed to serialize " + historyId, e);
         }
         return new CreateLocalHistoryPayload(historyId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected CH externalizableProxy(final byte[] serialized) {
+        return new CH(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
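
The PROXY_SIZE constants introduced in these payload classes are computed once, at class initialization, by serializing the corresponding proxy wrapped around an empty byte[]; serializedSize() then approximates the on-wire cost as that fixed framing overhead plus the identifier bytes. Roughly:

    // Illustrative fragment; CH's byte[] constructor is package-private, so this only compiles in-package.
    final int chOverhead = org.apache.commons.lang3.SerializationUtils.serialize(new CH(new byte[0])).length;
    // A payload whose identifier serializes to N bytes then reports roughly N + chOverhead from serializedSize().
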
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DH.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DH.java
new file mode 100644 (file)
index 0000000..4ce29b1
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link CloseLocalHistoryPayload}.
+ */
+final class DH implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DH() {
+        // For Externalizable
+    }
+
+    DH(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new CloseLocalHistoryPayload(identifier, bytes);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DS.java
new file mode 100644 (file)
index 0000000..091eeed
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
+
+/**
+ * Serialization proxy for {@link DatastoreSnapshot}.
+ */
+final class DS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private DatastoreSnapshot datastoreSnapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DS() {
+        // For Externalizable
+    }
+
+    DS(final DatastoreSnapshot datastoreSnapshot) {
+        this.datastoreSnapshot = requireNonNull(datastoreSnapshot);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final var type = (String) in.readObject();
+        final var snapshot = (ShardManagerSnapshot) in.readObject();
+
+        final int size = in.readInt();
+        var localShardSnapshots = new ArrayList<ShardSnapshot>(size);
+        for (int i = 0; i < size; i++) {
+            localShardSnapshots.add((ShardSnapshot) in.readObject());
+        }
+
+        datastoreSnapshot = new DatastoreSnapshot(type, snapshot, localShardSnapshots);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(datastoreSnapshot.getType());
+        out.writeObject(datastoreSnapshot.getShardManagerSnapshot());
+
+        final var shardSnapshots = datastoreSnapshot.getShardSnapshots();
+        out.writeInt(shardSnapshots.size());
+        for (var shardSnapshot : shardSnapshots) {
+            out.writeObject(shardSnapshot);
+        }
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(datastoreSnapshot);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DSS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DSS.java
new file mode 100644 (file)
index 0000000..9edb090
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot.ShardSnapshot;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
+
+/**
+ * Serialization proxy for {@link ShardDataTreeSnapshot}.
+ */
+final class DSS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ShardSnapshot shardSnapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DSS() {
+        // For Externalizable
+    }
+
+    DSS(final ShardSnapshot shardSnapshot) {
+        this.shardSnapshot = requireNonNull(shardSnapshot);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(shardSnapshot.getName());
+        out.writeObject(shardSnapshot.getSnapshot());
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        shardSnapshot = new ShardSnapshot((String) in.readObject(), (Snapshot) in.readObject());
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(shardSnapshot);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DT.java
new file mode 100644 (file)
index 0000000..cc1a948
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link DisableTrackingPayload}.
+ */
+final class DT implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ClientIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public DT() {
+        // For Externalizable
+    }
+
+    DT(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(ClientIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new DisableTrackingPayload(identifier, bytes);
+    }
+}
index bc1fca165544174c96043136a9f98e176c57e777..a5a76e06af8af2c9eda5daf6d40b9d9eecaa851c 100644 (file)
@@ -7,34 +7,45 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataInput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputOutput;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNodes;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import java.util.List;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidateNodes;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * Utility serialization/deserialization for {@link DataTreeCandidate}. Note that this utility does not maintain
  * before-image information across serialization.
- *
- * @author Robert Varga
  */
 @Beta
 public final class DataTreeCandidateInputOutput {
+    public record DataTreeCandidateWithVersion(
+            @NonNull DataTreeCandidate candidate,
+            @NonNull NormalizedNodeStreamVersion version) implements Immutable {
+        public DataTreeCandidateWithVersion {
+            requireNonNull(candidate);
+            requireNonNull(version);
+        }
+    }
+
     private static final Logger LOG = LoggerFactory.getLogger(DataTreeCandidateInputOutput.class);
     private static final byte DELETE = 0;
     private static final byte SUBTREE_MODIFIED = 1;
@@ -47,166 +58,147 @@ public final class DataTreeCandidateInputOutput {
         throw new UnsupportedOperationException();
     }
 
-    private static DataTreeCandidateNode readModifiedNode(final ModificationType type,
-            final NormalizedNodeDataInput in) throws IOException {
-
-        final PathArgument identifier = in.readPathArgument();
-        final Collection<DataTreeCandidateNode> children = readChildren(in);
+    private static DataTreeCandidateNode readModifiedNode(final ModificationType type, final NormalizedNodeDataInput in,
+            final ReusableStreamReceiver receiver) throws IOException {
+        final var pathArg = in.readPathArgument();
+        final var children = readChildren(in, receiver);
         if (children.isEmpty()) {
-            LOG.debug("Modified node {} does not have any children, not instantiating it", identifier);
+            LOG.debug("Modified node {} does not have any children, not instantiating it", pathArg);
             return null;
-        } else {
-            return ModifiedDataTreeCandidateNode.create(identifier, type, children);
         }
+
+        return ModifiedDataTreeCandidateNode.create(pathArg, type, children);
     }
 
-    private static Collection<DataTreeCandidateNode> readChildren(final NormalizedNodeDataInput in) throws IOException {
+    private static List<DataTreeCandidateNode> readChildren(final NormalizedNodeDataInput in,
+            final ReusableStreamReceiver receiver) throws IOException {
         final int size = in.readInt();
-        if (size != 0) {
-            final Collection<DataTreeCandidateNode> ret = new ArrayList<>(size);
-            for (int i = 0; i < size; ++i) {
-                final DataTreeCandidateNode child = readNode(in);
-                if (child != null) {
-                    ret.add(child);
-                }
+        if (size == 0) {
+            return List.of();
+        }
+
+        final var ret = new ArrayList<DataTreeCandidateNode>(size);
+        for (int i = 0; i < size; ++i) {
+            final var child = readNode(in, receiver);
+            if (child != null) {
+                ret.add(child);
             }
-            return ret;
-        } else {
-            return Collections.emptyList();
         }
+        return ret;
     }
 
-    private static DataTreeCandidateNode readNode(final NormalizedNodeDataInput in) throws IOException {
+    private static DataTreeCandidateNode readNode(final NormalizedNodeDataInput in,
+            final ReusableStreamReceiver receiver) throws IOException {
         final byte type = in.readByte();
-        switch (type) {
-            case APPEARED:
-                return readModifiedNode(ModificationType.APPEARED, in);
-            case DELETE:
-                return DeletedDataTreeCandidateNode.create(in.readPathArgument());
-            case DISAPPEARED:
-                return readModifiedNode(ModificationType.DISAPPEARED, in);
-            case SUBTREE_MODIFIED:
-                return readModifiedNode(ModificationType.SUBTREE_MODIFIED, in);
-            case UNMODIFIED:
-                return null;
-            case WRITE:
-                return DataTreeCandidateNodes.fromNormalizedNode(in.readNormalizedNode());
-            default:
-                throw new IllegalArgumentException("Unhandled node type " + type);
-        }
+        return switch (type) {
+            case APPEARED -> readModifiedNode(ModificationType.APPEARED, in, receiver);
+            case DELETE -> DeletedDataTreeCandidateNode.create(in.readPathArgument());
+            case DISAPPEARED -> readModifiedNode(ModificationType.DISAPPEARED, in, receiver);
+            case SUBTREE_MODIFIED -> readModifiedNode(ModificationType.SUBTREE_MODIFIED, in, receiver);
+            case UNMODIFIED -> null;
+            case WRITE -> DataTreeCandidateNodes.written(in.readNormalizedNode(receiver));
+            default -> throw new IllegalArgumentException("Unhandled node type " + type);
+        };
     }
 
-    public static DataTreeCandidate readDataTreeCandidate(final DataInput in) throws IOException {
-        final NormalizedNodeDataInput reader = NormalizedNodeInputOutput.newDataInput(in);
-        final YangInstanceIdentifier rootPath = reader.readYangInstanceIdentifier();
+    public static DataTreeCandidateWithVersion readDataTreeCandidate(final DataInput in,
+            final ReusableStreamReceiver receiver) throws IOException {
+        final var reader = NormalizedNodeDataInput.newDataInput(in);
+        final var rootPath = reader.readYangInstanceIdentifier();
         final byte type = reader.readByte();
 
-        final DataTreeCandidateNode rootNode;
-        switch (type) {
-            case APPEARED:
-                rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.APPEARED, readChildren(reader));
-                break;
-            case DELETE:
-                rootNode = DeletedDataTreeCandidateNode.create();
-                break;
-            case DISAPPEARED:
-                rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.DISAPPEARED, readChildren(reader));
-                break;
-            case SUBTREE_MODIFIED:
-                rootNode = ModifiedDataTreeCandidateNode.create(ModificationType.SUBTREE_MODIFIED,
-                        readChildren(reader));
-                break;
-            case WRITE:
-                rootNode = DataTreeCandidateNodes.fromNormalizedNode(reader.readNormalizedNode());
-                break;
-            case UNMODIFIED:
-                rootNode = AbstractDataTreeCandidateNode.createUnmodified();
-                break;
-            default:
-                throw new IllegalArgumentException("Unhandled node type " + type);
-        }
-
-        return DataTreeCandidates.newDataTreeCandidate(rootPath, rootNode);
+        final DataTreeCandidateNode rootNode = switch (type) {
+            case APPEARED -> ModifiedDataTreeCandidateNode.create(ModificationType.APPEARED,
+                readChildren(reader, receiver));
+            case DELETE -> DeletedDataTreeCandidateNode.create();
+            case DISAPPEARED -> ModifiedDataTreeCandidateNode.create(ModificationType.DISAPPEARED,
+                readChildren(reader, receiver));
+            case SUBTREE_MODIFIED -> ModifiedDataTreeCandidateNode.create(ModificationType.SUBTREE_MODIFIED,
+                readChildren(reader, receiver));
+            case WRITE -> DataTreeCandidateNodes.written(reader.readNormalizedNode(receiver));
+            case UNMODIFIED -> AbstractDataTreeCandidateNode.createUnmodified();
+            default -> throw new IllegalArgumentException("Unhandled node type " + type);
+        };
+        return new DataTreeCandidateWithVersion(DataTreeCandidates.newDataTreeCandidate(rootPath, rootNode),
+            reader.getVersion());
     }
 
-
     private static void writeChildren(final NormalizedNodeDataOutput out,
             final Collection<DataTreeCandidateNode> children) throws IOException {
         out.writeInt(children.size());
-        for (DataTreeCandidateNode child : children) {
+        for (var child : children) {
             writeNode(out, child);
         }
     }
 
     private static void writeNode(final NormalizedNodeDataOutput out, final DataTreeCandidateNode node)
             throws IOException {
-        switch (node.getModificationType()) {
-            case APPEARED:
+        switch (node.modificationType()) {
+            case APPEARED -> {
                 out.writeByte(APPEARED);
-                out.writePathArgument(node.getIdentifier());
-                writeChildren(out, node.getChildNodes());
-                break;
-            case DELETE:
+                out.writePathArgument(node.name());
+                writeChildren(out, node.childNodes());
+            }
+            case DELETE -> {
                 out.writeByte(DELETE);
-                out.writePathArgument(node.getIdentifier());
-                break;
-            case DISAPPEARED:
+                out.writePathArgument(node.name());
+            }
+            case DISAPPEARED -> {
                 out.writeByte(DISAPPEARED);
-                out.writePathArgument(node.getIdentifier());
-                writeChildren(out, node.getChildNodes());
-                break;
-            case SUBTREE_MODIFIED:
+                out.writePathArgument(node.name());
+                writeChildren(out, node.childNodes());
+            }
+            case SUBTREE_MODIFIED -> {
                 out.writeByte(SUBTREE_MODIFIED);
-                out.writePathArgument(node.getIdentifier());
-                writeChildren(out, node.getChildNodes());
-                break;
-            case WRITE:
+                out.writePathArgument(node.name());
+                writeChildren(out, node.childNodes());
+            }
+            case WRITE -> {
                 out.writeByte(WRITE);
-                out.writeNormalizedNode(node.getDataAfter().get());
-                break;
-            case UNMODIFIED:
-                out.writeByte(UNMODIFIED);
-                break;
-            default:
-                throwUnhandledNodeType(node);
+                out.writeNormalizedNode(node.getDataAfter());
+            }
+            case UNMODIFIED -> out.writeByte(UNMODIFIED);
+            default -> throwUnhandledNodeType(node);
         }
     }
 
-    public static void writeDataTreeCandidate(final DataOutput out, DataTreeCandidate candidate) throws IOException {
-        try (NormalizedNodeDataOutput writer = NormalizedNodeInputOutput.newDataOutput(out)) {
+    @VisibleForTesting
+    public static void writeDataTreeCandidate(final DataOutput out, final PayloadVersion version,
+            final DataTreeCandidate candidate) throws IOException {
+        try (var writer = version.getStreamVersion().newDataOutput(out)) {
             writer.writeYangInstanceIdentifier(candidate.getRootPath());
 
-            final DataTreeCandidateNode node = candidate.getRootNode();
-            switch (node.getModificationType()) {
-                case APPEARED:
+            final var node = candidate.getRootNode();
+            switch (node.modificationType()) {
+                case APPEARED -> {
                     writer.writeByte(APPEARED);
-                    writeChildren(writer, node.getChildNodes());
-                    break;
-                case DELETE:
-                    writer.writeByte(DELETE);
-                    break;
-                case DISAPPEARED:
+                    writeChildren(writer, node.childNodes());
+                }
+                case DELETE -> writer.writeByte(DELETE);
+                case DISAPPEARED -> {
                     writer.writeByte(DISAPPEARED);
-                    writeChildren(writer, node.getChildNodes());
-                    break;
-                case SUBTREE_MODIFIED:
+                    writeChildren(writer, node.childNodes());
+                }
+                case SUBTREE_MODIFIED -> {
                     writer.writeByte(SUBTREE_MODIFIED);
-                    writeChildren(writer, node.getChildNodes());
-                    break;
-                case UNMODIFIED:
-                    writer.writeByte(UNMODIFIED);
-                    break;
-                case WRITE:
+                    writeChildren(writer, node.childNodes());
+                }
+                case UNMODIFIED -> writer.writeByte(UNMODIFIED);
+                case WRITE -> {
                     writer.writeByte(WRITE);
-                    writer.writeNormalizedNode(node.getDataAfter().get());
-                    break;
-                default:
-                    throwUnhandledNodeType(node);
+                    writer.writeNormalizedNode(node.getDataAfter());
+                }
+                default -> throwUnhandledNodeType(node);
             }
         }
     }
 
+    public static void writeDataTreeCandidate(final DataOutput out, final DataTreeCandidate candidate)
+            throws IOException {
+        writeDataTreeCandidate(out, PayloadVersion.current(), candidate);
+    }
+
     private static void throwUnhandledNodeType(final DataTreeCandidateNode node) {
-        throw new IllegalArgumentException("Unhandled node type " + node.getModificationType());
+        throw new IllegalArgumentException("Unhandled node type " + node.modificationType());
     }
 }
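As a rough illustration of the new version-aware overload above, a caller could serialize a candidate along these lines (a minimal sketch, not part of this change; the candidate instance and the in-memory buffer handling are assumptions):

    // Illustrative only: serialize a DataTreeCandidate at the current payload version.
    // 'candidate' is assumed to be produced by a data tree commit elsewhere in the shard code.
    final ByteArrayDataOutput out = ByteStreams.newDataOutput();
    writeDataTreeCandidate(out, PayloadVersion.current(), candidate);
    final byte[] bytes = out.toByteArray();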
index 37d41258585d7f99dc617f63c5a3c50d43cbacdd..9c0a3acc72d12c4a406ef4085aca068f5c9ee29b 100644 (file)
@@ -7,17 +7,13 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.collect.ImmutableList;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.util.ArrayList;
 import java.util.List;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 
 /**
@@ -25,140 +21,61 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
  *
  * @author Thomas Pantelis
  */
-public class DatastoreSnapshot implements Serializable {
+public final class DatastoreSnapshot implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private DatastoreSnapshot datastoreSnapshot;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final DatastoreSnapshot datastoreSnapshot) {
-            this.datastoreSnapshot = datastoreSnapshot;
-        }
-
-        @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeObject(datastoreSnapshot.type);
-            out.writeObject(datastoreSnapshot.shardManagerSnapshot);
-
-            out.writeInt(datastoreSnapshot.shardSnapshots.size());
-            for (ShardSnapshot shardSnapshot: datastoreSnapshot.shardSnapshots) {
-                out.writeObject(shardSnapshot);
-            }
-        }
-
-        @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            String localType = (String)in.readObject();
-            ShardManagerSnapshot localShardManagerSnapshot = (ShardManagerSnapshot) in.readObject();
-
-            int size = in.readInt();
-            List<ShardSnapshot> localShardSnapshots = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                localShardSnapshots.add((ShardSnapshot) in.readObject());
-            }
-
-            datastoreSnapshot = new DatastoreSnapshot(localType, localShardManagerSnapshot, localShardSnapshots);
-        }
-
-        private Object readResolve() {
-            return datastoreSnapshot;
-        }
-    }
-
-    private final String type;
+    private final @NonNull String type;
     private final ShardManagerSnapshot shardManagerSnapshot;
-    private final List<ShardSnapshot> shardSnapshots;
+    private final @NonNull ImmutableList<ShardSnapshot> shardSnapshots;
 
-    public DatastoreSnapshot(@Nonnull String type, @Nullable ShardManagerSnapshot shardManagerSnapshot,
-            @Nonnull List<ShardSnapshot> shardSnapshots) {
-        this.type = Preconditions.checkNotNull(type);
+    public DatastoreSnapshot(final @NonNull String type, final @Nullable ShardManagerSnapshot shardManagerSnapshot,
+            final @NonNull List<ShardSnapshot> shardSnapshots) {
+        this.type = requireNonNull(type);
         this.shardManagerSnapshot = shardManagerSnapshot;
-        this.shardSnapshots = ImmutableList.copyOf(Preconditions.checkNotNull(shardSnapshots));
+        this.shardSnapshots = ImmutableList.copyOf(shardSnapshots);
     }
 
-    @Nonnull
-    public String getType() {
+    public @NonNull String getType() {
         return type;
     }
 
-    @Nullable
-    public ShardManagerSnapshot getShardManagerSnapshot() {
+    public @Nullable ShardManagerSnapshot getShardManagerSnapshot() {
         return shardManagerSnapshot;
     }
 
-    @Nonnull
-    public List<ShardSnapshot> getShardSnapshots() {
+    public @NonNull List<ShardSnapshot> getShardSnapshots() {
         return shardSnapshots;
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new DS(this);
     }
 
-    public static class ShardSnapshot implements Serializable {
+    public static final class ShardSnapshot implements Serializable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
-        private static final class Proxy implements Externalizable {
-            private static final long serialVersionUID = 1L;
-
-            private ShardSnapshot shardSnapshot;
-
-            // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-            // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-            @SuppressWarnings("checkstyle:RedundantModifier")
-            public Proxy() {
-                // For Externalizable
-            }
-
-            Proxy(final ShardSnapshot shardSnapshot) {
-                this.shardSnapshot = shardSnapshot;
-            }
-
-            @Override
-            public void writeExternal(ObjectOutput out) throws IOException {
-                out.writeObject(shardSnapshot.name);
-                out.writeObject(shardSnapshot.snapshot);
-            }
-
-            @Override
-            public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-                shardSnapshot = new ShardSnapshot((String)in.readObject(), (Snapshot) in.readObject());
-            }
-
-            private Object readResolve() {
-                return shardSnapshot;
-            }
-        }
-
-        private final String name;
-        private final Snapshot snapshot;
+        private final @NonNull String name;
+        private final @NonNull Snapshot snapshot;
 
-        public ShardSnapshot(@Nonnull String name, @Nonnull Snapshot snapshot) {
-            this.name = Preconditions.checkNotNull(name);
-            this.snapshot = Preconditions.checkNotNull(snapshot);
+        public ShardSnapshot(final @NonNull String name, final @NonNull Snapshot snapshot) {
+            this.name = requireNonNull(name);
+            this.snapshot = requireNonNull(snapshot);
         }
 
-        @Nonnull
-        public String getName() {
+        public @NonNull String getName() {
             return name;
         }
 
-        @Nonnull
-        public Snapshot getSnapshot() {
+        public @NonNull Snapshot getSnapshot() {
             return snapshot;
         }
 
+        @java.io.Serial
         private Object writeReplace() {
-            return new Proxy(this);
+            return new DSS(this);
         }
     }
 }
index ae881120958eddc82c25c28d12fd4d9de2e9fdb1..54e9e98d517bb095eb066d5d09f2f45c73be3ea4 100644 (file)
@@ -8,11 +8,10 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 import java.util.Collection;
-import java.util.Optional;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
 
 /**
  * A deserialized {@link DataTreeCandidateNode} which represents a deletion.
@@ -25,7 +24,7 @@ abstract class DeletedDataTreeCandidateNode extends AbstractDataTreeCandidateNod
     static DataTreeCandidateNode create() {
         return new DeletedDataTreeCandidateNode() {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 throw new UnsupportedOperationException("Root node does not have an identifier");
             }
         };
@@ -34,21 +33,20 @@ abstract class DeletedDataTreeCandidateNode extends AbstractDataTreeCandidateNod
     static DataTreeCandidateNode create(final PathArgument identifier) {
         return new DeletedDataTreeCandidateNode() {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 return identifier;
             }
         };
     }
 
     @Override
-    public final Optional<NormalizedNode<?, ?>> getDataAfter() {
-        return Optional.empty();
+    public final NormalizedNode dataAfter() {
+        return null;
     }
 
     @Override
-    public final Collection<DataTreeCandidateNode> getChildNodes() {
-        // We would require the before-image to reconstruct the list of nodes which
-        // were deleted.
+    public final Collection<DataTreeCandidateNode> childNodes() {
+        // We would require the before-image to reconstruct the list of nodes which were deleted.
         throw new UnsupportedOperationException("Children not available after serialization");
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DisableTrackingPayload.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/DisableTrackingPayload.java
new file mode 100644 (file)
index 0000000..293f396
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import com.google.common.io.ByteArrayDataOutput;
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class DisableTrackingPayload extends AbstractIdentifiablePayload<ClientIdentifier> {
+    private static final Logger LOG = LoggerFactory.getLogger(DisableTrackingPayload.class);
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(DT::new);
+
+    DisableTrackingPayload(final ClientIdentifier clientId, final byte[] serialized) {
+        super(clientId, serialized);
+    }
+
+    public static DisableTrackingPayload create(final ClientIdentifier clientId,
+            final int initialSerializedBufferCapacity) {
+        final ByteArrayDataOutput out = ByteStreams.newDataOutput(initialSerializedBufferCapacity);
+        try {
+            clientId.writeTo(out);
+        } catch (IOException e) {
+            // This should never happen
+            LOG.error("Failed to serialize {}", clientId, e);
+            throw new IllegalStateException("Failed to serialize " + clientId, e);
+        }
+        return new DisableTrackingPayload(clientId, out.toByteArray());
+    }
+
+    @Override
+    protected DT externalizableProxy(final byte[] serialized) {
+        return new DT(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
+    }
+}
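For orientation, a hedged usage sketch of the factory added above (the client identifier and the 128-byte initial buffer capacity are assumptions, not values taken from this change):

    // Illustrative only: build a payload recording that state tracking is disabled for 'clientId'.
    final DisableTrackingPayload payload = DisableTrackingPayload.create(clientId, 128);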
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FM.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FM.java
new file mode 100644 (file)
index 0000000..827a0cf
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import com.google.common.collect.ImmutableList;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.List;
+
+/**
+ * Externalizable proxy for {@link FrontendShardDataTreeSnapshotMetadata}.
+ */
+final class FM implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private List<FrontendClientMetadata> clients;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public FM() {
+        // For Externalizable
+    }
+
+    FM(final FrontendShardDataTreeSnapshotMetadata metadata) {
+        clients = metadata.getClients();
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(clients.size());
+        for (var c : clients) {
+            c.writeTo(out);
+        }
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        final int size = in.readInt();
+        final var builder = ImmutableList.<FrontendClientMetadata>builderWithExpectedSize(size);
+        for (int i = 0; i < size ; ++i) {
+            builder.add(FrontendClientMetadata.readFrom(in));
+        }
+        clients = builder.build();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new FrontendShardDataTreeSnapshotMetadata(clients);
+    }
+}
\ No newline at end of file
index 4598f68a449c003e43e801af465d50c900f1f3d5..49573e247c6615e9ff79ef736573a969c749713e 100644 (file)
@@ -7,59 +7,47 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableRangeSet;
-import com.google.common.collect.ImmutableRangeSet.Builder;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.primitives.UnsignedLong;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Set;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
 import org.opendaylight.yangtools.concepts.WritableObject;
-import org.opendaylight.yangtools.concepts.WritableObjects;
 
-public final class FrontendClientMetadata implements Identifiable<ClientIdentifier>, WritableObject {
-    private final Collection<FrontendHistoryMetadata> currentHistories;
-    private final RangeSet<UnsignedLong> purgedHistories;
-    private final ClientIdentifier identifier;
+public final class FrontendClientMetadata implements WritableObject {
+    private final @NonNull ImmutableList<FrontendHistoryMetadata> currentHistories;
+    private final @NonNull ImmutableUnsignedLongSet purgedHistories;
+    private final @NonNull ClientIdentifier clientId;
 
-    public FrontendClientMetadata(final ClientIdentifier identifier, final RangeSet<UnsignedLong> purgedHistories,
+    public FrontendClientMetadata(final ClientIdentifier clientId, final ImmutableUnsignedLongSet purgedHistories,
             final Collection<FrontendHistoryMetadata> currentHistories) {
-        this.identifier = Preconditions.checkNotNull(identifier);
-        this.purgedHistories = ImmutableRangeSet.copyOf(purgedHistories);
+        this.clientId = requireNonNull(clientId);
+        this.purgedHistories = requireNonNull(purgedHistories);
         this.currentHistories = ImmutableList.copyOf(currentHistories);
     }
 
-    public Collection<FrontendHistoryMetadata> getCurrentHistories() {
-        return currentHistories;
+    public ClientIdentifier clientId() {
+        return clientId;
     }
 
-    public RangeSet<UnsignedLong> getPurgedHistories() {
-        return purgedHistories;
+    public ImmutableList<FrontendHistoryMetadata> getCurrentHistories() {
+        return currentHistories;
     }
 
-    @Override
-    public ClientIdentifier getIdentifier() {
-        return identifier;
+    public ImmutableUnsignedLongSet getPurgedHistories() {
+        return purgedHistories;
     }
 
     @Override
     public void writeTo(final DataOutput out) throws IOException {
-        identifier.writeTo(out);
-
-        final Set<Range<UnsignedLong>> ranges = purgedHistories.asRanges();
-        out.writeInt(ranges.size());
-        for (final Range<UnsignedLong> r : ranges) {
-            WritableObjects.writeLongs(out, r.lowerEndpoint().longValue(), r.upperEndpoint().longValue());
-        }
+        clientId.writeTo(out);
+        purgedHistories.writeTo(out);
 
         out.writeInt(currentHistories.size());
         for (final FrontendHistoryMetadata h : currentHistories) {
@@ -68,30 +56,21 @@ public final class FrontendClientMetadata implements Identifiable<ClientIdentifi
     }
 
     public static FrontendClientMetadata readFrom(final DataInput in) throws IOException {
-        final ClientIdentifier id = ClientIdentifier.readFrom(in);
-
-        final int purgedSize = in.readInt();
-        final Builder<UnsignedLong> b = ImmutableRangeSet.builder();
-        for (int i = 0; i < purgedSize; ++i) {
-            final byte header = WritableObjects.readLongHeader(in);
-            final UnsignedLong lower = UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, header));
-            final UnsignedLong upper = UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, header));
-
-            b.add(Range.closed(lower, upper));
-        }
+        final var clientId = ClientIdentifier.readFrom(in);
+        final var purgedHistories = ImmutableUnsignedLongSet.readFrom(in);
 
         final int currentSize = in.readInt();
-        final Collection<FrontendHistoryMetadata> currentHistories = new ArrayList<>(currentSize);
+        final var currentBuilder = ImmutableList.<FrontendHistoryMetadata>builderWithExpectedSize(currentSize);
         for (int i = 0; i < currentSize; ++i) {
-            currentHistories.add(FrontendHistoryMetadata.readFrom(in));
+            currentBuilder.add(FrontendHistoryMetadata.readFrom(in));
         }
 
-        return new FrontendClientMetadata(id, b.build(), currentHistories);
+        return new FrontendClientMetadata(clientId, purgedHistories, currentBuilder.build());
     }
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(FrontendClientMetadata.class).add("identifer", identifier)
-                .add("current", currentHistories).add("purged", purgedHistories).toString();
+        return MoreObjects.toStringHelper(FrontendClientMetadata.class)
+            .add("clientId", clientId).add("current", currentHistories).add("purged", purgedHistories).toString();
     }
 }
index 2cdda9f54716b41924e94a59cfb73c18110a1f12..a27b2c0311b3a0431af0cf036296b146bde82661 100644 (file)
@@ -7,38 +7,32 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Verify;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableRangeSet;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
-import com.google.common.primitives.UnsignedLong;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
 import org.opendaylight.yangtools.concepts.WritableObject;
 import org.opendaylight.yangtools.concepts.WritableObjects;
 
 public final class FrontendHistoryMetadata implements WritableObject {
-    private final RangeSet<UnsignedLong> purgedTransactions;
-    private final Map<UnsignedLong, Boolean> closedTransactions;
+    private final @NonNull ImmutableUnsignedLongSet purgedTransactions;
+    private final @NonNull UnsignedLongBitmap closedTransactions;
     private final long historyId;
     private final long cookie;
     private final boolean closed;
 
     public FrontendHistoryMetadata(final long historyId, final long cookie, final boolean closed,
-            final Map<UnsignedLong, Boolean> closedTransactions, final RangeSet<UnsignedLong> purgedTransactions) {
+            final UnsignedLongBitmap closedTransactions, final ImmutableUnsignedLongSet purgedTransactions) {
         this.historyId = historyId;
         this.cookie = cookie;
         this.closed = closed;
-        this.closedTransactions = ImmutableMap.copyOf(closedTransactions);
-        this.purgedTransactions = ImmutableRangeSet.copyOf(purgedTransactions);
+        this.closedTransactions = requireNonNull(closedTransactions);
+        this.purgedTransactions = requireNonNull(purgedTransactions);
     }
 
     public long getHistoryId() {
@@ -53,11 +47,11 @@ public final class FrontendHistoryMetadata implements WritableObject {
         return closed;
     }
 
-    public Map<UnsignedLong, Boolean> getClosedTransactions() {
+    public UnsignedLongBitmap getClosedTransactions() {
         return closedTransactions;
     }
 
-    public RangeSet<UnsignedLong> getPurgedTransactions() {
+    public ImmutableUnsignedLongSet getPurgedTransactions() {
         return purgedTransactions;
     }
 
@@ -66,53 +60,43 @@ public final class FrontendHistoryMetadata implements WritableObject {
         WritableObjects.writeLongs(out, historyId, cookie);
         out.writeBoolean(closed);
 
-        final Set<Range<UnsignedLong>> purgedRanges = purgedTransactions.asRanges();
-        WritableObjects.writeLongs(out, closedTransactions.size(), purgedRanges.size());
-        for (Entry<UnsignedLong, Boolean> e : closedTransactions.entrySet()) {
-            WritableObjects.writeLong(out, e.getKey().longValue());
-            out.writeBoolean(e.getValue().booleanValue());
-        }
-        for (Range<UnsignedLong> r : purgedRanges) {
-            WritableObjects.writeLongs(out, r.lowerEndpoint().longValue(), r.upperEndpoint().longValue());
-        }
+        final int closedSize = closedTransactions.size();
+        final int purgedSize = purgedTransactions.rangeSize();
+        WritableObjects.writeLongs(out, closedSize, purgedSize);
+        closedTransactions.writeEntriesTo(out, closedSize);
+        purgedTransactions.writeRangesTo(out, purgedSize);
     }
 
     public static FrontendHistoryMetadata readFrom(final DataInput in) throws IOException {
-        byte header = WritableObjects.readLongHeader(in);
-        final long historyId = WritableObjects.readFirstLong(in, header);
-        final long cookie = WritableObjects.readSecondLong(in, header);
+        final byte firstHdr = WritableObjects.readLongHeader(in);
+        final long historyId = WritableObjects.readFirstLong(in, firstHdr);
+        final long cookie = WritableObjects.readSecondLong(in, firstHdr);
         final boolean closed = in.readBoolean();
 
-        header = WritableObjects.readLongHeader(in);
-        long ls = WritableObjects.readFirstLong(in, header);
-        Verify.verify(ls >= 0 && ls <= Integer.MAX_VALUE);
-        final int csize = (int) ls;
-
-        ls = WritableObjects.readSecondLong(in, header);
-        Verify.verify(ls >= 0 && ls <= Integer.MAX_VALUE);
-        final int psize = (int) ls;
-
-        final Map<UnsignedLong, Boolean> closedTransactions = new HashMap<>(csize);
-        for (int i = 0; i < csize; ++i) {
-            final UnsignedLong key = UnsignedLong.fromLongBits(WritableObjects.readLong(in));
-            final Boolean value = Boolean.valueOf(in.readBoolean());
-            closedTransactions.put(key, value);
-        }
-        final RangeSet<UnsignedLong> purgedTransactions = TreeRangeSet.create();
-        for (int i = 0; i < psize; ++i) {
-            final byte h = WritableObjects.readLongHeader(in);
-            final UnsignedLong l = UnsignedLong.fromLongBits(WritableObjects.readFirstLong(in, h));
-            final UnsignedLong u = UnsignedLong.fromLongBits(WritableObjects.readSecondLong(in, h));
-            purgedTransactions.add(Range.closed(l, u));
-        }
+        final byte secondHdr = WritableObjects.readLongHeader(in);
+        final int csize = verifySize(WritableObjects.readFirstLong(in, secondHdr));
+        final int psize = verifySize(WritableObjects.readSecondLong(in, secondHdr));
 
-        return new FrontendHistoryMetadata(historyId, cookie, closed, closedTransactions, purgedTransactions);
+        return new FrontendHistoryMetadata(historyId, cookie, closed,
+            UnsignedLongBitmap.readFrom(in, csize),
+            ImmutableUnsignedLongSet.readFrom(in, psize));
     }
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(FrontendHistoryMetadata.class).add("historyId", historyId)
-                .add("cookie", cookie).add("closed", closed).add("closedTransactions", closedTransactions)
-                .add("purgedTransactions", purgedTransactions).toString();
+        return MoreObjects.toStringHelper(FrontendHistoryMetadata.class)
+            .add("historyId", historyId)
+            .add("cookie", cookie)
+            .add("closed", closed)
+            .add("closedTransactions", closedTransactions)
+            .add("purgedTransactions", purgedTransactions)
+            .toString();
+    }
+
+    private static int verifySize(final long size) throws IOException {
+        if (size < 0 || size > Integer.MAX_VALUE) {
+            throw new IOException("Invalid size " + size);
+        }
+        return (int) size;
     }
 }
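The writeTo/readFrom changes above lean on the paired-long encoding from WritableObjects; a minimal round-trip sketch of that encoding, using only calls already present in this file (the in-memory streams are illustrative):

    // Write two longs behind a single shared header byte, then read them back.
    final ByteArrayDataOutput out = ByteStreams.newDataOutput();
    WritableObjects.writeLongs(out, historyId, cookie);

    final ByteArrayDataInput in = ByteStreams.newDataInput(out.toByteArray());
    final byte hdr = WritableObjects.readLongHeader(in);
    final long first = WritableObjects.readFirstLong(in, hdr);   // historyId
    final long second = WritableObjects.readSecondLong(in, hdr); // cookie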
index b7b1261192aeebe9ba8e689d7cff8d4bd12ba4e5..1d28ccac45ddc7af3858a2086e8005e12c4d8682 100644 (file)
@@ -11,55 +11,12 @@ import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableList;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-public final class FrontendShardDataTreeSnapshotMetadata extends
-        ShardDataTreeSnapshotMetadata<FrontendShardDataTreeSnapshotMetadata> {
-
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private List<FrontendClientMetadata> clients;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final FrontendShardDataTreeSnapshotMetadata metadata) {
-            this.clients = metadata.getClients();
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(clients.size());
-            for (final FrontendClientMetadata c : clients) {
-                c.writeTo(out);
-            }
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            final int size = in.readInt();
-            final List<FrontendClientMetadata> readedClients = new ArrayList<>(size);
-            for (int i = 0; i < size ; ++i) {
-                readedClients.add(FrontendClientMetadata.readFrom(in));
-            }
-            this.clients = ImmutableList.copyOf(readedClients);
-        }
-
-        private Object readResolve() {
-            return new FrontendShardDataTreeSnapshotMetadata(clients);
-        }
-    }
-
+public final class FrontendShardDataTreeSnapshotMetadata
+        extends ShardDataTreeSnapshotMetadata<FrontendShardDataTreeSnapshotMetadata> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@@ -77,7 +34,7 @@ public final class FrontendShardDataTreeSnapshotMetadata extends
 
     @Override
     protected Externalizable externalizableProxy() {
-        return new Proxy(this);
+        return new FM(this);
     }
 
     @Override
@@ -87,7 +44,8 @@ public final class FrontendShardDataTreeSnapshotMetadata extends
 
     @Override
     public String toString() {
-        return MoreObjects.toStringHelper(FrontendShardDataTreeSnapshotMetadata.class).add("clients", clients)
-                .toString();
+        return MoreObjects.toStringHelper(FrontendShardDataTreeSnapshotMetadata.class)
+            .add("clients", clients)
+            .toString();
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MS.java
new file mode 100644 (file)
index 0000000..94cd695
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+import com.google.common.collect.ImmutableMap;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Map;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Externalizable proxy for {@link MetadataShardDataTreeSnapshot}.
+ */
+final class MS implements Externalizable {
+    private static final Logger LOG = LoggerFactory.getLogger(MS.class);
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
+    private NormalizedNodeStreamVersion version;
+    private NormalizedNode rootNode;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public MS() {
+        // For Externalizable
+    }
+
+    MS(final MetadataShardDataTreeSnapshot snapshot) {
+        rootNode = snapshot.getRootNode().orElseThrow();
+        metadata = snapshot.getMetadata();
+        version = snapshot.version().getStreamVersion();
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeInt(metadata.size());
+        for (var m : metadata.values()) {
+            out.writeObject(m);
+        }
+        try (var stream = version.newDataOutput(out)) {
+            stream.writeNormalizedNode(rootNode);
+        }
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final int metaSize = in.readInt();
+        checkArgument(metaSize >= 0, "Invalid negative metadata map length %s", metaSize);
+
+        // Default pre-allocate is 4, which should be fine
+        final var metaBuilder = ImmutableMap
+            .<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>>builder();
+        for (int i = 0; i < metaSize; ++i) {
+            final var m = (ShardDataTreeSnapshotMetadata<?>) in.readObject();
+            if (m != null) {
+                metaBuilder.put(m.getType(), m);
+            } else {
+                LOG.warn("Skipping null metadata");
+            }
+        }
+        metadata = metaBuilder.build();
+
+        final var stream = NormalizedNodeDataInput.newDataInput(in);
+        version = stream.getVersion();
+        rootNode = stream.readNormalizedNode();
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return new MetadataShardDataTreeSnapshot(rootNode, metadata);
+    }
+}
\ No newline at end of file
index 05dc4cc3982268003be9c0d17f4a2bce2708ac1e..f1a0d7c5540ced84fca4ac6a5b79949bf4e1f1f9 100644 (file)
@@ -7,23 +7,15 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
 import java.util.Map;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AbstractVersionedShardDataTreeSnapshot} which contains additional metadata.
@@ -33,61 +25,7 @@ import org.slf4j.LoggerFactory;
 @Beta
 public final class MetadataShardDataTreeSnapshot extends AbstractVersionedShardDataTreeSnapshot
         implements Serializable {
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-        private static final Logger LOG = LoggerFactory.getLogger(MetadataShardDataTreeSnapshot.class);
-
-        private Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
-        private NormalizedNode<?, ?> rootNode;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final MetadataShardDataTreeSnapshot snapshot) {
-            this.rootNode = snapshot.getRootNode().get();
-            this.metadata = snapshot.getMetadata();
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(metadata.size());
-            for (ShardDataTreeSnapshotMetadata<?> m : metadata.values()) {
-                out.writeObject(m);
-            }
-
-            SerializationUtils.serializeNormalizedNode(rootNode, out);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            final int metaSize = in.readInt();
-            Preconditions.checkArgument(metaSize >= 0, "Invalid negative metadata map length %s", metaSize);
-
-            // Default pre-allocate is 4, which should be fine
-            final Builder<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>>
-                    metaBuilder = ImmutableMap.builder();
-            for (int i = 0; i < metaSize; ++i) {
-                final ShardDataTreeSnapshotMetadata<?> m = (ShardDataTreeSnapshotMetadata<?>) in.readObject();
-                if (m != null) {
-                    metaBuilder.put(m.getType(), m);
-                } else {
-                    LOG.warn("Skipping null metadata");
-                }
-            }
-
-            metadata = metaBuilder.build();
-            rootNode = Verify.verifyNotNull(SerializationUtils.deserializeNormalizedNode(in));
-        }
-
-        private Object readResolve() {
-            return new MetadataShardDataTreeSnapshot(rootNode, metadata);
-        }
-    }
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
@@ -96,15 +34,15 @@ public final class MetadataShardDataTreeSnapshot extends AbstractVersionedShardD
     private final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
 
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "See above justification.")
-    private final NormalizedNode<?, ?> rootNode;
+    private final NormalizedNode rootNode;
 
-    public MetadataShardDataTreeSnapshot(final NormalizedNode<?, ?> rootNode) {
+    public MetadataShardDataTreeSnapshot(final NormalizedNode rootNode) {
         this(rootNode, ImmutableMap.of());
     }
 
-    public MetadataShardDataTreeSnapshot(final NormalizedNode<?, ?> rootNode,
+    public MetadataShardDataTreeSnapshot(final NormalizedNode rootNode,
             final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata) {
-        this.rootNode = Preconditions.checkNotNull(rootNode);
+        this.rootNode = requireNonNull(rootNode);
         this.metadata = ImmutableMap.copyOf(metadata);
     }
 
@@ -113,17 +51,18 @@ public final class MetadataShardDataTreeSnapshot extends AbstractVersionedShardD
     }
 
     @Override
-    NormalizedNode<?, ?> rootNode() {
+    NormalizedNode rootNode() {
         return rootNode;
     }
 
     @Override
     PayloadVersion version() {
-        return PayloadVersion.BORON;
+        return PayloadVersion.POTASSIUM;
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new MS(this);
     }
 
     @Override
index 6820e1c885b0b6857523610dc26b1a0d1b4ea6db..8c771886f815e6adccfbab28cf892ff51afb1847 100644 (file)
@@ -7,31 +7,32 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.util.Collection;
-import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
 
 /**
  * A deserialized {@link DataTreeCandidateNode} which represents a modification in
  * one of its children.
  */
 abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNode {
-    private final Collection<DataTreeCandidateNode> children;
+    private final @NonNull Collection<DataTreeCandidateNode> children;
 
     private ModifiedDataTreeCandidateNode(final ModificationType type,
             final Collection<DataTreeCandidateNode> children) {
         super(type);
-        this.children = Preconditions.checkNotNull(children);
+        this.children = requireNonNull(children);
     }
 
     static DataTreeCandidateNode create(final ModificationType type, final Collection<DataTreeCandidateNode> children) {
         return new ModifiedDataTreeCandidateNode(type, children) {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 throw new UnsupportedOperationException("Root node does not have an identifier");
             }
         };
@@ -41,19 +42,19 @@ abstract class ModifiedDataTreeCandidateNode extends AbstractDataTreeCandidateNo
             final Collection<DataTreeCandidateNode> children) {
         return new ModifiedDataTreeCandidateNode(type, children) {
             @Override
-            public PathArgument getIdentifier() {
+            public PathArgument name() {
                 return identifier;
             }
         };
     }
 
     @Override
-    public final Optional<NormalizedNode<?, ?>> getDataAfter() {
+    public final NormalizedNode dataAfter() {
         throw new UnsupportedOperationException("After-image not available after serialization");
     }
 
     @Override
-    public final Collection<DataTreeCandidateNode> getChildNodes() {
+    public final Collection<DataTreeCandidateNode> childNodes() {
         return children;
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PH.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PH.java
new file mode 100644 (file)
index 0000000..dc95e31
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link PurgeLocalHistoryPayload}.
+ */
+final class PH implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public PH() {
+        // For Externalizable
+    }
+
+    PH(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(LocalHistoryIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new PurgeLocalHistoryPayload(identifier, bytes);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PT.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/PT.java
new file mode 100644 (file)
index 0000000..8ea773f
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+
+/**
+ * Serialization proxy for {@link PurgeTransactionPayload}.
+ */
+final class PT implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private TransactionIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public PT() {
+        // For Externalizable
+    }
+
+    PT(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+        identifier = verifyNotNull(TransactionIdentifier.readFrom(ByteStreams.newDataInput(newBytes)));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new PurgeTransactionPayload(identifier, bytes);
+    }
+}
index 1dbbcba2e5e741ed107b36d44e28cf338f1ddc4d..298f835c5c85c1cc71db2bdb2b9b5834b6629b4a 100644 (file)
@@ -7,14 +7,16 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static com.google.common.base.Preconditions.checkArgument;
+
 import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.concepts.WritableObject;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 
 /**
  * Enumeration of all ABI versions supported by this implementation of persistence. An ABI version has to be bumped
@@ -29,35 +31,62 @@ import org.opendaylight.yangtools.concepts.WritableObject;
  * participant instance should oppose RAFT candidates which produce persistence of an unsupported version. If a follower
  * encounters an unsupported version it must not become fully-operational, as it does not have an accurate view
  * of shard state.
- *
- * @author Robert Varga
  */
 @Beta
 public enum PayloadVersion implements WritableObject {
-    // NOTE: enumeration values need to be sorted in asceding order of their version to keep Comparable working
+    // NOTE: enumeration values need to be sorted in ascending order of their version to keep Comparable working
 
     /**
      * Version which is older than any other version. This version exists purely for testing purposes.
      */
     @VisibleForTesting
-    TEST_PAST_VERSION(0),
+    TEST_PAST_VERSION(0) {
+        @Override
+        public NormalizedNodeStreamVersion getStreamVersion() {
+            throw new UnsupportedOperationException();
+        }
+    },
+
+    /**
+     * ABI version enabled in {@code 2022.09 Chlorine SR2}. This version revises the serialization format of
+     * payload proxies to reduce their size. Otherwise, this format is equivalent to {@code #MAGNESIUM}.
+     *
+     * @deprecated Use {@link #POTASSIUM} instead.
+     */
+    @Deprecated(since = "8.0.0", forRemoval = true)
+    CHLORINE_SR2(9) {
+        @Override
+        public NormalizedNodeStreamVersion getStreamVersion() {
+            return NormalizedNodeStreamVersion.MAGNESIUM;
+        }
+    },
 
     /**
-     * Initial ABI version, as shipped with Boron Simultaneous release.
+     * ABI version enabled in {@code 2023.09 Potassium}. This version removes augmentation identifiers and nodes.
+     * Otherwise, this format is equivalent to {@link #CHLORINE_SR2}.
      */
-    // We seed the initial version to be the same as DataStoreVersions.BORON-VERSION for compatibility reasons.
-    BORON(5),
+    POTASSIUM(10) {
+        @Override
+        public NormalizedNodeStreamVersion getStreamVersion() {
+            return NormalizedNodeStreamVersion.POTASSIUM;
+        }
+    },
 
     /**
      * Version which is newer than any other version. This version exists purely for testing purposes.
      */
     @VisibleForTesting
-    TEST_FUTURE_VERSION(65535);
+    TEST_FUTURE_VERSION(65535) {
+        @Override
+        public NormalizedNodeStreamVersion getStreamVersion() {
+            throw new UnsupportedOperationException();
+        }
+    };
 
     private final short value;
 
     PayloadVersion(final int intVersion) {
-        Preconditions.checkArgument(intVersion >= 0 && intVersion <= 65535);
+        checkArgument(intVersion >= 0 && intVersion <= 65535);
         value = (short) intVersion;
     }
 
@@ -70,15 +99,21 @@ public enum PayloadVersion implements WritableObject {
         return value;
     }
 
+    /**
+     * Return the NormalizedNode stream version corresponding to this particular ABI.
+     *
+     * @return Stream Version to use for this ABI version
+     */
+    public abstract @NonNull NormalizedNodeStreamVersion getStreamVersion();
+
     /**
      * Return the codebase-native persistence version. This version is the default version allocated to messages
      * at runtime. Conversion to previous versions may incur additional overhead (such as object allocation).
      *
      * @return Current {@link PayloadVersion}
      */
-    @Nonnull
-    public static PayloadVersion current() {
-        return BORON;
+    public static @NonNull PayloadVersion current() {
+        return POTASSIUM;
     }
 
     /**
@@ -90,20 +125,14 @@ public enum PayloadVersion implements WritableObject {
      * @throws FutureVersionException if the specified integer identifies a future version
      * @throws PastVersionException if the specified integer identifies a past version which is no longer supported
      */
-    @Nonnull
-    public static PayloadVersion valueOf(final short version) throws FutureVersionException, PastVersionException {
-        switch (Short.toUnsignedInt(version)) {
-            case 0:
-            case 1:
-            case 2:
-            case 3:
-            case 4:
-                throw new PastVersionException(version, BORON);
-            case 5:
-                return BORON;
-            default:
-                throw new FutureVersionException(version, BORON);
-        }
+    public static @NonNull PayloadVersion valueOf(final short version)
+            throws FutureVersionException, PastVersionException {
+        return switch (Short.toUnsignedInt(version)) {
+            case 0, 1, 2, 3, 4, 5, 6, 7, 8 -> throw new PastVersionException(version, CHLORINE_SR2);
+            case 9 -> CHLORINE_SR2;
+            case 10 -> POTASSIUM;
+            default -> throw new FutureVersionException(version, CHLORINE_SR2);
+        };
     }
 
     @Override
@@ -119,14 +148,12 @@ public enum PayloadVersion implements WritableObject {
      * @return An {@link PayloadVersion}
      * @throws IOException If read fails or an unsupported version is encountered
      */
-    @Nonnull
-    public static PayloadVersion readFrom(@Nonnull final DataInput in) throws IOException {
+    public static @NonNull PayloadVersion readFrom(final @NonNull DataInput in) throws IOException {
         final short s = in.readShort();
         try {
             return valueOf(s);
         } catch (FutureVersionException | PastVersionException e) {
-            throw new IOException("Unsupported version", e);
+            throw new IOException(e);
         }
     }
-
 }
index 8d9a8d217a67e201dda8c8f9ba9f705ac939557c..3608e7589fea0640616ba72039b98fc3624848aa 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
 import org.slf4j.Logger;
@@ -22,34 +21,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class PurgeLocalHistoryPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
-    private static final class Proxy extends AbstractProxy<LocalHistoryIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected LocalHistoryIdentifier readIdentifier(final DataInput in) throws IOException {
-            return LocalHistoryIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected PurgeLocalHistoryPayload createObject(final LocalHistoryIdentifier identifier,
-                final byte[] serialized) {
-            return new PurgeLocalHistoryPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(PurgeLocalHistoryPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(PH::new);
 
     PurgeLocalHistoryPayload(final LocalHistoryIdentifier historyId, final byte[] serialized) {
         super(historyId, serialized);
@@ -63,13 +38,18 @@ public final class PurgeLocalHistoryPayload extends AbstractIdentifiablePayload<
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", historyId, e);
-            throw new RuntimeException("Failed to serialize " + historyId, e);
+            throw new IllegalStateException("Failed to serialize " + historyId, e);
         }
         return new PurgeLocalHistoryPayload(historyId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected PH externalizableProxy(final byte[] serialized) {
+        return new PH(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
index ac849723e168b1b439c1a90dd4f4b9f87504a07d..e63fa3b72dbbe74ff6a9578b9485029486e763a4 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
-import java.io.DataInput;
 import java.io.IOException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.slf4j.Logger;
@@ -21,34 +20,10 @@ import org.slf4j.LoggerFactory;
  * @author Robert Varga
  */
 public final class PurgeTransactionPayload extends AbstractIdentifiablePayload<TransactionIdentifier> {
-    private static final class Proxy extends AbstractProxy<TransactionIdentifier> {
-        private static final long serialVersionUID = 1L;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final byte[] serialized) {
-            super(serialized);
-        }
-
-        @Override
-        protected TransactionIdentifier readIdentifier(final DataInput in) throws IOException {
-            return TransactionIdentifier.readFrom(in);
-        }
-
-        @Override
-        protected PurgeTransactionPayload createObject(final TransactionIdentifier identifier,
-                final byte[] serialized) {
-            return new PurgeTransactionPayload(identifier, serialized);
-        }
-    }
-
     private static final Logger LOG = LoggerFactory.getLogger(PurgeTransactionPayload.class);
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(PT::new);
 
     PurgeTransactionPayload(final TransactionIdentifier transactionId, final byte[] serialized) {
         super(transactionId, serialized);
@@ -62,13 +37,18 @@ public final class PurgeTransactionPayload extends AbstractIdentifiablePayload<T
         } catch (IOException e) {
             // This should never happen
             LOG.error("Failed to serialize {}", transactionId, e);
-            throw new RuntimeException("Failed to serialize " + transactionId, e);
+            throw new IllegalStateException("Failed to serialize " + transactionId, e);
         }
         return new PurgeTransactionPayload(transactionId, out.toByteArray());
     }
 
     @Override
-    protected Proxy externalizableProxy(final byte[] serialized) {
-        return new Proxy(serialized);
+    protected PT externalizableProxy(final byte[] serialized) {
+        return new PT(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
     }
 }
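
The nested Proxy classes removed above give way to compact top-level proxies (PH and PT here, SM/SS/ST further below). All of them lean on the standard Java serialization-proxy idiom: the real object's writeReplace() substitutes the proxy on the way out, and the proxy's readResolve() rebuilds the real object on the way in. A minimal sketch of that pairing, under assumed names (Outer/OuterProxy, not the actual controller classes):

    import java.io.Serializable;

    // Minimal serialization-proxy pairing; Outer and OuterProxy are hypothetical names.
    final class Outer implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        private final String data;

        Outer(final String data) {
            this.data = data;
        }

        String data() {
            return data;
        }

        // Serialization writes OuterProxy instead of Outer itself.
        @java.io.Serial
        private Object writeReplace() {
            return new OuterProxy(data);
        }
    }

    final class OuterProxy implements Serializable {
        @java.io.Serial
        private static final long serialVersionUID = 1L;

        private final String data;

        OuterProxy(final String data) {
            this.data = data;
        }

        // Deserialization hands back a fully constructed Outer.
        @java.io.Serial
        private Object readResolve() {
            return new Outer(data);
        }
    }
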
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SM.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SM.java
new file mode 100644 (file)
index 0000000..dc39f5c
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+
+/**
+ * Serialization proxy for {@link ShardManagerSnapshot}.
+ */
+final class SM implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ShardManagerSnapshot snapshot;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SM() {
+        // For Externalizable
+    }
+
+    SM(final ShardManagerSnapshot snapshot) {
+        this.snapshot = requireNonNull(snapshot);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        final int size = in.readInt();
+        final var shardList = new ArrayList<String>(size);
+        for (int i = 0; i < size; i++) {
+            shardList.add((String) in.readObject());
+        }
+        snapshot = new ShardManagerSnapshot(shardList);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        final var shardList = snapshot.getShardList();
+        out.writeInt(shardList.size());
+        for (var shardName : shardList) {
+            out.writeObject(shardName);
+        }
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(snapshot);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SS.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SS.java
new file mode 100644 (file)
index 0000000..f719e1b
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+
+/**
+ * Serialization proxy for {@link ShardSnapshotState}.
+ */
+final class SS implements Externalizable {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ShardSnapshotState snapshotState;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public SS() {
+        // For Externalizable
+    }
+
+    SS(final ShardSnapshotState snapshotState) {
+        this.snapshotState = requireNonNull(snapshotState);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException {
+        snapshotState = ShardDataTreeSnapshot.deserialize(in);
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        snapshotState.getSnapshot().serialize(out);
+    }
+
+    @java.io.Serial
+    private Object readResolve() {
+        return verifyNotNull(snapshotState);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ST.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/ST.java
new file mode 100644 (file)
index 0000000..ef082c7
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import java.io.IOException;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.AbstractIdentifiablePayload.SerialForm;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+
+/**
+ * Serialization proxy for {@link SkipTransactionsPayload}.
+ */
+final class ST implements SerialForm {
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+
+    private ImmutableUnsignedLongSet transactionIds;
+    private LocalHistoryIdentifier identifier;
+    private byte[] bytes;
+
+    @SuppressWarnings("checkstyle:RedundantModifier")
+    public ST() {
+        // For Externalizable
+    }
+
+    ST(final byte[] bytes) {
+        this.bytes = requireNonNull(bytes);
+    }
+
+    @Override
+    public byte[] bytes() {
+        return bytes;
+    }
+
+    @Override
+    public void readExternal(final byte[] newBytes) throws IOException {
+        bytes = requireNonNull(newBytes);
+
+        final var in = ByteStreams.newDataInput(newBytes);
+        identifier = LocalHistoryIdentifier.readFrom(in);
+        transactionIds = verifyNotNull(ImmutableUnsignedLongSet.readFrom(in));
+    }
+
+    @Override
+    public Object readResolve() {
+        return new SkipTransactionsPayload(identifier, bytes, transactionIds);
+    }
+}
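
ST above carries only the serialized byte[] across the wire and rebuilds the typed identifier and transaction set when readExternal(byte[]) hands the image back. A rough sketch of that bytes-first shape, with a plain String standing in for LocalHistoryIdentifier (hypothetical BytesForm class, not part of the change):

    import static java.util.Objects.requireNonNull;

    import com.google.common.io.ByteStreams;

    final class BytesForm {
        private byte[] bytes;
        private String identifier;

        BytesForm(final byte[] bytes) {
            this.bytes = requireNonNull(bytes);
        }

        byte[] bytes() {
            return bytes;
        }

        // Decode the typed view from the byte image, as ST.readExternal(byte[]) does above.
        // Assumes the image was produced with DataOutput.writeUTF().
        void readFrom(final byte[] newBytes) {
            bytes = requireNonNull(newBytes);
            identifier = ByteStreams.newDataInput(newBytes).readUTF();
        }

        String identifier() {
            return identifier;
        }
    }
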
index 7a8bd4648bad5ff0b49bc0bed74d6b47c8d55653..7ae9fa288679601f2c17c8708f668808fdfd8ab4 100644 (file)
@@ -12,6 +12,7 @@ import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 /**
@@ -25,8 +26,8 @@ public abstract class ShardDataTreeSnapshot {
         // Hidden to prevent subclassing from outside of this package
     }
 
-    public static ShardDataTreeSnapshot deserialize(final ObjectInput in) throws IOException {
-        final ShardDataTreeSnapshot ret = AbstractVersionedShardDataTreeSnapshot.versionedDeserialize(in);
+    public static @NonNull ShardSnapshotState deserialize(final ObjectInput in) throws IOException {
+        final ShardSnapshotState ret = AbstractVersionedShardDataTreeSnapshot.versionedDeserialize(in);
 
         // Make sure we consume all bytes, otherwise something went very wrong
         final int bytesLeft = in.available();
@@ -34,7 +35,6 @@ public abstract class ShardDataTreeSnapshot {
             throw new IOException("Deserialization left " + bytesLeft + " bytes in the buffer");
         }
 
-
         return ret;
     }
 
@@ -43,7 +43,7 @@ public abstract class ShardDataTreeSnapshot {
      *
      * @return An optional root node.
      */
-    public abstract Optional<NormalizedNode<?, ?>> getRootNode();
+    public abstract Optional<NormalizedNode> getRootNode();
 
     public abstract void serialize(ObjectOutput out) throws IOException;
 }
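
deserialize() above now returns a ShardSnapshotState but keeps the guard that the stream must be fully drained, so trailing garbage is detected instead of silently ignored. A small sketch of that consume-then-verify check, with a hypothetical reader class:

    import java.io.IOException;
    import java.io.ObjectInput;

    final class DrainCheck {
        private DrainCheck() {
            // Utility sketch
        }

        // Read one object and fail if the producer left trailing bytes, mirroring deserialize() above.
        static Object readFully(final ObjectInput in) throws IOException, ClassNotFoundException {
            final Object result = in.readObject();
            final int bytesLeft = in.available();
            if (bytesLeft != 0) {
                throw new IOException("Deserialization left " + bytesLeft + " bytes in the buffer");
            }
            return result;
        }
    }
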
index 3ba5a91a9cd63e70333b7216966ab9e6a8fb2edb..98302ee503710f9a95cf75b31f69bea6dc5ceefd 100644 (file)
@@ -7,10 +7,11 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
 import java.io.Externalizable;
 import java.io.Serializable;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Base class for various bits of metadata attached to a {@link MetadataShardDataTreeSnapshot}. This class is not
@@ -38,7 +39,7 @@ public abstract class ShardDataTreeSnapshotMetadata<T extends ShardDataTreeSnaps
     }
 
     final Object writeReplace() {
-        return Verify.verifyNotNull(externalizableProxy(), "Null externalizable proxy from %s", getClass());
+        return verifyNotNull(externalizableProxy(), "Null externalizable proxy from %s", getClass());
     }
 
     /**
@@ -46,8 +47,7 @@ public abstract class ShardDataTreeSnapshotMetadata<T extends ShardDataTreeSnaps
      *
      * @return Externalizable proxy, may not be null
      */
-    @Nonnull
-    protected abstract Externalizable externalizableProxy();
+    protected abstract @NonNull Externalizable externalizableProxy();
 
     public abstract Class<T> getType();
 }
index 0c1969b216bf550ebd17cf6d163ea119f3bc701c..86d293528a9dab2bdecf7c2b30800dd707b271a7 100644 (file)
@@ -8,96 +8,32 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
 import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
+import org.eclipse.jdt.annotation.NonNull;
 
 /**
  * Represents the persisted snapshot state for the ShardManager.
  *
  * @author Thomas Pantelis
  */
-public class ShardManagerSnapshot implements Serializable {
+public final class ShardManagerSnapshot implements Serializable {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ShardManagerSnapshot snapshot;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ShardManagerSnapshot snapshot) {
-            this.snapshot = snapshot;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeInt(snapshot.shardList.size());
-            for (String shard: snapshot.shardList) {
-                out.writeObject(shard);
-            }
-
-            out.writeInt(snapshot.prefixShardConfiguration.size());
-            for (Map.Entry<?, ?> prefixShardConfigEntry : snapshot.prefixShardConfiguration.entrySet()) {
-                out.writeObject(prefixShardConfigEntry.getKey());
-                out.writeObject(prefixShardConfigEntry.getValue());
-            }
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            int size = in.readInt();
-            List<String> localShardList = new ArrayList<>(size);
-            for (int i = 0; i < size; i++) {
-                localShardList.add((String) in.readObject());
-            }
-
-            size = in.readInt();
-            Map<DOMDataTreeIdentifier, PrefixShardConfiguration> localPrefixShardConfiguration = new HashMap<>(size);
-            for (int i = 0; i < size; i++) {
-                localPrefixShardConfiguration.put((DOMDataTreeIdentifier) in.readObject(),
-                        (PrefixShardConfiguration) in.readObject());
-            }
-
-            snapshot = new ShardManagerSnapshot(localShardList, localPrefixShardConfiguration);
-        }
-
-        private Object readResolve() {
-            return snapshot;
-        }
-    }
-
     private final List<String> shardList;
-    private final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> prefixShardConfiguration;
 
-    public ShardManagerSnapshot(@Nonnull final List<String> shardList,
-                                final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> prefixShardConfiguration) {
+    public ShardManagerSnapshot(final @NonNull List<String> shardList) {
         this.shardList = ImmutableList.copyOf(shardList);
-        this.prefixShardConfiguration = ImmutableMap.copyOf(prefixShardConfiguration);
     }
 
     public List<String> getShardList() {
-        return this.shardList;
+        return shardList;
     }
 
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new SM(this);
     }
 
     @Override
index 8f22c0ec2dc2b2fe5bed0033b2bd18657263c27d..c06c5cf3189d404d38271ef094eccb4204131481 100644 (file)
@@ -7,13 +7,11 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.VisibleForTesting;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.Externalizable;
-import java.io.IOException;
-import java.io.ObjectInput;
-import java.io.ObjectOutput;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 
 /**
@@ -21,55 +19,37 @@ import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
  *
  * @author Thomas Pantelis
  */
-public class ShardSnapshotState implements Snapshot.State {
+public final class ShardSnapshotState implements Snapshot.State {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    private static final class Proxy implements Externalizable {
-        private static final long serialVersionUID = 1L;
-
-        private ShardSnapshotState snapshotState;
-
-        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
-        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
-        @SuppressWarnings("checkstyle:RedundantModifier")
-        public Proxy() {
-            // For Externalizable
-        }
-
-        Proxy(final ShardSnapshotState snapshotState) {
-            this.snapshotState = snapshotState;
-        }
-
-        @Override
-        public void writeExternal(final ObjectOutput out) throws IOException {
-            snapshotState.snapshot.serialize(out);
-        }
-
-        @Override
-        public void readExternal(final ObjectInput in) throws IOException {
-            snapshotState = new ShardSnapshotState(ShardDataTreeSnapshot.deserialize(in));
-        }
-
-        private Object readResolve() {
-            return snapshotState;
-        }
-    }
-
     @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
             + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
             + "aren't serialized. FindBugs does not recognize this.")
-    private final ShardDataTreeSnapshot snapshot;
+    private final @NonNull ShardDataTreeSnapshot snapshot;
+    private final boolean migrated;
+
+    @VisibleForTesting
+    public ShardSnapshotState(final @NonNull ShardDataTreeSnapshot snapshot, final boolean migrated) {
+        this.snapshot = requireNonNull(snapshot);
+        this.migrated = migrated;
+    }
 
-    public ShardSnapshotState(@Nonnull final ShardDataTreeSnapshot snapshot) {
-        this.snapshot = Preconditions.checkNotNull(snapshot);
+    public ShardSnapshotState(final @NonNull ShardDataTreeSnapshot snapshot) {
+        this(snapshot, false);
     }
 
-    @Nonnull
-    public ShardDataTreeSnapshot getSnapshot() {
+    public @NonNull ShardDataTreeSnapshot getSnapshot() {
         return snapshot;
     }
 
+    @Override
+    public boolean needsMigration() {
+        return migrated;
+    }
+
+    @java.io.Serial
     private Object writeReplace() {
-        return new Proxy(this);
+        return new SS(this);
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayload.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayload.java
new file mode 100644 (file)
index 0000000..a8fb52c
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.io.ByteStreams;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.IOException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Payload persisted when a local history is instructed to skip some transaction identifiers, i.e. the frontend has
+ * used them for other purposes. It contains a {@link LocalHistoryIdentifier} and the set of transaction identifiers
+ * within that local history.
+ */
+public final class SkipTransactionsPayload extends AbstractIdentifiablePayload<LocalHistoryIdentifier> {
+    private static final Logger LOG = LoggerFactory.getLogger(SkipTransactionsPayload.class);
+    @java.io.Serial
+    private static final long serialVersionUID = 1L;
+    private static final int PROXY_SIZE = externalizableProxySize(ST::new);
+
+    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Handled via externalizable proxy")
+    private final @NonNull ImmutableUnsignedLongSet transactionIds;
+
+    SkipTransactionsPayload(final @NonNull LocalHistoryIdentifier historyId,
+            final byte @NonNull [] serialized, final ImmutableUnsignedLongSet transactionIds) {
+        super(historyId, serialized);
+        this.transactionIds = requireNonNull(transactionIds);
+    }
+
+    public static @NonNull SkipTransactionsPayload create(final LocalHistoryIdentifier historyId,
+            final ImmutableUnsignedLongSet transactionIds, final int initialSerializedBufferCapacity) {
+        final var out = ByteStreams.newDataOutput(initialSerializedBufferCapacity);
+        try {
+            historyId.writeTo(out);
+            transactionIds.writeTo(out);
+        } catch (IOException e) {
+            // This should never happen
+            LOG.error("Failed to serialize {} ids {}", historyId, transactionIds, e);
+            throw new IllegalStateException("Failed to serialize " + historyId + " ids " + transactionIds, e);
+        }
+
+        return new SkipTransactionsPayload(historyId, out.toByteArray(), transactionIds);
+    }
+
+    public @NonNull ImmutableUnsignedLongSet getTransactionIds() {
+        return transactionIds;
+    }
+
+    @Override
+    protected ST externalizableProxy(final byte[] serialized) {
+        return new ST(serialized);
+    }
+
+    @Override
+    protected int externalizableProxySize() {
+        return PROXY_SIZE;
+    }
+}
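
create() above follows the same shape as the other payload factories in this change: serialize the identifier material into a Guava ByteArrayDataOutput, treat the "cannot really happen" IOException as an IllegalStateException, and hand the byte[] to the payload constructor. A reduced sketch with hypothetical stand-ins (a String id and a Set<Long> in place of LocalHistoryIdentifier and ImmutableUnsignedLongSet):

    import com.google.common.io.ByteStreams;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.Set;

    final class SkipSketch {
        private SkipSketch() {
            // Utility sketch
        }

        // Stand-in for LocalHistoryIdentifier.writeTo() plus ImmutableUnsignedLongSet.writeTo().
        static void writeTo(final DataOutput out, final String id, final Set<Long> transactionIds)
                throws IOException {
            out.writeUTF(id);
            out.writeInt(transactionIds.size());
            for (long txId : transactionIds) {
                out.writeLong(txId);
            }
        }

        // Mirrors create() above: buffer in memory, surface the "impossible" IOException as a hard error.
        static byte[] encode(final String id, final Set<Long> transactionIds, final int initialCapacity) {
            final var out = ByteStreams.newDataOutput(initialCapacity);
            try {
                writeTo(out, id, transactionIds);
            } catch (IOException e) {
                throw new IllegalStateException("Failed to serialize " + id, e);
            }
            return out.toByteArray();
        }
    }
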
index eb6ebb3d6220164290b24c8dbbe7d4d6e58869f6..560b5344b0052bd09e455b396780cc5579211097 100644 (file)
@@ -7,22 +7,25 @@
  */
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.Props;
-import com.google.common.base.Preconditions;
-import java.util.concurrent.CountDownLatch;
+import com.google.common.util.concurrent.SettableFuture;
 import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
 import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
+import org.opendaylight.yangtools.yang.common.Empty;
 
 public abstract class AbstractShardManagerCreator<T extends AbstractShardManagerCreator<T>> {
+    private SettableFuture<Empty> readinessFuture;
     private ClusterWrapper cluster;
     private Configuration configuration;
     private DatastoreContextFactory datastoreContextFactory;
     private AbstractDataStore distributedDataStore;
-    private CountDownLatch waitTillReadyCountDownLatch;
     private PrimaryShardInfoFutureCache primaryShardInfoCache;
     private DatastoreSnapshot restoreFromSnapshot;
     private volatile boolean sealed;
@@ -37,14 +40,14 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
     }
 
     protected final void checkSealed() {
-        Preconditions.checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
+        checkState(!sealed, "Builder is already sealed - further modifications are not allowed");
     }
 
     ClusterWrapper getCluster() {
         return cluster;
     }
 
-    public T cluster(ClusterWrapper newCluster) {
+    public T cluster(final ClusterWrapper newCluster) {
         checkSealed();
         this.cluster = newCluster;
         return self();
@@ -54,7 +57,7 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
         return configuration;
     }
 
-    public T configuration(Configuration newConfiguration) {
+    public T configuration(final Configuration newConfiguration) {
         checkSealed();
         this.configuration = newConfiguration;
         return self();
@@ -66,7 +69,7 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
 
     public T datastoreContextFactory(final DatastoreContextFactory newDatastoreContextFactory) {
         checkSealed();
-        this.datastoreContextFactory = Preconditions.checkNotNull(newDatastoreContextFactory);
+        this.datastoreContextFactory = requireNonNull(newDatastoreContextFactory);
         return self();
     }
 
@@ -80,13 +83,13 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
         return self();
     }
 
-    CountDownLatch getWaitTillReadyCountDownLatch() {
-        return waitTillReadyCountDownLatch;
+    SettableFuture<Empty> getReadinessFuture() {
+        return readinessFuture;
     }
 
-    public T waitTillReadyCountDownLatch(CountDownLatch newWaitTillReadyCountDownLatch) {
+    public T readinessFuture(final SettableFuture<Empty> newReadinessFuture) {
         checkSealed();
-        this.waitTillReadyCountDownLatch = newWaitTillReadyCountDownLatch;
+        this.readinessFuture = newReadinessFuture;
         return self();
     }
 
@@ -94,7 +97,7 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
         return primaryShardInfoCache;
     }
 
-    public T primaryShardInfoCache(PrimaryShardInfoFutureCache newPrimaryShardInfoCache) {
+    public T primaryShardInfoCache(final PrimaryShardInfoFutureCache newPrimaryShardInfoCache) {
         checkSealed();
         this.primaryShardInfoCache = newPrimaryShardInfoCache;
         return self();
@@ -104,7 +107,7 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
         return restoreFromSnapshot;
     }
 
-    public T restoreFromSnapshot(DatastoreSnapshot newRestoreFromSnapshot) {
+    public T restoreFromSnapshot(final DatastoreSnapshot newRestoreFromSnapshot) {
         checkSealed();
         this.restoreFromSnapshot = newRestoreFromSnapshot;
         return self();
@@ -112,12 +115,12 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
 
     protected void verify() {
         sealed = true;
-        Preconditions.checkNotNull(cluster, "cluster should not be null");
-        Preconditions.checkNotNull(configuration, "configuration should not be null");
-        Preconditions.checkNotNull(datastoreContextFactory, "datastoreContextFactory should not be null");
-        Preconditions.checkNotNull(distributedDataStore, "distributedDataStore should not be null");
-        Preconditions.checkNotNull(waitTillReadyCountDownLatch, "waitTillReadyCountdownLatch should not be null");
-        Preconditions.checkNotNull(primaryShardInfoCache, "primaryShardInfoCache should not be null");
+        requireNonNull(cluster, "cluster should not be null");
+        requireNonNull(configuration, "configuration should not be null");
+        requireNonNull(datastoreContextFactory, "datastoreContextFactory should not be null");
+        requireNonNull(distributedDataStore, "distributedDataStore should not be null");
+        requireNonNull(readinessFuture, "readinessFuture should not be null");
+        requireNonNull(primaryShardInfoCache, "primaryShardInfoCache should not be null");
     }
 
     public Props props() {
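
The builder above swaps the CountDownLatch for a Guava SettableFuture as the readiness signal, which lets ShardManager complete it once and lets consumers either block or attach callbacks. A minimal sketch of that hand-off, with Void standing in for yangtools' Empty and hypothetical class names:

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.MoreExecutors;
    import com.google.common.util.concurrent.SettableFuture;

    final class ReadinessSketch {
        private ReadinessSketch() {
            // Utility sketch
        }

        static void demo() {
            final SettableFuture<Void> readinessFuture = SettableFuture.create();

            // Consumers can block with get(timeout) or register a callback instead of counting down a latch.
            Futures.addCallback(readinessFuture, new FutureCallback<Void>() {
                @Override
                public void onSuccess(final Void result) {
                    System.out.println("shard manager ready");
                }

                @Override
                public void onFailure(final Throwable cause) {
                    System.out.println("startup failed: " + cause);
                }
            }, MoreExecutors.directExecutor());

            // The producer flips the future exactly once when all local shards are ready.
            readinessFuture.set(null);
        }
    }
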
index 6a4e9824405c57d72b839f757d6caa2bc1ad7c5a..6a8e392b96891bb0b1bbfeef127b20ac24060f33 100644 (file)
@@ -7,16 +7,17 @@
  */
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
-import com.google.common.base.Verify;
+import static com.google.common.base.Verify.verifyNotNull;
+
 import java.util.concurrent.atomic.AtomicReference;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
-final class AtomicShardContextProvider extends AtomicReference<SchemaContext> implements SchemaContextProvider {
+final class AtomicShardContextProvider extends AtomicReference<EffectiveModelContext> {
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
-    @Override
-    public SchemaContext getSchemaContext() {
-        return Verify.verifyNotNull(get());
+    @NonNull EffectiveModelContext modelContext() {
+        return verifyNotNull(get());
     }
 }
\ No newline at end of file
index ef7e4b0cfa7ae396abee28c0c7143665eff5bb70..f5c94413c7840cb1247e9f36cb3d59358334beae 100644 (file)
@@ -7,10 +7,12 @@
  */
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.serialization.Serialization;
-import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Strings;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -18,23 +20,21 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.Set;
-import javax.annotation.Nullable;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManager.OnShardInitialized;
 import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManager.OnShardReady;
 import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-final class ShardInformation {
+@VisibleForTesting
+public final class ShardInformation {
     private static final Logger LOG = LoggerFactory.getLogger(ShardInformation.class);
 
     private final Set<OnShardInitialized> onShardInitializedSet = new HashSet<>();
@@ -49,7 +49,7 @@ final class ShardInformation {
     private final AtomicShardContextProvider schemaContextProvider = new AtomicShardContextProvider();
     private ActorRef actor;
 
-    private Optional<DataTree> localShardDataTree;
+    private Optional<ReadOnlyDataTree> localShardDataTree;
     private boolean leaderAvailable = false;
 
     // flag that determines if the actor is ready for business
@@ -77,9 +77,8 @@ final class ShardInformation {
     }
 
     Props newProps() {
-        Preconditions.checkNotNull(builder);
-        Props props = builder.id(shardId).peerAddresses(initialPeerAddresses).datastoreContext(datastoreContext)
-                .schemaContextProvider(schemaContextProvider).props();
+        Props props = requireNonNull(builder).id(shardId).peerAddresses(initialPeerAddresses)
+                .datastoreContext(datastoreContext).schemaContextProvider(schemaContextProvider::modelContext).props();
         builder = null;
         return props;
     }
@@ -88,8 +87,8 @@ final class ShardInformation {
         return shardName;
     }
 
-    @Nullable
-    ActorRef getActor() {
+    @VisibleForTesting
+    @Nullable public ActorRef getActor() {
         return actor;
     }
 
@@ -101,11 +100,11 @@ final class ShardInformation {
         return shardId;
     }
 
-    void setLocalDataTree(final Optional<DataTree> dataTree) {
-        this.localShardDataTree = dataTree;
+    void setLocalDataTree(final ReadOnlyDataTree dataTree) {
+        localShardDataTree = Optional.ofNullable(dataTree);
     }
 
-    Optional<DataTree> getLocalShardDataTree() {
+    Optional<ReadOnlyDataTree> getLocalShardDataTree() {
         return localShardDataTree;
     }
 
@@ -114,10 +113,10 @@ final class ShardInformation {
     }
 
     void setDatastoreContext(final DatastoreContext newDatastoreContext, final ActorRef sender) {
-        this.datastoreContext = newDatastoreContext;
+        datastoreContext = newDatastoreContext;
         if (actor != null) {
             LOG.debug("Sending new DatastoreContext to {}", shardId);
-            actor.tell(this.datastoreContext, sender);
+            actor.tell(datastoreContext, sender);
         }
     }
 
@@ -134,18 +133,6 @@ final class ShardInformation {
         notifyOnShardInitializedCallbacks();
     }
 
-    void peerDown(final MemberName memberName, final String peerId, final ActorRef sender) {
-        if (actor != null) {
-            actor.tell(new PeerDown(memberName, peerId), sender);
-        }
-    }
-
-    void peerUp(final MemberName memberName, final String peerId, final ActorRef sender) {
-        if (actor != null) {
-            actor.tell(new PeerUp(memberName, peerId), sender);
-        }
-    }
-
     boolean isShardReady() {
         return !RaftState.Candidate.name().equals(role) && !Strings.isNullOrEmpty(role);
     }
@@ -165,17 +152,13 @@ final class ShardInformation {
     }
 
     String getSerializedLeaderActor() {
-        if (isLeader()) {
-            return Serialization.serializedActorPath(getActor());
-        } else {
-            return addressResolver.resolve(leaderId);
-        }
+        return isLeader() ? Serialization.serializedActorPath(getActor()) : addressResolver.resolve(leaderId);
     }
 
     void setActorInitialized() {
         LOG.debug("Shard {} is initialized", shardId);
 
-        this.actorInitialized = true;
+        actorInitialized = true;
 
         notifyOnShardInitializedCallbacks();
     }
@@ -185,10 +168,10 @@ final class ShardInformation {
             return;
         }
 
-        boolean ready = isShardReadyWithLeaderId();
-
-        LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId,
-            ready ? "ready" : "initialized", onShardInitializedSet.size());
+        final boolean ready = isShardReadyWithLeaderId();
+        final String readyStr = ready ? "ready" : "initialized";
+        LOG.debug("Shard {} is {} - notifying {} OnShardInitialized callbacks", shardId, readyStr,
+            onShardInitializedSet.size());
 
         Iterator<OnShardInitialized> iter = onShardInitializedSet.iterator();
         while (iter.hasNext()) {
@@ -210,7 +193,7 @@ final class ShardInformation {
     }
 
     void setRole(final String newRole) {
-        this.role = newRole;
+        role = newRole;
 
         notifyOnShardInitializedCallbacks();
     }
@@ -220,13 +203,13 @@ final class ShardInformation {
     }
 
     void setFollowerSyncStatus(final boolean syncStatus) {
-        this.followerSyncStatus = syncStatus;
+        followerSyncStatus = syncStatus;
     }
 
     boolean isInSync() {
-        if (RaftState.Follower.name().equals(this.role)) {
+        if (RaftState.Follower.name().equals(role)) {
             return followerSyncStatus;
-        } else if (RaftState.Leader.name().equals(this.role)) {
+        } else if (RaftState.Leader.name().equals(role)) {
             return true;
         }
 
@@ -234,10 +217,10 @@ final class ShardInformation {
     }
 
     boolean setLeaderId(final String newLeaderId) {
-        final boolean changed = !Objects.equals(this.leaderId, newLeaderId);
-        this.leaderId = newLeaderId;
+        final boolean changed = !Objects.equals(leaderId, newLeaderId);
+        leaderId = newLeaderId;
         if (newLeaderId != null) {
-            this.leaderAvailable = true;
+            leaderAvailable = true;
         }
         notifyOnShardInitializedCallbacks();
 
@@ -269,15 +252,20 @@ final class ShardInformation {
     }
 
     void setActiveMember(final boolean isActiveMember) {
-        this.activeMember = isActiveMember;
+        activeMember = isActiveMember;
+    }
+
+    EffectiveModelContext getSchemaContext() {
+        return schemaContextProvider.modelContext();
     }
 
-    SchemaContext getSchemaContext() {
-        return schemaContextProvider.getSchemaContext();
+    void setSchemaContext(final EffectiveModelContext schemaContext) {
+        schemaContextProvider.set(requireNonNull(schemaContext));
     }
 
-    void setSchemaContext(final SchemaContext schemaContext) {
-        schemaContextProvider.set(Preconditions.checkNotNull(schemaContext));
+    @VisibleForTesting
+    Shard.AbstractBuilder<?, ?> getBuilder() {
+        return builder;
     }
 
     @Override
index 56bdd7f8ea2cb6329dbdfe9cf79f38759f0375d0..adc686723bd67fc602af2c19005fb5a44358284f 100644 (file)
@@ -5,10 +5,9 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
-import static akka.pattern.Patterns.ask;
+import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import akka.actor.Address;
@@ -34,17 +33,16 @@ import akka.persistence.SnapshotOffer;
 import akka.persistence.SnapshotSelectionCriteria;
 import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.SettableFuture;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.function.Consumer;
@@ -52,22 +50,18 @@ import java.util.function.Supplier;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
 import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
 import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
 import org.opendaylight.controller.cluster.datastore.exceptions.AlreadyExistsException;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.ActorInitialized;
-import org.opendaylight.controller.cluster.datastore.messages.AddPrefixShardReplica;
 import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
 import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
 import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
@@ -81,13 +75,11 @@ import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
 import org.opendaylight.controller.cluster.datastore.messages.RemoteFindPrimary;
 import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
-import org.opendaylight.controller.cluster.datastore.messages.RemovePrefixShardReplica;
 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
 import org.opendaylight.controller.cluster.datastore.messages.ShardLeaderStateChanged;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.datastore.utils.CompositeOnComplete;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
@@ -108,16 +100,9 @@ import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
 import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
-import org.opendaylight.controller.cluster.sharding.PrefixedShardConfigUpdateHandler;
-import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.ExecutionContext;
@@ -139,7 +124,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
     // Stores a mapping between a shard name and it's corresponding information
     // Shard names look like inventory, topology etc and are as specified in
     // configuration
-    private final Map<String, ShardInformation> localShards = new HashMap<>();
+    @VisibleForTesting
+    final Map<String, ShardInformation> localShards = new HashMap<>();
 
     // The type of a ShardManager reflects the type of the datastore itself
     // A data store could be of type config/operational
@@ -149,19 +135,21 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private final Configuration configuration;
 
-    private final String shardDispatcherPath;
+    @VisibleForTesting
+    final String shardDispatcherPath;
 
     private final ShardManagerInfo shardManagerMBean;
 
     private DatastoreContextFactory datastoreContextFactory;
 
-    private final CountDownLatch waitTillReadyCountdownLatch;
+    private final SettableFuture<Empty> readinessFuture;
 
     private final PrimaryShardInfoFutureCache primaryShardInfoCache;
 
-    private final ShardPeerAddressResolver peerAddressResolver;
+    @VisibleForTesting
+    final ShardPeerAddressResolver peerAddressResolver;
 
-    private SchemaContext schemaContext;
+    private EffectiveModelContext modelContext;
 
     private DatastoreSnapshot restoreFromSnapshot;
 
@@ -174,21 +162,18 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
     private final Set<Consumer<String>> shardAvailabilityCallbacks = new HashSet<>();
 
     private final String persistenceId;
-    private final AbstractDataStore dataStore;
-
-    private ListenerRegistration<DOMDataTreeChangeListener> configListenerReg = null;
-    private PrefixedShardConfigUpdateHandler configUpdateHandler;
 
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR", justification = "Akka class design")
     ShardManager(final AbstractShardManagerCreator<?> builder) {
-        this.cluster = builder.getCluster();
-        this.configuration = builder.getConfiguration();
-        this.datastoreContextFactory = builder.getDatastoreContextFactory();
-        this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
-        this.shardDispatcherPath =
-                new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
-        this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountDownLatch();
-        this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
-        this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
+        cluster = builder.getCluster();
+        configuration = builder.getConfiguration();
+        datastoreContextFactory = builder.getDatastoreContextFactory();
+        type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
+        shardDispatcherPath = new Dispatchers(context().system().dispatchers())
+            .getDispatcherPath(Dispatchers.DispatcherType.Shard);
+        readinessFuture = builder.getReadinessFuture();
+        primaryShardInfoCache = builder.getPrimaryShardInfoCache();
+        restoreFromSnapshot = builder.getRestoreFromSnapshot();
 
         String possiblePersistenceId = datastoreContextFactory.getBaseDatastoreContext().getShardManagerPersistenceId();
         persistenceId = possiblePersistenceId != null ? possiblePersistenceId : "shard-manager-" + type;
@@ -199,11 +184,9 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         cluster.subscribeToMemberEvents(getSelf());
 
         shardManagerMBean = new ShardManagerInfo(getSelf(), cluster.getCurrentMemberName(),
-                "shard-manager-" + this.type,
+                "shard-manager-" + type,
                 datastoreContextFactory.getBaseDatastoreContext().getDataStoreMXBeanType());
         shardManagerMBean.registerMBean();
-
-        dataStore = builder.getDistributedDataStore();
     }
 
     @Override
@@ -216,110 +199,90 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         LOG.info("Stopping ShardManager {}", persistenceId());
 
         shardManagerMBean.unregisterMBean();
-
-        if (configListenerReg != null) {
-            configListenerReg.close();
-            configListenerReg = null;
-        }
     }
 
     @Override
     public void handleCommand(final Object message) throws Exception {
-        if (message  instanceof FindPrimary) {
-            findPrimary((FindPrimary)message);
-        } else if (message instanceof FindLocalShard) {
-            findLocalShard((FindLocalShard) message);
-        } else if (message instanceof UpdateSchemaContext) {
-            updateSchemaContext(message);
-        } else if (message instanceof ActorInitialized) {
-            onActorInitialized(message);
-        } else if (message instanceof ClusterEvent.MemberUp) {
-            memberUp((ClusterEvent.MemberUp) message);
-        } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
-            memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
-        } else if (message instanceof ClusterEvent.MemberExited) {
-            memberExited((ClusterEvent.MemberExited) message);
-        } else if (message instanceof ClusterEvent.MemberRemoved) {
-            memberRemoved((ClusterEvent.MemberRemoved) message);
-        } else if (message instanceof ClusterEvent.UnreachableMember) {
-            memberUnreachable((ClusterEvent.UnreachableMember) message);
-        } else if (message instanceof ClusterEvent.ReachableMember) {
-            memberReachable((ClusterEvent.ReachableMember) message);
-        } else if (message instanceof DatastoreContextFactory) {
-            onDatastoreContextFactory((DatastoreContextFactory) message);
-        } else if (message instanceof RoleChangeNotification) {
-            onRoleChangeNotification((RoleChangeNotification) message);
-        } else if (message instanceof FollowerInitialSyncUpStatus) {
-            onFollowerInitialSyncStatus((FollowerInitialSyncUpStatus) message);
-        } else if (message instanceof ShardNotInitializedTimeout) {
-            onShardNotInitializedTimeout((ShardNotInitializedTimeout) message);
-        } else if (message instanceof ShardLeaderStateChanged) {
-            onLeaderStateChanged((ShardLeaderStateChanged) message);
-        } else if (message instanceof SwitchShardBehavior) {
-            onSwitchShardBehavior((SwitchShardBehavior) message);
-        } else if (message instanceof CreateShard) {
-            onCreateShard((CreateShard)message);
-        } else if (message instanceof AddShardReplica) {
-            onAddShardReplica((AddShardReplica) message);
-        } else if (message instanceof AddPrefixShardReplica) {
-            onAddPrefixShardReplica((AddPrefixShardReplica) message);
-        } else if (message instanceof PrefixShardCreated) {
-            onPrefixShardCreated((PrefixShardCreated) message);
-        } else if (message instanceof PrefixShardRemoved) {
-            onPrefixShardRemoved((PrefixShardRemoved) message);
-        } else if (message instanceof InitConfigListener) {
-            onInitConfigListener();
-        } else if (message instanceof ForwardedAddServerReply) {
-            ForwardedAddServerReply msg = (ForwardedAddServerReply)message;
-            onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath,
-                    msg.removeShardOnFailure);
-        } else if (message instanceof ForwardedAddServerFailure) {
-            ForwardedAddServerFailure msg = (ForwardedAddServerFailure)message;
+        if (message instanceof FindPrimary msg) {
+            findPrimary(msg);
+        } else if (message instanceof FindLocalShard msg) {
+            findLocalShard(msg);
+        } else if (message instanceof UpdateSchemaContext msg) {
+            updateSchemaContext(msg);
+        } else if (message instanceof ActorInitialized msg) {
+            onActorInitialized(msg);
+        } else if (message instanceof ClusterEvent.MemberUp msg) {
+            memberUp(msg);
+        } else if (message instanceof ClusterEvent.MemberWeaklyUp msg) {
+            memberWeaklyUp(msg);
+        } else if (message instanceof ClusterEvent.MemberExited msg) {
+            memberExited(msg);
+        } else if (message instanceof ClusterEvent.MemberRemoved msg) {
+            memberRemoved(msg);
+        } else if (message instanceof ClusterEvent.UnreachableMember msg) {
+            memberUnreachable(msg);
+        } else if (message instanceof ClusterEvent.ReachableMember msg) {
+            memberReachable(msg);
+        } else if (message instanceof DatastoreContextFactory msg) {
+            onDatastoreContextFactory(msg);
+        } else if (message instanceof RoleChangeNotification msg) {
+            onRoleChangeNotification(msg);
+        } else if (message instanceof FollowerInitialSyncUpStatus msg) {
+            onFollowerInitialSyncStatus(msg);
+        } else if (message instanceof ShardNotInitializedTimeout msg) {
+            onShardNotInitializedTimeout(msg);
+        } else if (message instanceof ShardLeaderStateChanged msg) {
+            onLeaderStateChanged(msg);
+        } else if (message instanceof SwitchShardBehavior msg) {
+            onSwitchShardBehavior(msg);
+        } else if (message instanceof CreateShard msg) {
+            onCreateShard(msg);
+        } else if (message instanceof AddShardReplica msg) {
+            onAddShardReplica(msg);
+        } else if (message instanceof ForwardedAddServerReply msg) {
+            onAddServerReply(msg.shardInfo, msg.addServerReply, getSender(), msg.leaderPath, msg.removeShardOnFailure);
+        } else if (message instanceof ForwardedAddServerFailure msg) {
             onAddServerFailure(msg.shardName, msg.failureMessage, msg.failure, getSender(), msg.removeShardOnFailure);
-        } else if (message instanceof RemoveShardReplica) {
-            onRemoveShardReplica((RemoveShardReplica) message);
-        } else if (message instanceof RemovePrefixShardReplica) {
-            onRemovePrefixShardReplica((RemovePrefixShardReplica) message);
-        } else if (message instanceof WrappedShardResponse) {
-            onWrappedShardResponse((WrappedShardResponse) message);
-        } else if (message instanceof GetSnapshot) {
-            onGetSnapshot();
-        } else if (message instanceof ServerRemoved) {
-            onShardReplicaRemoved((ServerRemoved) message);
-        } else if (message instanceof ChangeShardMembersVotingStatus) {
-            onChangeShardServersVotingStatus((ChangeShardMembersVotingStatus) message);
-        } else if (message instanceof FlipShardMembersVotingStatus) {
-            onFlipShardMembersVotingStatus((FlipShardMembersVotingStatus) message);
-        } else if (message instanceof SaveSnapshotSuccess) {
-            onSaveSnapshotSuccess((SaveSnapshotSuccess) message);
-        } else if (message instanceof SaveSnapshotFailure) {
-            LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(),
-                    ((SaveSnapshotFailure) message).cause());
+        } else if (message instanceof RemoveShardReplica msg) {
+            onRemoveShardReplica(msg);
+        } else if (message instanceof WrappedShardResponse msg) {
+            onWrappedShardResponse(msg);
+        } else if (message instanceof GetSnapshot msg) {
+            onGetSnapshot(msg);
+        } else if (message instanceof ServerRemoved msg) {
+            onShardReplicaRemoved(msg);
+        } else if (message instanceof ChangeShardMembersVotingStatus msg) {
+            onChangeShardServersVotingStatus(msg);
+        } else if (message instanceof FlipShardMembersVotingStatus msg) {
+            onFlipShardMembersVotingStatus(msg);
+        } else if (message instanceof SaveSnapshotSuccess msg) {
+            onSaveSnapshotSuccess(msg);
+        } else if (message instanceof SaveSnapshotFailure msg) {
+            LOG.error("{}: SaveSnapshotFailure received for saving snapshot of shards", persistenceId(), msg.cause());
         } else if (message instanceof Shutdown) {
             onShutDown();
         } else if (message instanceof GetLocalShardIds) {
             onGetLocalShardIds();
-        } else if (message instanceof GetShardRole) {
-            onGetShardRole((GetShardRole) message);
-        } else if (message instanceof RunnableMessage) {
-            ((RunnableMessage)message).run();
-        } else if (message instanceof RegisterForShardAvailabilityChanges) {
-            onRegisterForShardAvailabilityChanges((RegisterForShardAvailabilityChanges)message);
-        } else if (message instanceof DeleteSnapshotsFailure) {
-            LOG.warn("{}: Failed to delete prior snapshots", persistenceId(),
-                    ((DeleteSnapshotsFailure) message).cause());
+        } else if (message instanceof GetShardRole msg) {
+            onGetShardRole(msg);
+        } else if (message instanceof RunnableMessage msg) {
+            msg.run();
+        } else if (message instanceof RegisterForShardAvailabilityChanges msg) {
+            onRegisterForShardAvailabilityChanges(msg);
+        } else if (message instanceof DeleteSnapshotsFailure msg) {
+            LOG.warn("{}: Failed to delete prior snapshots", persistenceId(), msg.cause());
         } else if (message instanceof DeleteSnapshotsSuccess) {
             LOG.debug("{}: Successfully deleted prior snapshots", persistenceId());
         } else if (message instanceof RegisterRoleChangeListenerReply) {
             LOG.trace("{}: Received RegisterRoleChangeListenerReply", persistenceId());
-        } else if (message instanceof ClusterEvent.MemberEvent) {
-            LOG.trace("{}: Received other ClusterEvent.MemberEvent: {}", persistenceId(), message);
+        } else if (message instanceof ClusterEvent.MemberEvent msg) {
+            LOG.trace("{}: Received other ClusterEvent.MemberEvent: {}", persistenceId(), msg);
         } else {
             unknownMessage(message);
         }
     }
 
-    private void onRegisterForShardAvailabilityChanges(RegisterForShardAvailabilityChanges message) {
+    private void onRegisterForShardAvailabilityChanges(final RegisterForShardAvailabilityChanges message) {
         LOG.debug("{}: onRegisterForShardAvailabilityChanges: {}", persistenceId(), message);
 
         final Consumer<String> callback = message.getCallback();
@@ -346,22 +309,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         getSender().tell(new GetShardRoleReply(shardInformation.getRole()), ActorRef.noSender());
     }
 
-    private void onInitConfigListener() {
-        LOG.debug("{}: Initializing config listener on {}", persistenceId(), cluster.getCurrentMemberName());
-
-        final org.opendaylight.mdsal.common.api.LogicalDatastoreType datastoreType =
-                org.opendaylight.mdsal.common.api.LogicalDatastoreType
-                        .valueOf(datastoreContextFactory.getBaseDatastoreContext().getLogicalStoreType().name());
-
-        if (configUpdateHandler != null) {
-            configUpdateHandler.close();
-        }
-
-        configUpdateHandler = new PrefixedShardConfigUpdateHandler(self(), cluster.getCurrentMemberName());
-        configUpdateHandler.initListener(dataStore, datastoreType);
-    }
-
-    private void onShutDown() {
+    void onShutDown() {
         List<Future<Boolean>> stopFutures = new ArrayList<>(localShards.size());
         for (ShardInformation info : localShards.values()) {
             if (info.getActor() != null) {
@@ -430,47 +378,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         }
     }
 
-    private void removePrefixShardReplica(final RemovePrefixShardReplica contextMessage, final String shardName,
-                                          final String primaryPath, final ActorRef sender) {
-        if (isShardReplicaOperationInProgress(shardName, sender)) {
-            return;
-        }
-
-        shardReplicaOperationsInProgress.add(shardName);
-
-        final ShardIdentifier shardId = getShardIdentifier(contextMessage.getMemberName(), shardName);
-
-        final DatastoreContext datastoreContext = newShardDatastoreContextBuilder(shardName).build();
-
-        //inform ShardLeader to remove this shard as a replica by sending an RemoveServer message
-        LOG.debug("{}: Sending RemoveServer message to peer {} for shard {}", persistenceId(),
-                primaryPath, shardId);
-
-        Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
-        Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
-                new RemoveServer(shardId.toString()), removeServerTimeout);
-
-        futureObj.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object response) {
-                if (failure != null) {
-                    shardReplicaOperationsInProgress.remove(shardName);
-
-                    LOG.debug("{}: RemoveServer request to leader {} for shard {} failed", persistenceId(), primaryPath,
-                        shardName, failure);
-
-                    // FAILURE
-                    sender.tell(new Status.Failure(new RuntimeException(
-                        String.format("RemoveServer request to leader %s for shard %s failed", primaryPath, shardName),
-                        failure)), self());
-                } else {
-                    // SUCCESS
-                    self().tell(new WrappedShardResponse(shardId, response, primaryPath), sender);
-                }
-            }
-        }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
-    }
-
     private void removeShardReplica(final RemoveShardReplica contextMessage, final String shardName,
             final String primaryPath, final ActorRef sender) {
         if (isShardReplicaOperationInProgress(shardName, sender)) {
@@ -488,10 +395,10 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 primaryPath, shardId);
 
         Timeout removeServerTimeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration());
-        Future<Object> futureObj = ask(getContext().actorSelection(primaryPath),
+        Future<Object> futureObj = Patterns.ask(getContext().actorSelection(primaryPath),
                 new RemoveServer(shardId.toString()), removeServerTimeout);
 
-        futureObj.onComplete(new OnComplete<Object>() {
+        futureObj.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
@@ -535,7 +442,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             final Future<Boolean> stopFuture = Patterns.gracefulStop(shardActor,
                     FiniteDuration.apply(timeoutInMS, TimeUnit.MILLISECONDS), Shutdown.INSTANCE);
 
-            final CompositeOnComplete<Boolean> onComplete = new CompositeOnComplete<Boolean>() {
+            final CompositeOnComplete<Boolean> onComplete = new CompositeOnComplete<>() {
                 @Override
                 public void onComplete(final Throwable failure, final Boolean result) {
                     if (failure == null) {
@@ -563,7 +470,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         persistShardList();
     }
 
-    private void onGetSnapshot() {
+    private void onGetSnapshot(final GetSnapshot getSnapshot) {
         LOG.debug("{}: onGetSnapshot", persistenceId());
 
         List<String> notInitialized = null;
@@ -588,7 +495,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 datastoreContextFactory.getBaseDatastoreContext().getShardInitializationTimeout().duration()));
 
         for (ShardInformation shardInfo: localShards.values()) {
-            shardInfo.getActor().tell(GetSnapshot.INSTANCE, replyActor);
+            shardInfo.getActor().tell(getSnapshot, replyActor);
         }
     }
 
@@ -616,32 +523,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         }
     }
 
-    private void onPrefixShardCreated(final PrefixShardCreated message) {
-        LOG.debug("{}: onPrefixShardCreated: {}", persistenceId(), message);
-
-        final PrefixShardConfiguration config = message.getConfiguration();
-        final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
-                ClusterUtils.getCleanShardName(config.getPrefix().getRootIdentifier()));
-        final String shardName = shardId.getShardName();
-
-        if (isPreviousShardActorStopInProgress(shardName, message)) {
-            return;
-        }
-
-        if (localShards.containsKey(shardName)) {
-            LOG.debug("{}: Received create for an already existing shard {}", persistenceId(), shardName);
-            final PrefixShardConfiguration existing =
-                    configuration.getAllPrefixShardConfigurations().get(config.getPrefix());
-
-            if (existing != null && existing.equals(config)) {
-                // we don't have to do nothing here
-                return;
-            }
-        }
-
-        doCreatePrefixShard(config, shardId, shardName);
-    }
-
     private boolean isPreviousShardActorStopInProgress(final String shardName, final Object messageToDefer) {
         final CompositeOnComplete<Boolean> stopOnComplete = shardActorsStopping.get(shardName);
         if (stopOnComplete == null) {
@@ -662,43 +543,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         return true;
     }
 
-    private void doCreatePrefixShard(final PrefixShardConfiguration config, final ShardIdentifier shardId,
-            final String shardName) {
-        configuration.addPrefixShardConfiguration(config);
-
-        final Builder builder = newShardDatastoreContextBuilder(shardName);
-        builder.logicalStoreType(config.getPrefix().getDatastoreType())
-                .storeRoot(config.getPrefix().getRootIdentifier());
-        DatastoreContext shardDatastoreContext = builder.build();
-
-        final Map<String, String> peerAddresses = getPeerAddresses(shardName);
-        final boolean isActiveMember = true;
-
-        LOG.debug("{} doCreatePrefixShard: shardId: {}, memberNames: {}, peerAddresses: {}, isActiveMember: {}",
-                persistenceId(), shardId, config.getShardMemberNames(), peerAddresses, isActiveMember);
-
-        final ShardInformation info = new ShardInformation(shardName, shardId, peerAddresses,
-                shardDatastoreContext, Shard.builder(), peerAddressResolver);
-        info.setActiveMember(isActiveMember);
-        localShards.put(info.getShardName(), info);
-
-        if (schemaContext != null) {
-            info.setSchemaContext(schemaContext);
-            info.setActor(newShardActor(info));
-        }
-    }
-
-    private void onPrefixShardRemoved(final PrefixShardRemoved message) {
-        LOG.debug("{}: onPrefixShardRemoved : {}", persistenceId(), message);
-
-        final DOMDataTreeIdentifier prefix = message.getPrefix();
-        final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
-                ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
-
-        configuration.removePrefixShardConfiguration(prefix);
-        removeShard(shardId);
-    }
-
     private void doCreateShard(final CreateShard createShard) {
         final ModuleShardConfiguration moduleShardConfig = createShard.getModuleShardConfig();
         final String shardName = moduleShardConfig.getShardName();
@@ -730,7 +574,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             // the shard with no peers and with elections disabled so it stays as follower. A
             // subsequent AddServer request will be needed to make it an active member.
             isActiveMember = false;
-            peerAddresses = Collections.emptyMap();
+            peerAddresses = Map.of();
             shardDatastoreContext = DatastoreContext.newBuilderFrom(shardDatastoreContext)
                     .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName()).build();
         }
@@ -744,8 +588,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         info.setActiveMember(isActiveMember);
         localShards.put(info.getShardName(), info);
 
-        if (schemaContext != null) {
-            info.setSchemaContext(schemaContext);
+        if (modelContext != null) {
+            info.setSchemaContext(modelContext);
             info.setActor(newShardActor(info));
         }
     }
@@ -761,10 +605,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private void checkReady() {
         if (isReadyWithLeaderId()) {
-            LOG.info("{}: All Shards are ready - data store {} is ready, available count is {}",
-                    persistenceId(), type, waitTillReadyCountdownLatch.getCount());
-
-            waitTillReadyCountdownLatch.countDown();
+            LOG.info("{}: All Shards are ready - data store {} is ready", persistenceId(), type);
+            readinessFuture.set(Empty.value());
         }
     }
 
@@ -773,7 +615,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
         ShardInformation shardInformation = findShardInformation(leaderStateChanged.getMemberId());
         if (shardInformation != null) {
-            shardInformation.setLocalDataTree(leaderStateChanged.getLocalShardDataTree());
+            shardInformation.setLocalDataTree(leaderStateChanged.localShardDataTree());
             shardInformation.setLeaderVersion(leaderStateChanged.getLeaderPayloadVersion());
             if (shardInformation.setLeaderId(leaderStateChanged.getLeaderId())) {
                 primaryShardInfoCache.remove(shardInformation.getShardName());
@@ -787,7 +629,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         }
     }
 
-    private void notifyShardAvailabilityCallbacks(ShardInformation shardInformation) {
+    private void notifyShardAvailabilityCallbacks(final ShardInformation shardInformation) {
         shardAvailabilityCallbacks.forEach(callback -> callback.accept(shardInformation.getShardName()));
     }
 
@@ -804,7 +646,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             message.getSender().tell(createNotInitializedException(shardInfo.getShardId()), getSelf());
         } else {
             LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(), shardInfo.getShardName());
-            message.getSender().tell(createNoShardLeaderException(shardInfo.getShardId()), getSelf());
+            message.getSender().tell(new NoShardLeaderException(shardInfo.getShardId()), getSelf());
         }
     }
 
@@ -865,13 +707,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         return true;
     }
 
-    private void onActorInitialized(final Object message) {
-        final ActorRef sender = getSender();
-
-        if (sender == null) {
-            // why is a non-actor sending this message? Just ignore.
-            return;
-        }
+    private void onActorInitialized(final ActorInitialized message) {
+        final var sender = message.actorRef();
 
         String actorName = sender.path().name();
         //find shard name from actor name; actor name is stringified shardId
@@ -902,8 +739,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
     protected void handleRecover(final Object message) throws Exception {
         if (message instanceof RecoveryCompleted) {
             onRecoveryCompleted();
-        } else if (message instanceof SnapshotOffer) {
-            applyShardManagerSnapshot((ShardManagerSnapshot)((SnapshotOffer) message).snapshot());
+        } else if (message instanceof SnapshotOffer msg) {
+            applyShardManagerSnapshot((ShardManagerSnapshot) msg.snapshot());
         }
     }
 
@@ -963,7 +800,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             } else {
                 LOG.debug("{}: Returning NoShardLeaderException for shard {}", persistenceId(),
                         shardInformation.getShardName());
-                getSender().tell(createNoShardLeaderException(shardInformation.getShardId()), getSelf());
+                getSender().tell(new NoShardLeaderException(shardInformation.getShardId()), getSelf());
             }
 
             return;
@@ -972,10 +809,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         getSender().tell(messageSupplier.get(), getSelf());
     }
 
-    private static NoShardLeaderException createNoShardLeaderException(final ShardIdentifier shardId) {
-        return new NoShardLeaderException(null, shardId.toString());
-    }
-
     private static NotInitializedException createNotInitializedException(final ShardIdentifier shardId) {
         return new NotInitializedException(String.format(
                 "Found primary shard %s but it's not initialized yet. Please try again later", shardId));
@@ -993,10 +826,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 message.member().address());
 
         peerAddressResolver.removePeerAddress(memberName);
-
-        for (ShardInformation info : localShards.values()) {
-            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
-        }
     }
 
     private void memberExited(final ClusterEvent.MemberExited message) {
@@ -1006,10 +835,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 message.member().address());
 
         peerAddressResolver.removePeerAddress(memberName);
-
-        for (ShardInformation info : localShards.values()) {
-            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
-        }
     }
 
     private void memberUp(final ClusterEvent.MemberUp message) {
@@ -1042,8 +867,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             String shardName = info.getShardName();
             String peerId = getShardIdentifier(memberName, shardName).toString();
             info.updatePeerAddress(peerId, peerAddressResolver.getShardActorAddress(shardName, memberName), getSelf());
-
-            info.peerUp(memberName, peerId, getSelf());
         }
     }
 
@@ -1074,8 +897,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
                 notifyShardAvailabilityCallbacks(info);
             }
-
-            info.peerDown(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
         }
     }
 
@@ -1086,8 +907,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 LOG.debug("Marking Leader {} as available.", leaderId);
                 info.setLeaderAvailable(true);
             }
-
-            info.peerUp(memberName, getShardIdentifier(memberName, info.getShardName()).toString(), getSelf());
         }
     }
 
@@ -1144,13 +963,13 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
      *
      * @param message the {@code UpdateSchemaContext} message carrying the updated model context
      */
-    private void updateSchemaContext(final Object message) {
-        schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+    private void updateSchemaContext(final UpdateSchemaContext message) {
+        modelContext = message.modelContext();
 
-        LOG.debug("Got updated SchemaContext: # of modules {}", schemaContext.getModules().size());
+        LOG.debug("Got updated SchemaContext: # of modules {}", modelContext.getModules().size());
 
         for (ShardInformation info : localShards.values()) {
-            info.setSchemaContext(schemaContext);
+            info.setSchemaContext(modelContext);
 
             if (info.getActor() == null) {
                 LOG.debug("Creating Shard {}", info.getShardId());
@@ -1162,7 +981,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                     String peerId = getShardIdentifier(memberName, shardName).toString() ;
                     String peerAddress = peerAddressResolver.getShardActorAddress(shardName, memberName);
                     info.updatePeerAddress(peerId, peerAddress, getSelf());
-                    info.peerUp(memberName, peerId, getSelf());
                     LOG.debug("{}: updated peer {} on member {} with address {} on shard {} whose actor address is {}",
                             persistenceId(), peerId, memberName, peerAddress, info.getShardId(), info.getActor());
                 }
@@ -1195,7 +1013,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             sendResponse(info, message.isWaitUntilReady(), true, () -> {
                 String primaryPath = info.getSerializedLeaderActor();
                 Object found = canReturnLocalShardState && info.isLeader()
-                        ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().get()) :
+                        ? new LocalPrimaryShardFound(primaryPath, info.getLocalShardDataTree().orElseThrow()) :
                             new RemotePrimaryShardFound(primaryPath, info.getLeaderVersion());
 
                 LOG.debug("{}: Found primary for {}: {}", persistenceId(), shardName, found);
@@ -1237,20 +1055,18 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         Timeout findPrimaryTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
                 .getShardInitializationTimeout().duration().$times(2));
 
-        Future<Object> futureObj = ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
-        futureObj.onComplete(new OnComplete<Object>() {
+        Future<Object> futureObj = Patterns.ask(getSelf(), new FindPrimary(shardName, true), findPrimaryTimeout);
+        futureObj.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
                     handler.onFailure(failure);
+                } else if (response instanceof RemotePrimaryShardFound msg) {
+                    handler.onRemotePrimaryShardFound(msg);
+                } else if (response instanceof LocalPrimaryShardFound msg) {
+                    handler.onLocalPrimaryFound(msg);
                 } else {
-                    if (response instanceof RemotePrimaryShardFound) {
-                        handler.onRemotePrimaryShardFound((RemotePrimaryShardFound) response);
-                    } else if (response instanceof LocalPrimaryShardFound) {
-                        handler.onLocalPrimaryFound((LocalPrimaryShardFound) response);
-                    } else {
-                        handler.onUnknownResponse(response);
-                    }
+                    handler.onUnknownResponse(response);
                 }
             }
         }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
@@ -1272,8 +1088,8 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
      * Create shards that are local to the member on which the ShardManager runs.
      */
     private void createLocalShards() {
-        MemberName memberName = this.cluster.getCurrentMemberName();
-        Collection<String> memberShardNames = this.configuration.getMemberShardNames(memberName);
+        MemberName memberName = cluster.getCurrentMemberName();
+        Collection<String> memberShardNames = configuration.getMemberShardNames(memberName);
 
         Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots = new HashMap<>();
         if (restoreFromSnapshot != null) {
@@ -1291,25 +1107,34 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             LOG.debug("{}: Creating local shard: {}", persistenceId(), shardId);
 
             Map<String, String> peerAddresses = getPeerAddresses(shardName);
-            localShards.put(shardName, new ShardInformation(shardName, shardId, peerAddresses,
-                    newShardDatastoreContext(shardName), Shard.builder().restoreFromSnapshot(
-                        shardSnapshots.get(shardName)), peerAddressResolver));
+            localShards.put(shardName, createShardInfoFor(shardName, shardId, peerAddresses,
+                    newShardDatastoreContext(shardName), shardSnapshots));
         }
     }
 
+    @VisibleForTesting
+    ShardInformation createShardInfoFor(final String shardName, final ShardIdentifier shardId,
+                                        final Map<String, String> peerAddresses,
+                                        final DatastoreContext datastoreContext,
+                                        final Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots) {
+        return new ShardInformation(shardName, shardId, peerAddresses,
+                datastoreContext, Shard.builder().restoreFromSnapshot(shardSnapshots.get(shardName)),
+                peerAddressResolver);
+    }
+
     /**
      * Given the name of the shard, find the addresses of all its peers.
      *
      * @param shardName the shard name
      */
-    private Map<String, String> getPeerAddresses(final String shardName) {
+    Map<String, String> getPeerAddresses(final String shardName) {
         final Collection<MemberName> members = configuration.getMembersFromShardName(shardName);
         return getPeerAddresses(shardName, members);
     }
 
     private Map<String, String> getPeerAddresses(final String shardName, final Collection<MemberName> members) {
         Map<String, String> peerAddresses = new HashMap<>();
-        MemberName currentMemberName = this.cluster.getCurrentMemberName();
+        MemberName currentMemberName = cluster.getCurrentMemberName();
 
         for (MemberName memberName : members) {
             if (!currentMemberName.equals(memberName)) {
@@ -1352,48 +1177,13 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         return false;
     }
 
-    private void onAddPrefixShardReplica(final AddPrefixShardReplica message) {
-        LOG.debug("{}: onAddPrefixShardReplica: {}", persistenceId(), message);
-
-        final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
-                ClusterUtils.getCleanShardName(message.getShardPrefix()));
-        final String shardName = shardId.getShardName();
-
-        // Create the localShard
-        if (schemaContext == null) {
-            LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
-                persistenceId(), shardName);
-            getSender().tell(new Status.Failure(new IllegalStateException(
-                "No SchemaContext is available in order to create a local shard instance for " + shardName)),
-                getSelf());
-            return;
-        }
-
-        findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(), shardName, persistenceId(),
-                getSelf()) {
-            @Override
-            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
-                final RunnableMessage runnable = (RunnableMessage) () -> addPrefixShard(getShardName(),
-                        message.getShardPrefix(), response, getSender());
-                if (!isPreviousShardActorStopInProgress(getShardName(), runnable)) {
-                    getSelf().tell(runnable, getTargetActor());
-                }
-            }
-
-            @Override
-            public void onLocalPrimaryFound(final LocalPrimaryShardFound message) {
-                sendLocalReplicaAlreadyExistsReply(getShardName(), getTargetActor());
-            }
-        });
-    }
-
     private void onAddShardReplica(final AddShardReplica shardReplicaMsg) {
         final String shardName = shardReplicaMsg.getShardName();
 
         LOG.debug("{}: onAddShardReplica: {}", persistenceId(), shardReplicaMsg);
 
         // verify the shard with the specified name is present in the cluster configuration
-        if (!this.configuration.isShardConfigured(shardName)) {
+        if (!configuration.isShardConfigured(shardName)) {
             LOG.debug("{}: No module configuration exists for shard {}", persistenceId(), shardName);
             getSender().tell(new Status.Failure(new IllegalArgumentException(
                 "No module configuration exists for shard " + shardName)), getSelf());
@@ -1401,7 +1191,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         }
 
         // Create the localShard
-        if (schemaContext == null) {
+        if (modelContext == null) {
             LOG.debug("{}: No SchemaContext is available in order to create a local shard instance for {}",
                 persistenceId(), shardName);
             getSender().tell(new Status.Failure(new IllegalStateException(
@@ -1434,40 +1224,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             String.format("Local shard %s already exists", shardName))), getSelf());
     }
 
-    private void addPrefixShard(final String shardName, final YangInstanceIdentifier shardPrefix,
-                                final RemotePrimaryShardFound response, final ActorRef sender) {
-        if (isShardReplicaOperationInProgress(shardName, sender)) {
-            return;
-        }
-
-        shardReplicaOperationsInProgress.add(shardName);
-
-        final ShardInformation shardInfo;
-        final boolean removeShardOnFailure;
-        ShardInformation existingShardInfo = localShards.get(shardName);
-        if (existingShardInfo == null) {
-            removeShardOnFailure = true;
-            ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(), shardName);
-
-            final Builder builder = newShardDatastoreContextBuilder(shardName);
-            builder.storeRoot(shardPrefix).customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
-            DatastoreContext datastoreContext = builder.build();
-
-            shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
-                    Shard.builder(), peerAddressResolver);
-            shardInfo.setActiveMember(false);
-            shardInfo.setSchemaContext(schemaContext);
-            localShards.put(shardName, shardInfo);
-            shardInfo.setActor(newShardActor(shardInfo));
-        } else {
-            removeShardOnFailure = false;
-            shardInfo = existingShardInfo;
-        }
-
-        execAddShard(shardName, shardInfo, response, removeShardOnFailure, sender);
-    }
-
     private void addShard(final String shardName, final RemotePrimaryShardFound response, final ActorRef sender) {
         if (isShardReplicaOperationInProgress(shardName, sender)) {
             return;
@@ -1488,7 +1244,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             shardInfo = new ShardInformation(shardName, shardId, getPeerAddresses(shardName), datastoreContext,
                     Shard.builder(), peerAddressResolver);
             shardInfo.setActiveMember(false);
-            shardInfo.setSchemaContext(schemaContext);
+            shardInfo.setSchemaContext(modelContext);
             localShards.put(shardName, shardInfo);
             shardInfo.setActor(newShardActor(shardInfo));
         } else {
@@ -1514,10 +1270,10 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
         final Timeout addServerTimeout = new Timeout(shardInfo.getDatastoreContext()
                 .getShardLeaderElectionTimeout().duration());
-        final Future<Object> futureObj = ask(getContext().actorSelection(response.getPrimaryPath()),
+        final Future<Object> futureObj = Patterns.ask(getContext().actorSelection(response.getPrimaryPath()),
                 new AddServer(shardInfo.getShardId().toString(), localShardAddress, true), addServerTimeout);
 
-        futureObj.onComplete(new OnComplete<Object>() {
+        futureObj.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object addServerResponse) {
                 if (failure != null) {
@@ -1581,21 +1337,18 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
 
     private static Exception getServerChangeException(final Class<?> serverChange,
             final ServerChangeStatus serverChangeStatus, final String leaderPath, final ShardIdentifier shardId) {
-        switch (serverChangeStatus) {
-            case TIMEOUT:
-                return new TimeoutException(String.format(
-                        "The shard leader %s timed out trying to replicate the initial data to the new shard %s."
-                        + "Possible causes - there was a problem replicating the data or shard leadership changed "
-                        + "while replicating the shard data", leaderPath, shardId.getShardName()));
-            case NO_LEADER:
-                return createNoShardLeaderException(shardId);
-            case NOT_SUPPORTED:
-                return new UnsupportedOperationException(String.format("%s request is not supported for shard %s",
-                        serverChange.getSimpleName(), shardId.getShardName()));
-            default :
-                return new RuntimeException(String.format("%s request to leader %s for shard %s failed with status %s",
-                        serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
-        }
+        return switch (serverChangeStatus) {
+            case TIMEOUT -> new TimeoutException("""
+                The shard leader %s timed out trying to replicate the initial data to the new shard %s. Possible \
+                causes - there was a problem replicating the data or shard leadership changed while replicating the \
+                shard data""".formatted(leaderPath, shardId.getShardName()));
+            case NO_LEADER -> new NoShardLeaderException(shardId);
+            case NOT_SUPPORTED -> new UnsupportedOperationException(
+                "%s request is not supported for shard %s".formatted(
+                    serverChange.getSimpleName(), shardId.getShardName()));
+            default -> new RuntimeException("%s request to leader %s for shard %s failed with status %s".formatted(
+                serverChange.getSimpleName(), leaderPath, shardId.getShardName(), serverChangeStatus));
+        };
     }
 
     private void onRemoveShardReplica(final RemoveShardReplica shardReplicaMsg) {
@@ -1620,32 +1373,6 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         });
     }
 
-    private void onRemovePrefixShardReplica(final RemovePrefixShardReplica message) {
-        LOG.debug("{}: onRemovePrefixShardReplica: {}", persistenceId(), message);
-
-        final ShardIdentifier shardId = getShardIdentifier(cluster.getCurrentMemberName(),
-                ClusterUtils.getCleanShardName(message.getShardPrefix()));
-        final String shardName = shardId.getShardName();
-
-        findPrimary(shardName, new AutoFindPrimaryFailureResponseHandler(getSender(),
-                shardName, persistenceId(), getSelf()) {
-            @Override
-            public void onRemotePrimaryShardFound(final RemotePrimaryShardFound response) {
-                doRemoveShardReplicaAsync(response.getPrimaryPath());
-            }
-
-            @Override
-            public void onLocalPrimaryFound(final LocalPrimaryShardFound response) {
-                doRemoveShardReplicaAsync(response.getPrimaryPath());
-            }
-
-            private void doRemoveShardReplicaAsync(final String primaryPath) {
-                getSelf().tell((RunnableMessage) () -> removePrefixShardReplica(message, getShardName(),
-                        primaryPath, getSender()), getTargetActor());
-            }
-        });
-    }
-
     private void persistShardList() {
         List<String> shardList = new ArrayList<>(localShards.keySet());
         for (ShardInformation shardInfo : localShards.values()) {
@@ -1654,13 +1381,11 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
             }
         }
         LOG.debug("{}: persisting the shard list {}", persistenceId(), shardList);
-        saveSnapshot(updateShardManagerSnapshot(shardList, configuration.getAllPrefixShardConfigurations()));
+        saveSnapshot(updateShardManagerSnapshot(shardList));
     }
 
-    private ShardManagerSnapshot updateShardManagerSnapshot(
-            final List<String> shardList,
-            final Map<DOMDataTreeIdentifier, PrefixShardConfiguration> allPrefixShardConfigurations) {
-        currentSnapshot = new ShardManagerSnapshot(shardList, allPrefixShardConfigurations);
+    private ShardManagerSnapshot updateShardManagerSnapshot(final List<String> shardList) {
+        currentSnapshot = new ShardManagerSnapshot(shardList);
         return currentSnapshot;
     }
 
@@ -1718,10 +1443,10 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         ActorRef sender = getSender();
         final String shardName = flipMembersVotingStatus.getShardName();
         findLocalShard(shardName, sender, localShardFound -> {
-            Future<Object> future = ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
+            Future<Object> future = Patterns.ask(localShardFound.getPath(), GetOnDemandRaftState.INSTANCE,
                     Timeout.apply(30, TimeUnit.SECONDS));
 
-            future.onComplete(new OnComplete<Object>() {
+            future.onComplete(new OnComplete<>() {
                 @Override
                 public void onComplete(final Throwable failure, final Object response) {
                     if (failure != null) {
@@ -1769,31 +1494,27 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         Timeout findLocalTimeout = new Timeout(datastoreContextFactory.getBaseDatastoreContext()
                 .getShardInitializationTimeout().duration().$times(2));
 
-        Future<Object> futureObj = ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
-        futureObj.onComplete(new OnComplete<Object>() {
+        Future<Object> futureObj = Patterns.ask(getSelf(), new FindLocalShard(shardName, true), findLocalTimeout);
+        futureObj.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
                     LOG.debug("{}: Received failure from FindLocalShard for shard {}", persistenceId, shardName,
-                            failure);
+                        failure);
                     sender.tell(new Status.Failure(new RuntimeException(
-                            String.format("Failed to find local shard %s", shardName), failure)), self());
+                        String.format("Failed to find local shard %s", shardName), failure)), self());
+                } else if (response instanceof LocalShardFound msg) {
+                    getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept(msg), sender);
+                } else if (response instanceof LocalShardNotFound) {
+                    LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
+                    sender.tell(new Status.Failure(new IllegalArgumentException(
+                        String.format("Local shard %s does not exist", shardName))), self());
                 } else {
-                    if (response instanceof LocalShardFound) {
-                        getSelf().tell((RunnableMessage) () -> onLocalShardFound.accept((LocalShardFound) response),
-                                sender);
-                    } else if (response instanceof LocalShardNotFound) {
-                        LOG.debug("{}: Local shard {} does not exist", persistenceId, shardName);
-                        sender.tell(new Status.Failure(new IllegalArgumentException(
-                            String.format("Local shard %s does not exist", shardName))), self());
-                    } else {
-                        LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
-                            response);
-                        sender.tell(new Status.Failure(response instanceof Throwable ? (Throwable) response
-                                : new RuntimeException(
-                                    String.format("Failed to find local shard %s: received response: %s", shardName,
-                                        response))), self());
-                    }
+                    LOG.debug("{}: Failed to find local shard {}: received response: {}", persistenceId, shardName,
+                        response);
+                    sender.tell(new Status.Failure(response instanceof Throwable throwable ? throwable
+                        : new RuntimeException(String.format("Failed to find local shard %s: received response: %s",
+                            shardName, response))), self());
                 }
             }
         }, new Dispatchers(context().system().dispatchers()).getDispatcher(Dispatchers.DispatcherType.Client));
@@ -1814,9 +1535,9 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 changeServersVotingStatus, shardActorRef.path());
 
         Timeout timeout = new Timeout(datastoreContext.getShardLeaderElectionTimeout().duration().$times(2));
-        Future<Object> futureObj = ask(shardActorRef, changeServersVotingStatus, timeout);
+        Future<Object> futureObj = Patterns.ask(shardActorRef, changeServersVotingStatus, timeout);
 
-        futureObj.onComplete(new OnComplete<Object>() {
+        futureObj.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 shardReplicaOperationsInProgress.remove(shardName);
@@ -1964,10 +1685,10 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
          */
         protected AutoFindPrimaryFailureResponseHandler(final ActorRef targetActor, final String shardName,
                 final String persistenceId, final ActorRef shardManagerActor) {
-            this.targetActor = Preconditions.checkNotNull(targetActor);
-            this.shardName = Preconditions.checkNotNull(shardName);
-            this.persistenceId = Preconditions.checkNotNull(persistenceId);
-            this.shardManagerActor = Preconditions.checkNotNull(shardManagerActor);
+            this.targetActor = requireNonNull(targetActor);
+            this.shardName = requireNonNull(shardName);
+            this.persistenceId = requireNonNull(persistenceId);
+            this.shardManagerActor = requireNonNull(shardManagerActor);
         }
 
         public ActorRef getTargetActor() {
index 219c5bb4958c6da4254eeff397c8b84f71c16b65..8577a5914c0389905f9f55d4fff8fb496ac91cff 100644 (file)
@@ -5,12 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.pattern.Patterns;
-import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
 import java.util.List;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
@@ -37,8 +38,8 @@ final class ShardManagerInfo extends AbstractMXBean implements ShardManagerInfoM
     ShardManagerInfo(final ActorRef shardManager, final MemberName memberName, final String name,
         final String mxBeanType) {
         super(name, mxBeanType, JMX_CATEGORY_SHARD_MANAGER);
-        this.shardManager = Preconditions.checkNotNull(shardManager);
-        this.memberName = Preconditions.checkNotNull(memberName);
+        this.shardManager = requireNonNull(shardManager);
+        this.memberName = requireNonNull(memberName);
     }
 
     @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch"})
@@ -47,10 +48,9 @@ final class ShardManagerInfo extends AbstractMXBean implements ShardManagerInfoM
         try {
             return (List<String>) Await.result(
                 Patterns.ask(shardManager, GetLocalShardIds.INSTANCE, ASK_TIMEOUT_MILLIS), Duration.Inf());
-        } catch (RuntimeException e) {
-            throw e;
         } catch (Exception e) {
-            throw new RuntimeException(e);
+            Throwables.throwIfUnchecked(e);
+            throw new IllegalStateException(e);
         }
     }
 
@@ -80,10 +80,9 @@ final class ShardManagerInfo extends AbstractMXBean implements ShardManagerInfoM
                 try {
                     Await.result(Patterns.ask(shardManager, new SwitchShardBehavior(shardId, state, term),
                         ASK_TIMEOUT_MILLIS), Duration.Inf());
-                } catch (RuntimeException e) {
-                    throw e;
                 } catch (Exception e) {
-                    throw new RuntimeException(e);
+                    Throwables.throwIfUnchecked(e);
+                    throw new IllegalStateException(e);
                 }
                 break;
             case Candidate:
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerSnapshot.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerSnapshot.java
deleted file mode 100644 (file)
index 5322420..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2015 Dell Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.shardmanager;
-
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.Collections;
-import java.util.List;
-import javax.annotation.Nonnull;
-
-/**
- * Persisted data of the ShardManager.
- *
- * @deprecated Use {@link org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot} instead.
- */
-@Deprecated
-public final class ShardManagerSnapshot implements Serializable {
-    private static final long serialVersionUID = 1L;
-    private final List<String> shardList;
-
-    ShardManagerSnapshot(@Nonnull final List<String> shardList) {
-        this.shardList = ImmutableList.copyOf(shardList);
-    }
-
-    public List<String> getShardList() {
-        return this.shardList;
-    }
-
-    /**
-     * Creates a ShardManagerSnapshot.
-     *
-     * @deprecated This method is for migration only and should me removed once
-     *             org.opendaylight.controller.cluster.datastore.ShardManagerSnapshot is removed.
-     */
-    @Deprecated
-    public static ShardManagerSnapshot forShardList(@Nonnull final List<String> shardList) {
-        return new ShardManagerSnapshot(shardList);
-    }
-
-    private Object readResolve() {
-        return new org.opendaylight.controller.cluster.datastore.persisted.ShardManagerSnapshot(shardList,
-                Collections.emptyMap());
-    }
-
-    @Override
-    public String toString() {
-        return "ShardManagerSnapshot [ShardList = " + shardList + " ]";
-    }
-}
index b1e9079f508669163b619fe67ebf7fd3c6b017bb..6c33652bb1a6cde93cad77ec48adbe4068289060 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.Address;
 import akka.actor.AddressFromURIString;
-import com.google.common.base.Preconditions;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Map;
@@ -35,17 +36,17 @@ class ShardPeerAddressResolver implements PeerAddressResolver {
     private final String shardManagerType;
     private final MemberName localMemberName;
 
-    ShardPeerAddressResolver(String shardManagerType, MemberName localMemberName) {
+    ShardPeerAddressResolver(final String shardManagerType, final MemberName localMemberName) {
         this.shardManagerIdentifier = ShardManagerIdentifier.builder().type(shardManagerType).build().toString();
         this.shardManagerType = shardManagerType;
-        this.localMemberName = Preconditions.checkNotNull(localMemberName);
+        this.localMemberName = requireNonNull(localMemberName);
     }
 
-    void addPeerAddress(MemberName memberName, Address address) {
+    void addPeerAddress(final MemberName memberName, final Address address) {
         memberNameToAddress.put(memberName, address);
     }
 
-    void removePeerAddress(MemberName memberName) {
+    void removePeerAddress(final MemberName memberName) {
         memberNameToAddress.remove(memberName);
     }
 
@@ -53,7 +54,7 @@ class ShardPeerAddressResolver implements PeerAddressResolver {
         return this.memberNameToAddress.keySet();
     }
 
-    Address getPeerAddress(MemberName memberName) {
+    Address getPeerAddress(final MemberName memberName) {
         return memberNameToAddress.get(memberName);
     }
 
@@ -68,11 +69,11 @@ class ShardPeerAddressResolver implements PeerAddressResolver {
         return peerAddresses;
     }
 
-    ShardIdentifier getShardIdentifier(MemberName memberName, String shardName) {
+    ShardIdentifier getShardIdentifier(final MemberName memberName, final String shardName) {
         return ShardIdentifier.create(shardName, memberName, shardManagerType);
     }
 
-    String getShardActorAddress(String shardName, MemberName memberName) {
+    String getShardActorAddress(final String shardName, final MemberName memberName) {
         Address memberAddress = memberNameToAddress.get(memberName);
         if (memberAddress != null) {
             return getShardManagerActorPathBuilder(memberAddress).append("/").append(
@@ -82,12 +83,12 @@ class ShardPeerAddressResolver implements PeerAddressResolver {
         return null;
     }
 
-    StringBuilder getShardManagerActorPathBuilder(Address address) {
+    StringBuilder getShardManagerActorPathBuilder(final Address address) {
         return new StringBuilder().append(address.toString()).append("/user/").append(shardManagerIdentifier);
     }
 
     @Override
-    public String resolve(String peerId) {
+    public String resolve(final String peerId) {
         if (peerId == null) {
             return null;
         }
@@ -97,7 +98,7 @@ class ShardPeerAddressResolver implements PeerAddressResolver {
     }
 
     @Override
-    public void setResolved(String peerId, String address) {
+    public void setResolved(final String peerId, final String address) {
         memberNameToAddress.put(ShardIdentifier.fromShardIdString(peerId).getMemberName(),
                 AddressFromURIString.parse(address));
     }
index 63b8f65d420ffb9600c148ebc33db28e0d925f63..be5044270be7077b7b6f7697877208bcdbeade1d 100644 (file)
@@ -5,11 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
-import com.google.common.base.Preconditions;
-import javax.annotation.Nullable;
+import static java.util.Objects.requireNonNull;
+
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.raft.RaftState;
 
@@ -19,7 +19,7 @@ final class SwitchShardBehavior {
     private final long term;
 
     SwitchShardBehavior(final ShardIdentifier shardId, final RaftState newState, final long term) {
-        this.newState = Preconditions.checkNotNull(newState);
+        this.newState = requireNonNull(newState);
         this.shardId = shardId;
         this.term = term;
     }
index 8628b56461285d5c34f9f3f9ae9e4512385640d1..a0712b67146cca8d79ef27e9463bb0eb15c3fb47 100644 (file)
@@ -31,9 +31,4 @@ public final class DefaultShardStrategy implements ShardStrategy {
     public String findShard(YangInstanceIdentifier path) {
         return DEFAULT_SHARD;
     }
-
-    @Override
-    public YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path) {
-        return YangInstanceIdentifier.EMPTY;
-    }
 }
index 64e24a9cb965a56cdee48ec2d6c333c37fc44a26..c90baf2760c00a07fd08812ec3869cfc59727f10 100644 (file)
@@ -28,11 +28,4 @@ public class ModuleShardStrategy implements ShardStrategy {
         String shardName = configuration.getShardNameForModule(moduleName);
         return shardName != null ? shardName : DefaultShardStrategy.DEFAULT_SHARD;
     }
-
-    @Override
-    public YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path) {
-        return YangInstanceIdentifier.EMPTY;
-    }
-
-
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardstrategy/PrefixShardStrategy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardstrategy/PrefixShardStrategy.java
deleted file mode 100644 (file)
index 25e1160..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.shardstrategy;
-
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Shard Strategy that resolves a path to a prefix shard name.
- */
-public class PrefixShardStrategy implements ShardStrategy {
-
-    public static final String NAME = "prefix";
-
-    private final String shardName;
-    private final YangInstanceIdentifier prefix;
-
-    public PrefixShardStrategy(final String shardName,
-                               final YangInstanceIdentifier prefix) {
-        this.shardName = shardName != null ? shardName : DefaultShardStrategy.DEFAULT_SHARD;
-        this.prefix = prefix;
-    }
-
-    @Override
-    public String findShard(final YangInstanceIdentifier path) {
-        return shardName;
-    }
-
-    @Override
-    public YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path) {
-        return prefix;
-    }
-}
index 0a4c54b656bad9c77e4f2500c5df72c64d14819a..496069d0a198b354ca8a671000366a229209ab97 100644 (file)
@@ -25,11 +25,4 @@ public interface ShardStrategy {
      * @return the corresponding shard name.
      */
     String findShard(YangInstanceIdentifier path);
-
-    /**
-     * Get the prefix of the shard that contains the data pointed to by the specified path.
-     * @param path the location of the data in the logical tree.
-     * @return the corresponding shards prefix.
-     */
-    YangInstanceIdentifier getPrefixForPath(YangInstanceIdentifier path);
 }
index e63ab9744514cf2fd94e4d822df3fc020ce52058..0a93c0d6b3458710950e318495cad9861057b7c2 100644 (file)
@@ -5,41 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.shardstrategy;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkState;
+import static java.util.Objects.requireNonNull;
+
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 public class ShardStrategyFactory {
     private static final String UNKNOWN_MODULE_NAME = "unknown";
 
     private final Configuration configuration;
-    private final LogicalDatastoreType logicalStoreType;
 
-    public ShardStrategyFactory(final Configuration configuration, final LogicalDatastoreType logicalStoreType) {
-        Preconditions.checkState(configuration != null, "configuration should not be missing");
+    public ShardStrategyFactory(final Configuration configuration) {
+        checkState(configuration != null, "configuration should not be missing");
         this.configuration = configuration;
-        this.logicalStoreType = Preconditions.checkNotNull(logicalStoreType);
     }
 
     public ShardStrategy getStrategy(final YangInstanceIdentifier path) {
-        Preconditions.checkNotNull(path, "path should not be null");
-
-        // try with the legacy module based shard mapping
-        final String moduleName = getModuleName(path);
+        final String moduleName = getModuleName(requireNonNull(path, "path should not be null"));
         final ShardStrategy shardStrategy = configuration.getStrategyForModule(moduleName);
         if (shardStrategy == null) {
-            // retry with prefix based sharding
-            final ShardStrategy strategyForPrefix =
-                    configuration.getStrategyForPrefix(new DOMDataTreeIdentifier(logicalStoreType, path));
-            if (strategyForPrefix == null) {
-                return DefaultShardStrategy.getInstance();
-            }
-            return strategyForPrefix;
+            return DefaultShardStrategy.getInstance();
         }
 
         return shardStrategy;
@@ -59,7 +47,7 @@ public class ShardStrategyFactory {
             return UNKNOWN_MODULE_NAME;
         }
 
-        String namespace = path.getPathArguments().get(0).getNodeType().getNamespace().toASCIIString();
+        String namespace = path.getPathArguments().get(0).getNodeType().getNamespace().toString();
         String moduleName = configuration.getModuleNameFromNameSpace(namespace);
         return moduleName != null ? moduleName : UNKNOWN_MODULE_NAME;
     }
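
The factory now resolves strategies purely by module: the namespace of the path's first argument is mapped to a module name and, when no strategy is configured for that module, DefaultShardStrategy is used. A rough stand-in sketch of that lookup shape, using plain JDK maps instead of the Configuration interface and assuming Guava is on the classpath (all names below are made up for illustration):

    import static com.google.common.base.Preconditions.checkState;
    import static java.util.Objects.requireNonNull;

    import java.util.Map;

    // Stand-in types: the two maps play the role of Configuration's namespace-to-module and
    // module-to-shard mappings, "default" plays the role of DefaultShardStrategy.
    final class ModuleShardLookup {
        private final Map<String, String> moduleByNamespace;
        private final Map<String, String> shardByModule;

        ModuleShardLookup(final Map<String, String> moduleByNamespace, final Map<String, String> shardByModule) {
            checkState(moduleByNamespace != null, "configuration should not be missing");
            this.moduleByNamespace = moduleByNamespace;
            this.shardByModule = requireNonNull(shardByModule);
        }

        String shardFor(final String namespace) {
            final String moduleName = moduleByNamespace.getOrDefault(
                requireNonNull(namespace, "namespace should not be null"), "unknown");
            // No strategy configured for the module: fall back to the default shard
            return shardByModule.getOrDefault(moduleName, "default");
        }
    }
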
index d859f6790531d80f3375fe674964b5517019c22d..d740461fdf11c9db988d96866f3ec42e26cada29 100644 (file)
@@ -20,8 +20,8 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractBatchedModificationsCursor extends AbstractDataTreeModificationCursor {
-
     protected abstract BatchedModifications getModifications();
 
     @Override
@@ -30,12 +30,12 @@ public abstract class AbstractBatchedModificationsCursor extends AbstractDataTre
     }
 
     @Override
-    public final void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+    public final void merge(final PathArgument child, final NormalizedNode data) {
         getModifications().addModification(new MergeModification(current().node(child), data));
     }
 
     @Override
-    public final void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+    public final void write(final PathArgument child, final NormalizedNode data) {
         getModifications().addModification(new WriteModification(current().node(child), data));
     }
 }
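
The NormalizedNode<?, ?> to NormalizedNode change here, and in several hunks below, tracks a later yangtools release dropping the type parameters from NormalizedNode, so overrides simply take the plain interface. A small sketch of the adjusted callback shape (the interface name is hypothetical):

    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    // Hypothetical sink mirroring the cursor callbacks: data is the raw-free NormalizedNode
    // interface rather than the old NormalizedNode<?, ?> form.
    interface ModificationSink {
        void write(PathArgument child, NormalizedNode data);

        void merge(PathArgument child, NormalizedNode data);
    }
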
index 3d45f647a759ef19839f7d398d5bf2e2d635db9e..8e61c569faa422f32f7dd13c7bfac2479a5d5f00 100644 (file)
@@ -7,13 +7,10 @@
  */
 package org.opendaylight.controller.cluster.datastore.utils;
 
-import static akka.pattern.Patterns.ask;
-
 import akka.actor.ActorPath;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
-import akka.actor.Address;
 import akka.dispatch.Mapper;
 import akka.dispatch.OnComplete;
 import akka.pattern.AskTimeoutException;
@@ -21,10 +18,12 @@ import akka.pattern.Patterns;
 import akka.util.Timeout;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
+import java.lang.invoke.VarHandle;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.LongAdder;
 import java.util.function.Function;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
@@ -50,9 +49,8 @@ import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContex
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
 import org.opendaylight.controller.cluster.reporting.MetricsReporter;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.ReadOnlyDataTree;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
@@ -66,45 +64,70 @@ import scala.concurrent.duration.FiniteDuration;
  * not be passed to actors especially remote actors.
  */
 public class ActorUtils {
+    private static final class AskTimeoutCounter extends OnComplete<Object> implements ExecutionContext {
+        private LongAdder ateExceptions = new LongAdder();
+
+        @Override
+        public void onComplete(final Throwable failure, final Object success) throws Throwable {
+            if (failure instanceof AskTimeoutException) {
+                ateExceptions.increment();
+            }
+        }
+
+        void reset() {
+            ateExceptions = new LongAdder();
+        }
+
+        long sum() {
+            return ateExceptions.sum();
+        }
+
+        @Override
+        public void execute(final Runnable runnable) {
+            // Yes, we are this ugly, but then we are just doing a check + an increment
+            runnable.run();
+        }
+
+        @Override
+        public void reportFailure(final Throwable cause) {
+            LOG.warn("Unexpected failure updating counters", cause);
+        }
+    }
+
     private static final Logger LOG = LoggerFactory.getLogger(ActorUtils.class);
     private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
     private static final String METRIC_RATE = "rate";
-    private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
-                                                              new Mapper<Throwable, Throwable>() {
+    private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER = new Mapper<>() {
         @Override
         public Throwable apply(final Throwable failure) {
-            Throwable actualFailure = failure;
             if (failure instanceof AskTimeoutException) {
                 // A timeout exception most likely means the shard isn't initialized.
-                actualFailure = new NotInitializedException(
+                return new NotInitializedException(
                         "Timed out trying to find the primary shard. Most likely cause is the "
                         + "shard is not initialized yet.");
             }
-
-            return actualFailure;
+            return failure;
         }
     };
     public static final String BOUNDED_MAILBOX = "bounded-mailbox";
     public static final String COMMIT = "commit";
 
+    private final AskTimeoutCounter askTimeoutCounter = new AskTimeoutCounter();
     private final ActorSystem actorSystem;
     private final ActorRef shardManager;
     private final ClusterWrapper clusterWrapper;
     private final Configuration configuration;
+    private final String selfAddressHostPort;
+    private final Dispatchers dispatchers;
+
     private DatastoreContext datastoreContext;
     private FiniteDuration operationDuration;
     private Timeout operationTimeout;
-    private final String selfAddressHostPort;
     private TransactionRateLimiter txRateLimiter;
     private Timeout transactionCommitOperationTimeout;
     private Timeout shardInitializationTimeout;
-    private final Dispatchers dispatchers;
-
-    private volatile SchemaContext schemaContext;
 
-    // Used as a write memory barrier.
-    @SuppressWarnings("unused")
-    private volatile boolean updated;
+    private volatile EffectiveModelContext schemaContext;
 
     private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DatastoreContext.METRICS_DOMAIN)
             .getMetricsRegistry();
@@ -126,22 +149,18 @@ public class ActorUtils {
         this.clusterWrapper = clusterWrapper;
         this.configuration = configuration;
         this.datastoreContext = datastoreContext;
-        this.dispatchers = new Dispatchers(actorSystem.dispatchers());
+        dispatchers = new Dispatchers(actorSystem.dispatchers());
         this.primaryShardInfoCache = primaryShardInfoCache;
-
-        final LogicalDatastoreType convertedType =
-                LogicalDatastoreType.valueOf(datastoreContext.getLogicalStoreType().name());
-        this.shardStrategyFactory = new ShardStrategyFactory(configuration, convertedType);
+        shardStrategyFactory = new ShardStrategyFactory(configuration);
 
         setCachedProperties();
 
-        Address selfAddress = clusterWrapper.getSelfAddress();
+        final var selfAddress = clusterWrapper.getSelfAddress();
         if (selfAddress != null && !selfAddress.host().isEmpty()) {
             selfAddressHostPort = selfAddress.host().get() + ":" + selfAddress.port().get();
         } else {
             selfAddressHostPort = null;
         }
-
     }
 
     private void setCachedProperties() {
@@ -151,7 +170,7 @@ public class ActorUtils {
             TimeUnit.MILLISECONDS);
         operationTimeout = new Timeout(operationDuration);
 
-        transactionCommitOperationTimeout =  new Timeout(FiniteDuration.create(
+        transactionCommitOperationTimeout = new Timeout(FiniteDuration.create(
                 datastoreContext.getShardTransactionCommitTimeoutInSeconds(), TimeUnit.SECONDS));
 
         shardInitializationTimeout = new Timeout(datastoreContext.getShardInitializationTimeout().duration().$times(2));
@@ -177,7 +196,7 @@ public class ActorUtils {
         return actorSystem.actorSelection(actorPath);
     }
 
-    public void setSchemaContext(final SchemaContext schemaContext) {
+    public void setSchemaContext(final EffectiveModelContext schemaContext) {
         this.schemaContext = schemaContext;
 
         if (shardManager != null) {
@@ -186,65 +205,60 @@ public class ActorUtils {
     }
 
     public void setDatastoreContext(final DatastoreContextFactory contextFactory) {
-        this.datastoreContext = contextFactory.getBaseDatastoreContext();
+        datastoreContext = contextFactory.getBaseDatastoreContext();
         setCachedProperties();
 
-        // We write the 'updated' volatile to trigger a write memory barrier so that the writes above
-        // will be published immediately even though they may not be immediately visible to other
-        // threads due to unsynchronized reads. That's OK though - we're going for eventual
-        // consistency here as immediately visible updates to these members aren't critical. These
-        // members could've been made volatile but wanted to avoid volatile reads as these are
-        // accessed often and updates will be infrequent.
-
-        updated = true;
+        // Trigger a write memory barrier so that the writes above will be published immediately even though they may
+        // not be immediately visible to other threads due to unsynchronized reads. That is OK though - we are going for
+        // eventual consistency here as immediately visible updates to these members are not critical. These members
+        // could have been made volatile but wanted to avoid volatile reads as these are accessed often and updates will
+        // be infrequent.
+        VarHandle.fullFence();
 
         if (shardManager != null) {
             shardManager.tell(contextFactory, ActorRef.noSender());
         }
     }
 
-    public SchemaContext getSchemaContext() {
+    public EffectiveModelContext getSchemaContext() {
         return schemaContext;
     }
 
     public Future<PrimaryShardInfo> findPrimaryShardAsync(final String shardName) {
-        Future<PrimaryShardInfo> ret = primaryShardInfoCache.getIfPresent(shardName);
+        final var ret = primaryShardInfoCache.getIfPresent(shardName);
         if (ret != null) {
             return ret;
         }
-        Future<Object> future = executeOperationAsync(shardManager,
-                new FindPrimary(shardName, true), shardInitializationTimeout);
-
-        return future.transform(new Mapper<Object, PrimaryShardInfo>() {
-            @Override
-            public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
-                if (response instanceof RemotePrimaryShardFound) {
-                    LOG.debug("findPrimaryShardAsync received: {}", response);
-                    RemotePrimaryShardFound found = (RemotePrimaryShardFound)response;
-                    return onPrimaryShardFound(shardName, found.getPrimaryPath(), found.getPrimaryVersion(), null);
-                } else if (response instanceof LocalPrimaryShardFound) {
-                    LOG.debug("findPrimaryShardAsync received: {}", response);
-                    LocalPrimaryShardFound found = (LocalPrimaryShardFound)response;
-                    return onPrimaryShardFound(shardName, found.getPrimaryPath(), DataStoreVersions.CURRENT_VERSION,
+
+        return executeOperationAsync(shardManager, new FindPrimary(shardName, true), shardInitializationTimeout)
+            .transform(new Mapper<>() {
+                @Override
+                public PrimaryShardInfo checkedApply(final Object response) throws UnknownMessageException {
+                    if (response instanceof RemotePrimaryShardFound found) {
+                        LOG.debug("findPrimaryShardAsync received: {}", found);
+                        return onPrimaryShardFound(shardName, found.getPrimaryPath(), found.getPrimaryVersion(), null);
+                    } else if (response instanceof LocalPrimaryShardFound found) {
+                        LOG.debug("findPrimaryShardAsync received: {}", found);
+                        return onPrimaryShardFound(shardName, found.getPrimaryPath(), DataStoreVersions.CURRENT_VERSION,
                             found.getLocalShardDataTree());
-                } else if (response instanceof NotInitializedException) {
-                    throw (NotInitializedException)response;
-                } else if (response instanceof PrimaryNotFoundException) {
-                    throw (PrimaryNotFoundException)response;
-                } else if (response instanceof NoShardLeaderException) {
-                    throw (NoShardLeaderException)response;
-                }
+                    } else if (response instanceof NotInitializedException notInitialized) {
+                        throw notInitialized;
+                    } else if (response instanceof PrimaryNotFoundException primaryNotFound) {
+                        throw primaryNotFound;
+                    } else if (response instanceof NoShardLeaderException noShardLeader) {
+                        throw noShardLeader;
+                    }
 
-                throw new UnknownMessageException(String.format(
+                    throw new UnknownMessageException(String.format(
                         "FindPrimary returned unkown response: %s", response));
-            }
-        }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
+                }
+            }, FIND_PRIMARY_FAILURE_TRANSFORMER, getClientDispatcher());
     }
 
     private PrimaryShardInfo onPrimaryShardFound(final String shardName, final String primaryActorPath,
-            final short primaryVersion, final DataTree localShardDataTree) {
-        ActorSelection actorSelection = actorSystem.actorSelection(primaryActorPath);
-        PrimaryShardInfo info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
+            final short primaryVersion, final ReadOnlyDataTree localShardDataTree) {
+        final var actorSelection = actorSystem.actorSelection(primaryActorPath);
+        final var info = localShardDataTree == null ? new PrimaryShardInfo(actorSelection, primaryVersion) :
             new PrimaryShardInfo(actorSelection, primaryVersion, localShardDataTree);
         primaryShardInfoCache.putSuccessful(shardName, info);
         return info;
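
The response handling above now uses pattern matching for instanceof (Java 16+ syntax), binding the cast result directly in the condition instead of casting separately. A self-contained illustration with made-up message types, only to show the form used in findPrimaryShardAsync():

    // Made-up types; sealed interfaces and records require Java 17.
    sealed interface Response permits Found, NotReady {}
    record Found(String path) implements Response {}
    record NotReady(String reason) implements Response {}

    final class PatternMatchExample {
        static String describe(final Response response) {
            if (response instanceof Found found) {
                // 'found' is already typed, no separate cast needed
                return "found at " + found.path();
            } else if (response instanceof NotReady notReady) {
                return "not ready: " + notReady.reason();
            }
            return "unknown response: " + response;
        }
    }
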
@@ -258,15 +272,13 @@ public class ActorUtils {
      *         specified by the shardName
      */
     public Optional<ActorRef> findLocalShard(final String shardName) {
-        Object result = executeOperation(shardManager, new FindLocalShard(shardName, false));
-
-        if (result instanceof LocalShardFound) {
-            LocalShardFound found = (LocalShardFound) result;
+        final var result = executeOperation(shardManager, new FindLocalShard(shardName, false));
+        if (result instanceof LocalShardFound found) {
             LOG.debug("Local shard found {}", found.getPath());
             return Optional.of(found.getPath());
         }
 
-        return Optional.absent();
+        return Optional.empty();
     }
 
     /**
@@ -276,27 +288,23 @@ public class ActorUtils {
      * @param shardName the name of the local shard that needs to be found
      */
     public Future<ActorRef> findLocalShardAsync(final String shardName) {
-        Future<Object> future = executeOperationAsync(shardManager,
-                new FindLocalShard(shardName, true), shardInitializationTimeout);
-
-        return future.map(new Mapper<Object, ActorRef>() {
-            @Override
-            public ActorRef checkedApply(final Object response) throws Throwable {
-                if (response instanceof LocalShardFound) {
-                    LocalShardFound found = (LocalShardFound)response;
-                    LOG.debug("Local shard found {}", found.getPath());
-                    return found.getPath();
-                } else if (response instanceof NotInitializedException) {
-                    throw (NotInitializedException)response;
-                } else if (response instanceof LocalShardNotFound) {
-                    throw new LocalShardNotFoundException(
+        return executeOperationAsync(shardManager, new FindLocalShard(shardName, true), shardInitializationTimeout)
+            .map(new Mapper<>() {
+                @Override
+                public ActorRef checkedApply(final Object response) throws Throwable {
+                    if (response instanceof LocalShardFound found) {
+                        LOG.debug("Local shard found {}", found.getPath());
+                        return found.getPath();
+                    } else if (response instanceof NotInitializedException) {
+                        throw (NotInitializedException)response;
+                    } else if (response instanceof LocalShardNotFound) {
+                        throw new LocalShardNotFoundException(
                             String.format("Local shard for %s does not exist.", shardName));
-                }
+                    }
 
-                throw new UnknownMessageException(String.format(
-                        "FindLocalShard returned unkown response: %s", response));
-            }
-        }, getClientDispatcher());
+                    throw new UnknownMessageException("FindLocalShard returned unknown response: " + response);
+                }
+            }, getClientDispatcher());
     }
 
     /**
@@ -392,7 +400,7 @@ public class ActorUtils {
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     public void shutdown() {
-        FiniteDuration duration = datastoreContext.getShardRaftConfig().getElectionTimeOutInterval().$times(3);
+        final var duration = datastoreContext.getShardRaftConfig().getElectionTimeOutInterval().$times(3);
         try {
             Await.ready(Patterns.gracefulStop(shardManager, duration, Shutdown.INSTANCE), duration);
         } catch (Exception e) {
@@ -414,15 +422,15 @@ public class ActorUtils {
     public void broadcast(final Function<Short, Object> messageSupplier, final Class<?> messageClass) {
         for (final String shardName : configuration.getAllShardNames()) {
 
-            Future<PrimaryShardInfo> primaryFuture = findPrimaryShardAsync(shardName);
-            primaryFuture.onComplete(new OnComplete<PrimaryShardInfo>() {
+            final var primaryFuture = findPrimaryShardAsync(shardName);
+            primaryFuture.onComplete(new OnComplete<>() {
                 @Override
                 public void onComplete(final Throwable failure, final PrimaryShardInfo primaryShardInfo) {
                     if (failure != null) {
                         LOG.warn("broadcast failed to send message {} to shard {}", messageClass.getSimpleName(),
                             shardName, failure);
                     } else {
-                        Object message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
+                        final var message = messageSupplier.apply(primaryShardInfo.getPrimaryShardVersion());
                         primaryShardInfo.getPrimaryShardActor().tell(message, ActorRef.noSender());
                     }
                 }
@@ -456,7 +464,7 @@ public class ActorUtils {
                 return false;
             }
 
-            String hostPort = path.substring(pathAtIndex + 1, slashIndex);
+            final var hostPort = path.substring(pathAtIndex + 1, slashIndex);
             return hostPort.equals(selfAddressHostPort);
 
         } else {
@@ -477,8 +485,8 @@ public class ActorUtils {
     }
 
     public Timer getOperationTimer(final String dataStoreType, final String operationName) {
-        final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
-                operationName, METRIC_RATE);
+        final var rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType, operationName,
+            METRIC_RATE);
         return metricRegistry.timer(rate);
     }
 
@@ -500,6 +508,14 @@ public class ActorUtils {
         return txRateLimiter.getTxCreationLimit();
     }
 
+    public long getAskTimeoutExceptionCount() {
+        return askTimeoutCounter.sum();
+    }
+
+    public void resetAskTimeoutExceptionCount() {
+        askTimeoutCounter.reset();
+    }
+
     /**
      * Try to acquire a transaction creation permit. Will block if no permits are available.
      */
@@ -523,11 +539,11 @@ public class ActorUtils {
      * @return the dispatcher
      */
     public ExecutionContext getClientDispatcher() {
-        return this.dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
+        return dispatchers.getDispatcher(Dispatchers.DispatcherType.Client);
     }
 
     public String getNotificationDispatcherPath() {
-        return this.dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
+        return dispatchers.getDispatcherPath(Dispatchers.DispatcherType.Notification);
     }
 
     public Configuration getConfiguration() {
@@ -539,11 +555,13 @@ public class ActorUtils {
     }
 
     protected Future<Object> doAsk(final ActorRef actorRef, final Object message, final Timeout timeout) {
-        return ask(actorRef, message, timeout);
+        return Patterns.ask(actorRef, message, timeout);
     }
 
     protected Future<Object> doAsk(final ActorSelection actorRef, final Object message, final Timeout timeout) {
-        return ask(actorRef, message, timeout);
+        final var ret = Patterns.ask(actorRef, message, timeout);
+        ret.onComplete(askTimeoutCounter, askTimeoutCounter);
+        return ret;
     }
 
     public PrimaryShardInfoFutureCache getPrimaryShardInfoCache() {
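
AskTimeoutCounter piggybacks on the ask future: it is both the OnComplete callback and the ExecutionContext it runs on, so the AskTimeoutException check and the LongAdder increment happen inline on the completing thread rather than on a separate dispatcher. A plain-JDK analogue of that bookkeeping, without Akka, might look like this (all names are illustrative):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.atomic.LongAdder;

    // Illustrative analogue: count timeout failures as tracked futures complete, doing the check
    // and increment inline instead of dispatching to another executor.
    final class TimeoutCounter {
        private final LongAdder timeouts = new LongAdder();

        <T> CompletableFuture<T> track(final CompletableFuture<T> future) {
            return future.whenComplete((result, failure) -> {
                if (failure instanceof TimeoutException) {
                    timeouts.increment();
                }
            });
        }

        long sum() {
            return timeouts.sum();
        }

        public static void main(final String[] args) {
            final var counter = new TimeoutCounter();
            final var ask = new CompletableFuture<String>();
            counter.track(ask);
            ask.completeExceptionally(new TimeoutException("ask timed out"));
            System.out.println(counter.sum()); // prints 1
        }
    }

Elsewhere in the same file, the dummy volatile write in setDatastoreContext() is replaced by VarHandle.fullFence(), which provides a full memory fence without needing the extra field.
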
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ClusterUtils.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ClusterUtils.java
deleted file mode 100644 (file)
index dea8320..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.utils;
-
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Utils for encoding prefix shard name.
- */
-public final class ClusterUtils {
-    private static final Logger LOG = LoggerFactory.getLogger(ClusterUtils.class);
-
-    // id for the shard used to store prefix configuration
-    public static final String PREFIX_CONFIG_SHARD_ID = "prefix-configuration-shard";
-
-    public static final QName PREFIX_SHARDS_QNAME =
-            QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:clustering:prefix-shard-configuration",
-                    "2017-01-10", "prefix-shards").intern();
-    public static final QName SHARD_LIST_QNAME =
-            QName.create(PREFIX_SHARDS_QNAME, "shard").intern();
-    public static final QName SHARD_PREFIX_QNAME =
-            QName.create(PREFIX_SHARDS_QNAME, "prefix").intern();
-    public static final QName SHARD_REPLICAS_QNAME =
-            QName.create(PREFIX_SHARDS_QNAME, "replicas").intern();
-    public static final QName SHARD_REPLICA_QNAME =
-            QName.create(PREFIX_SHARDS_QNAME, "replica").intern();
-
-    public static final YangInstanceIdentifier PREFIX_SHARDS_PATH =
-            YangInstanceIdentifier.of(PREFIX_SHARDS_QNAME).toOptimized();
-    public static final YangInstanceIdentifier SHARD_LIST_PATH =
-            PREFIX_SHARDS_PATH.node(SHARD_LIST_QNAME).toOptimized();
-
-    private ClusterUtils() {
-    }
-
-    public static ShardIdentifier getShardIdentifier(final MemberName memberName, final DOMDataTreeIdentifier prefix) {
-        final String type;
-        switch (prefix.getDatastoreType()) {
-            case OPERATIONAL:
-                type = "operational";
-                break;
-            case CONFIGURATION:
-                type = "config";
-                break;
-            default:
-                type = prefix.getDatastoreType().name();
-                LOG.warn("Unknown data store type {}", type);
-        }
-
-        return ShardIdentifier.create(getCleanShardName(prefix.getRootIdentifier()), memberName, type);
-    }
-
-    /**
-     * Returns an encoded shard name based on the provided path that should doesn't contain characters that cannot be
-     * present in akka actor paths.
-     *
-     * @param path Path on which to base the shard name
-     * @return encoded name that doesn't contain characters that cannot be in actor path.
-     */
-    public static String getCleanShardName(final YangInstanceIdentifier path) {
-        if (path.isEmpty()) {
-            return "default";
-        }
-
-        final StringBuilder builder = new StringBuilder();
-        // TODO need a better mapping that includes namespace, but we'll need to cleanup the string beforehand
-        // we have to fight both javax and akka url path restrictions..
-        path.getPathArguments().forEach(p -> {
-            builder.append(p.getNodeType().getLocalName());
-            if (p instanceof NodeIdentifierWithPredicates) {
-                builder.append("-key_");
-                ((NodeIdentifierWithPredicates) p).getKeyValues().forEach((key, value) -> {
-                    builder.append(key.getLocalName());
-                    builder.append(value);
-                    builder.append("-");
-                });
-                builder.append("_");
-            }
-            builder.append("!");
-        });
-        return builder.toString();
-    }
-}
index 9dc308e209573a073a656c6f00fa0739746913e6..5b073e856f6f163daf7af51f197f4b194ee04016 100644 (file)
@@ -16,7 +16,7 @@ import javax.xml.stream.XMLStreamException;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -54,21 +54,21 @@ public final class DataTreeModificationOutput {
                 output.write(current().node(child).toString().getBytes(StandardCharsets.UTF_8));
                 output.writeByte('\n');
             } catch (IOException e) {
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
         }
 
         @Override
-        public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
+        public void merge(final PathArgument child, final NormalizedNode data) {
             outputPathAndNode("MERGE", child, data);
         }
 
         @Override
-        public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
+        public void write(final PathArgument child, final NormalizedNode data) {
             outputPathAndNode("WRITE", child, data);
         }
 
-        private void outputPathAndNode(final String name, final PathArgument child, final NormalizedNode<?, ?> data) {
+        private void outputPathAndNode(final String name, final PathArgument child, final NormalizedNode data) {
             try {
                 output.writeByte('\n');
                 output.write(name.getBytes(StandardCharsets.UTF_8));
@@ -78,7 +78,7 @@ public final class DataTreeModificationOutput {
                 NormalizedNodeXMLOutput.toStream(output, data);
                 output.writeByte('\n');
             } catch (IOException | XMLStreamException e) {
-                throw new RuntimeException(e);
+                throw new IllegalStateException(e);
             }
         }
     }
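
The debug-output helpers above now wrap checked IOException and XMLStreamException failures in IllegalStateException rather than a bare RuntimeException, keeping the unchecked rethrow while giving callers a more specific type. A minimal sketch of the same pattern (the class is hypothetical):

    import java.io.IOException;

    // Minimal sketch: a best-effort dump rethrows checked I/O failures as IllegalStateException,
    // mirroring the replacement of bare RuntimeException above.
    final class DebugDump {
        static void dump(final Appendable out, final Object node) {
            try {
                out.append(String.valueOf(node)).append('\n');
            } catch (IOException e) {
                throw new IllegalStateException("Failed to write debug output", e);
            }
        }
    }
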
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ImmutableUnsignedLongSet.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ImmutableUnsignedLongSet.java
new file mode 100644 (file)
index 0000000..3934871
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.annotations.Beta;
+import com.google.common.collect.ImmutableSortedSet;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObject;
+
+@Beta
+public final class ImmutableUnsignedLongSet extends UnsignedLongSet implements Immutable, WritableObject {
+    // Do not use the array-backed ImmutableSortedSet representation for more than this many ranges
+    private static final int ARRAY_MAX_ELEMENTS = 4096;
+
+    private static final @NonNull ImmutableUnsignedLongSet EMPTY =
+        new ImmutableUnsignedLongSet(ImmutableSortedSet.of());
+
+    private ImmutableUnsignedLongSet(final NavigableSet<Entry> ranges) {
+        super(ranges);
+    }
+
+    static @NonNull ImmutableUnsignedLongSet copyOf(final MutableUnsignedLongSet mutable) {
+        if (mutable.isEmpty()) {
+            return of();
+        }
+        if (mutable.rangeSize() <= ARRAY_MAX_ELEMENTS) {
+            return new ImmutableUnsignedLongSet(ImmutableSortedSet.copyOfSorted(mutable.trustedRanges()));
+        }
+        return new ImmutableUnsignedLongSet(new TreeSet<>(mutable.trustedRanges()));
+    }
+
+    public static @NonNull ImmutableUnsignedLongSet of() {
+        return EMPTY;
+    }
+
+    @Override
+    public ImmutableUnsignedLongSet immutableCopy() {
+        return this;
+    }
+
+    public static @NonNull ImmutableUnsignedLongSet readFrom(final DataInput in) throws IOException {
+        return readFrom(in, in.readInt());
+    }
+
+    public static @NonNull ImmutableUnsignedLongSet readFrom(final DataInput in, final int size) throws IOException {
+        if (size == 0) {
+            return EMPTY;
+        }
+
+        final NavigableSet<Entry> ranges;
+        if (size <= ARRAY_MAX_ELEMENTS) {
+            final var entries = new ArrayList<Entry>(size);
+            for (int i = 0; i < size; ++i) {
+                entries.add(Entry.readUnsigned(in));
+            }
+            ranges = ImmutableSortedSet.copyOf(entries);
+        } else {
+            ranges = new TreeSet<>();
+            for (int i = 0; i < size; ++i) {
+                ranges.add(Entry.readUnsigned(in));
+            }
+        }
+        return new ImmutableUnsignedLongSet(ranges);
+    }
+
+    @Override
+    public void writeTo(final DataOutput out) throws IOException {
+        out.writeInt(rangeSize());
+        writeRanges(out);
+    }
+
+    public void writeRangesTo(final @NonNull DataOutput out, final int size) throws IOException {
+        final int rangeSize = rangeSize();
+        if (size != rangeSize) {
+            throw new IOException("Mismatched size: expected " + rangeSize + ", got " + size);
+        }
+        writeRanges(out);
+    }
+
+    private void writeRanges(final @NonNull DataOutput out) throws IOException {
+        for (var range : trustedRanges()) {
+            range.writeUnsigned(out);
+        }
+    }
+}
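
ImmutableUnsignedLongSet implements WritableObject: writeTo() emits the range count followed by the ranges, and readFrom() rebuilds the set, switching from an array-backed ImmutableSortedSet to a TreeSet once the count exceeds ARRAY_MAX_ELEMENTS. A round-trip sketch against that API (the helper class is hypothetical):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;

    // Hypothetical helper showing the WritableObject round trip of the set added above.
    final class UnsignedLongSetIo {
        static byte[] toBytes(final ImmutableUnsignedLongSet set) throws IOException {
            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                set.writeTo(out);
            }
            return bytes.toByteArray();
        }

        static ImmutableUnsignedLongSet fromBytes(final byte[] bytes) throws IOException {
            try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
                return ImmutableUnsignedLongSet.readFrom(in);
            }
        }
    }
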
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/MutableUnsignedLongSet.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/MutableUnsignedLongSet.java
new file mode 100644 (file)
index 0000000..d225033
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import com.google.common.annotations.Beta;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.ImmutableRangeSet;
+import com.google.common.collect.Range;
+import com.google.common.primitives.UnsignedLong;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Mutable;
+
+@Beta
+public final class MutableUnsignedLongSet extends UnsignedLongSet implements Mutable {
+    MutableUnsignedLongSet(final TreeSet<Entry> ranges) {
+        super(ranges);
+    }
+
+    public static @NonNull MutableUnsignedLongSet of() {
+        return new MutableUnsignedLongSet(new TreeSet<>());
+    }
+
+    public static @NonNull MutableUnsignedLongSet of(final long... ulongs) {
+        final var ret = MutableUnsignedLongSet.of();
+        for (long longBits : ulongs) {
+            ret.add(longBits);
+        }
+        return ret;
+    }
+
+    @Override
+    public ImmutableUnsignedLongSet immutableCopy() {
+        return ImmutableUnsignedLongSet.copyOf(this);
+    }
+
+    public void add(final long longBits) {
+        addOne(trustedRanges(), Entry.of(longBits));
+    }
+
+    public void addAll(final UnsignedLongSet other) {
+        final var ranges = trustedRanges();
+        for (var range : other.trustedRanges()) {
+            if (range.lowerBits == range.upperBits) {
+                addOne(ranges, range);
+            } else {
+                addRange(ranges, range);
+            }
+        }
+    }
+
+    private static void addOne(final NavigableSet<Entry> ranges, final Entry range) {
+        final long longBits = range.lowerBits;
+
+        // We need Iterator.remove() to perform efficient merge below
+        final var headIt = ranges.headSet(range, true).descendingIterator();
+        if (headIt.hasNext()) {
+            final var head = headIt.next();
+            if (Long.compareUnsigned(head.upperBits, longBits) >= 0) {
+                // Already contained, this is a no-op
+                return;
+            }
+
+            // Merge into head entry if possible
+            if (head.upperBits + 1 == longBits) {
+                // We will be replacing head
+                headIt.remove();
+
+                // Potentially merge head entry and tail entry
+                final var tailIt = ranges.tailSet(range, false).iterator();
+                if (tailIt.hasNext()) {
+                    final var tail = tailIt.next();
+                    if (tail.lowerBits - 1 == longBits) {
+                        // Update tail.lowerBits to include contents of head
+                        tailIt.remove();
+                        ranges.add(tail.withLower(head.lowerBits));
+                        return;
+                    }
+                }
+
+                // Update head.upperBits
+                ranges.add(head.withUpper(longBits));
+                return;
+            }
+        }
+
+        final var tailIt = ranges.tailSet(range, false).iterator();
+        if (tailIt.hasNext()) {
+            final var tail = tailIt.next();
+            // Merge into tail entry if possible
+            if (tail.lowerBits - 1 == longBits) {
+                // Update tail.lowerBits
+                tailIt.remove();
+                ranges.add(tail.withLower(longBits));
+                return;
+            }
+        }
+
+        // No luck, store a new entry
+        ranges.add(range);
+    }
+
+    private static void addRange(final NavigableSet<Entry> ranges, final Entry range) {
+        // If the start of the range is already covered by an existing range, we can expand that
+        final var headIt = ranges.headSet(range, true).descendingIterator();
+        final boolean hasFloor = headIt.hasNext();
+        if (hasFloor) {
+            final var floor = headIt.next();
+            if (Long.compareUnsigned(floor.upperBits, range.upperBits) < 0
+                && Long.compareUnsigned(floor.upperBits + 1, range.lowerBits) >= 0) {
+                headIt.remove();
+                ranges.add(expandFloor(ranges, floor, range.upperBits));
+                return;
+            }
+        }
+
+        // If the end of the range is already covered by an existing range, we can expand that
+        final var tailIt = ranges.headSet(Entry.of(range.upperBits), true).descendingIterator();
+        if (tailIt.hasNext()) {
+            final var upper = tailIt.next();
+            tailIt.remove();
+
+            // Quick check: if we did not find a lower range at all, we might be expanding the entire span, in which
+            // case upper needs to become the first entry
+            if (!hasFloor) {
+                ranges.headSet(upper, false).clear();
+            }
+
+            ranges.add(expandCeiling(ranges, upper, range.lowerBits, range.upperBits));
+            return;
+        }
+
+        // No luck, insert
+        ranges.add(range);
+    }
+
+    private static @NonNull Entry expandFloor(final NavigableSet<Entry> ranges, final Entry floor,
+            final long upperBits) {
+        // Acquire any ranges after floor and clean them up
+        final var tailIt = ranges.tailSet(floor, false).iterator();
+        final long nextLower = upperBits + 1;
+        while (tailIt.hasNext()) {
+            final var tail = tailIt.next();
+            if (Long.compareUnsigned(tail.lowerBits, nextLower) > 0) {
+                // There is gap, nothing more to cleanup
+                break;
+            }
+
+            // We can merge this entry into floor...
+            tailIt.remove();
+
+            if (Long.compareUnsigned(tail.upperBits, nextLower) >= 0) {
+                // ... but we need to expand floor accordingly and after that we are done
+                return floor.withUpper(tail.upperBits);
+            }
+        }
+
+        // Expand floor to include this range and we are done
+        return floor.withUpper(upperBits);
+    }
+
+    private static @NonNull Entry expandCeiling(final NavigableSet<Entry> ranges, final Entry ceiling,
+            final long lowerBits, final long upperBits) {
+        if (Long.compareUnsigned(ceiling.upperBits, upperBits) >= 0) {
+            // Upper end is already covered
+            return ceiling.withLower(lowerBits);
+        }
+
+        // We are expanding the entry's upper boundary, we need to check if we need to coalesce following entries
+        long newUpper = upperBits;
+        final var tailIt = ranges.tailSet(ceiling, false).iterator();
+        if (tailIt.hasNext()) {
+            final var tail = tailIt.next();
+            if (Long.compareUnsigned(tail.lowerBits, newUpper + 1) <= 0) {
+                tailIt.remove();
+                newUpper = tail.upperBits;
+            }
+        }
+
+        return Entry.of(lowerBits, newUpper);
+    }
+
+    // Provides compatibility with RangeSet<UnsignedLong> using [lower, upper + 1)
+    public ImmutableRangeSet<UnsignedLong> toRangeSet() {
+        return ImmutableRangeSet.copyOf(Collections2.transform(trustedRanges(), entry -> Range.closedOpen(
+            UnsignedLong.fromLongBits(entry.lowerBits), UnsignedLong.fromLongBits(entry.upperBits + 1))));
+    }
+}
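
MutableUnsignedLongSet stores values as coalesced [lower, upper] entries; add() and addAll() merge adjacent or overlapping ranges, and toRangeSet() exposes them as closed-open [lower, upper + 1) ranges over UnsignedLong. A small usage example based on the API above (the exact RangeSet toString output is not shown, only the resulting ranges):

    import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;

    // Usage example of the coalescing behaviour of the set added above.
    final class RangeCoalescingExample {
        public static void main(final String[] args) {
            final var set = MutableUnsignedLongSet.of(1, 2, 3, 5);
            System.out.println(set.toRangeSet()); // two ranges: [1..4) and [5..6)
            set.add(4);                           // 4 bridges the gap, the ranges merge
            System.out.println(set.toRangeSet()); // one range: [1..6)
        }
    }
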
index 8d5994ea399de7f1e95f0724bc57dd185f9e01a4..4a17978f1ca6b81ff737eaa838886c82e29caa1e 100644 (file)
@@ -12,60 +12,59 @@ import java.util.Optional;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 public final class NormalizedNodeAggregator {
     private final YangInstanceIdentifier rootIdentifier;
-    private final List<Optional<NormalizedNode<?, ?>>> nodes;
+    private final List<Optional<NormalizedNode>> nodes;
     private final DataTree dataTree;
 
     private NormalizedNodeAggregator(final YangInstanceIdentifier rootIdentifier,
-            final List<Optional<NormalizedNode<?, ?>>> nodes, final SchemaContext schemaContext,
+            final List<Optional<NormalizedNode>> nodes, final EffectiveModelContext schemaContext,
             final LogicalDatastoreType logicalDatastoreType) {
         this.rootIdentifier = rootIdentifier;
         this.nodes = nodes;
-        this.dataTree = new InMemoryDataTreeFactory().create(
-            logicalDatastoreType == LogicalDatastoreType.CONFIGURATION ? DataTreeConfiguration.DEFAULT_CONFIGURATION
-                    : DataTreeConfiguration.DEFAULT_OPERATIONAL);
-        this.dataTree.setSchemaContext(schemaContext);
+        dataTree = new InMemoryDataTreeFactory().create(logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
+            ? DataTreeConfiguration.DEFAULT_CONFIGURATION : DataTreeConfiguration.DEFAULT_OPERATIONAL);
+        dataTree.setEffectiveModelContext(schemaContext);
     }
 
     /**
      * Combine data from all the nodes in the list into a tree with root as rootIdentifier.
      */
-    public static Optional<NormalizedNode<?,?>> aggregate(final YangInstanceIdentifier rootIdentifier,
-            final List<Optional<NormalizedNode<?, ?>>> nodes, final SchemaContext schemaContext,
+    public static Optional<NormalizedNode> aggregate(final YangInstanceIdentifier rootIdentifier,
+            final List<Optional<NormalizedNode>> nodes, final EffectiveModelContext schemaContext,
             final LogicalDatastoreType logicalDatastoreType) throws DataValidationFailedException {
         return new NormalizedNodeAggregator(rootIdentifier, nodes, schemaContext, logicalDatastoreType).aggregate();
     }
 
-    private Optional<NormalizedNode<?,?>> aggregate() throws DataValidationFailedException {
-        return combine().getRootNode();
-    }
-
-    private NormalizedNodeAggregator combine() throws DataValidationFailedException {
+    private Optional<NormalizedNode> aggregate() throws DataValidationFailedException {
         final DataTreeModification mod = dataTree.takeSnapshot().newModification();
+        boolean nodePresent = false;
 
-        for (final Optional<NormalizedNode<?,?>> node : nodes) {
+        for (final Optional<NormalizedNode> node : nodes) {
             if (node.isPresent()) {
-                mod.merge(rootIdentifier, node.get());
+                mod.merge(rootIdentifier, node.orElseThrow());
+                nodePresent = true;
             }
         }
+
+        if (!nodePresent) {
+            return Optional.empty();
+        }
+
         mod.ready();
         dataTree.validate(mod);
         final DataTreeCandidate candidate = dataTree.prepare(mod);
         dataTree.commit(candidate);
 
-        return this;
-    }
-
-    private Optional<NormalizedNode<?, ?>> getRootNode() {
         return dataTree.takeSnapshot().readNode(rootIdentifier);
     }
 }
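
The aggregator now tracks whether any input Optional was actually present and returns Optional.empty() without touching the data tree when none were. A simplified stand-in of that control flow, using plain strings instead of NormalizedNode and a data tree (names are illustrative):

    import java.util.List;
    import java.util.Optional;

    // Stand-in sketch of the short-circuit added above: merge only present inputs and return
    // empty when nothing was present at all.
    final class AggregateSketch {
        static Optional<String> aggregate(final List<Optional<String>> nodes) {
            final var builder = new StringBuilder();
            boolean nodePresent = false;
            for (var node : nodes) {
                if (node.isPresent()) {
                    builder.append(node.orElseThrow());
                    nodePresent = true;
                }
            }
            return nodePresent ? Optional.of(builder.toString()) : Optional.empty();
        }
    }
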
index 49a0f47a4145df163465cb6bc475e091dc45172d..643d107865ab552f80261a77a81d0a8f1b889e60 100644 (file)
@@ -40,7 +40,7 @@ public final class NormalizedNodeXMLOutput {
     private NormalizedNodeXMLOutput() {
     }
 
-    public static void toStream(OutputStream outStream, NormalizedNode<?, ?> node)
+    public static void toStream(final OutputStream outStream, final NormalizedNode node)
             throws XMLStreamException, IOException {
         XMLStreamWriter xmlWriter = XOF.createXMLStreamWriter(outStream);
 
@@ -53,7 +53,7 @@ public final class NormalizedNodeXMLOutput {
         }
     }
 
-    public static void toFile(File file, NormalizedNode<?, ?> node) {
+    public static void toFile(final File file, final NormalizedNode node) {
         try (FileOutputStream outStream = new FileOutputStream(file)) {
             toStream(outStream, node);
         } catch (IOException | XMLStreamException e) {
index 3d4476a972e6ee8491da4143c6fb6174bdc2d366..b3291aad2552bbe23bd9390e6768f6ff8287c306 100644 (file)
@@ -10,8 +10,8 @@ package org.opendaylight.controller.cluster.datastore.utils;
 import akka.dispatch.Futures;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import scala.concurrent.Future;
 
@@ -23,16 +23,15 @@ import scala.concurrent.Future;
 public class PrimaryShardInfoFutureCache {
     private final Cache<String, Future<PrimaryShardInfo>> primaryShardInfoCache = CacheBuilder.newBuilder().build();
 
-    @Nullable
-    public Future<PrimaryShardInfo> getIfPresent(@Nonnull String shardName) {
+    public @Nullable Future<PrimaryShardInfo> getIfPresent(@NonNull String shardName) {
         return primaryShardInfoCache.getIfPresent(shardName);
     }
 
-    public void putSuccessful(@Nonnull String shardName, @Nonnull PrimaryShardInfo info) {
+    public void putSuccessful(@NonNull String shardName, @NonNull PrimaryShardInfo info) {
         primaryShardInfoCache.put(shardName, Futures.successful(info));
     }
 
-    public void remove(@Nonnull String shardName) {
+    public void remove(@NonNull String shardName) {
         primaryShardInfoCache.invalidate(shardName);
     }
 }
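
The javax.annotation (JSR-305) nullability annotations are replaced with the Eclipse JDT ones, which are TYPE_USE annotations and therefore sit on the type itself. A small sketch of the resulting style (the class and members are made up):

    import org.eclipse.jdt.annotation.NonNull;
    import org.eclipse.jdt.annotation.Nullable;

    // Illustrative cache-like holder showing the JDT annotation placement on types.
    final class AnnotationExample {
        private @Nullable String cached;

        @Nullable String get() {
            return cached;
        }

        void put(final @NonNull String value) {
            cached = value;
        }
    }
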
index d0699fbcbaf807cd8ce4f421b692cebbd3a77997..afa17aeac6079eda4c7b688d5c20153e73d8c60b 100644 (file)
@@ -5,25 +5,25 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.utils;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ForwardingObject;
 import java.io.IOException;
 import java.util.Optional;
-import org.opendaylight.controller.cluster.datastore.node.utils.transformer.NormalizedNodePruner;
+import org.opendaylight.controller.cluster.datastore.node.utils.transformer.ReusableNormalizedNodePruner;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -31,87 +31,131 @@ import org.slf4j.LoggerFactory;
  * The PruningDataTreeModification first removes all entries from the data which do not belong in the schemaContext
  * before delegating it to the actual DataTreeModification.
  */
-public class PruningDataTreeModification extends ForwardingObject implements DataTreeModification {
+public abstract class PruningDataTreeModification extends ForwardingObject implements DataTreeModification {
+    /**
+     * A PruningDataTreeModification which always performs pruning before attempting an operation. This sacrifices
+     * performance to ensure all data has passed through the pruner -- such that data adaptations are performed.
+     */
+    public static final class Proactive extends PruningDataTreeModification {
+        public Proactive(final DataTreeModification delegate, final DataTree dataTree,
+                final ReusableNormalizedNodePruner pruner) {
+            super(delegate, dataTree, pruner);
+        }
 
-    private static final Logger LOG = LoggerFactory.getLogger(PruningDataTreeModification.class);
-    private DataTreeModification delegate;
-    private final SchemaContext schemaContext;
-    private final DataTree dataTree;
+        @Override
+        public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
+            pruneAndMergeNode(path, data);
+        }
 
-    public PruningDataTreeModification(final DataTreeModification delegate, final DataTree dataTree,
-            final SchemaContext schemaContext) {
-        this.delegate = Preconditions.checkNotNull(delegate);
-        this.dataTree = Preconditions.checkNotNull(dataTree);
-        this.schemaContext = Preconditions.checkNotNull(schemaContext);
-    }
+        @Override
+        public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
+            pruneAndWriteNode(path, data);
+        }
 
-    @Override
-    public DataTreeModification delegate() {
-        return delegate;
+        @Override
+        PruningDataTreeModification createNew(final DataTreeModification delegate, final DataTree dataTree,
+                final ReusableNormalizedNodePruner pruner) {
+            return new Proactive(delegate, dataTree, pruner);
+        }
     }
 
-    @Override
-    public void delete(final YangInstanceIdentifier yangInstanceIdentifier) {
-        try {
-            delegate.delete(yangInstanceIdentifier);
-        } catch (SchemaValidationFailedException e) {
-            LOG.warn("Node at path : {} does not exist ignoring delete", yangInstanceIdentifier);
+    /**
+     * A PruningDataTreeModification which performs pruning only when an operation results in an
+     * {@link SchemaValidationFailedException}. This offers superior performance in the normal case of not needing
+     * pruning.
+     */
+    public static final class Reactive extends PruningDataTreeModification {
+        public Reactive(final DataTreeModification delegate, final DataTree dataTree,
+                final ReusableNormalizedNodePruner pruner) {
+            super(delegate, dataTree, pruner);
         }
-    }
 
-    @Override
-    public void merge(final YangInstanceIdentifier yangInstanceIdentifier, final NormalizedNode<?, ?> normalizedNode) {
-        try {
-            if (YangInstanceIdentifier.EMPTY.equals(yangInstanceIdentifier)) {
-                pruneAndMergeNode(yangInstanceIdentifier, normalizedNode);
-            } else {
-                delegate.merge(yangInstanceIdentifier, normalizedNode);
+        @Override
+        public void merge(final YangInstanceIdentifier path, final NormalizedNode data) {
+            if (path.isEmpty()) {
+                pruneAndMergeNode(path, data);
+                return;
             }
-        } catch (SchemaValidationFailedException e) {
-            LOG.warn("Node at path {} was pruned during merge due to validation error: {}",
-                    yangInstanceIdentifier, e.getMessage());
 
-            pruneAndMergeNode(yangInstanceIdentifier, normalizedNode);
+            try {
+                delegate().merge(path, data);
+            } catch (SchemaValidationFailedException e) {
+                LOG.warn("Node at path {} was pruned during merge due to validation error: {}", path, e.getMessage());
+                pruneAndMergeNode(path, data);
+            }
         }
 
-    }
+        @Override
+        public void write(final YangInstanceIdentifier path, final NormalizedNode data) {
+            if (path.isEmpty()) {
+                pruneAndWriteNode(path, data);
+                return;
+            }
 
-    private void pruneAndMergeNode(final YangInstanceIdentifier yangInstanceIdentifier,
-            final NormalizedNode<?, ?> normalizedNode) {
-        NormalizedNode<?,?> pruned = pruneNormalizedNode(yangInstanceIdentifier, normalizedNode);
+            try {
+                delegate().write(path, data);
+            } catch (SchemaValidationFailedException e) {
+                LOG.warn("Node at path : {} was pruned during write due to validation error: {}", path, e.getMessage());
+                pruneAndWriteNode(path, data);
+            }
+        }
 
-        if (pruned != null) {
-            delegate.merge(yangInstanceIdentifier, pruned);
+        @Override
+        PruningDataTreeModification createNew(final DataTreeModification delegate, final DataTree dataTree,
+                final ReusableNormalizedNodePruner pruner) {
+            return new Reactive(delegate, dataTree, pruner);
         }
     }
 
+    private static final Logger LOG = LoggerFactory.getLogger(PruningDataTreeModification.class);
+
+    private final ReusableNormalizedNodePruner pruner;
+    private final DataTree dataTree;
+
+    private DataTreeModification delegate;
+
+    PruningDataTreeModification(final DataTreeModification delegate, final DataTree dataTree,
+            final ReusableNormalizedNodePruner pruner) {
+        this.delegate = requireNonNull(delegate);
+        this.dataTree = requireNonNull(dataTree);
+        this.pruner = requireNonNull(pruner);
+    }
+
+    @Override
+    protected final DataTreeModification delegate() {
+        return delegate;
+    }
+
+    @Override
+    public final EffectiveModelContext modelContext() {
+        return delegate.modelContext();
+    }
+
     @Override
-    public void write(final YangInstanceIdentifier yangInstanceIdentifier, final NormalizedNode<?, ?> normalizedNode) {
+    public final void delete(final YangInstanceIdentifier path) {
         try {
-            if (YangInstanceIdentifier.EMPTY.equals(yangInstanceIdentifier)) {
-                pruneAndWriteNode(yangInstanceIdentifier, normalizedNode);
-            } else {
-                delegate.write(yangInstanceIdentifier, normalizedNode);
-            }
+            delegate.delete(path);
         } catch (SchemaValidationFailedException e) {
-            LOG.warn("Node at path : {} was pruned during write due to validation error: {}",
-                    yangInstanceIdentifier, e.getMessage());
-
-            pruneAndWriteNode(yangInstanceIdentifier, normalizedNode);
+            LOG.warn("Node at path : {} does not exist ignoring delete", path);
         }
     }
 
-    private void pruneAndWriteNode(final YangInstanceIdentifier yangInstanceIdentifier,
-            final NormalizedNode<?, ?> normalizedNode) {
-        NormalizedNode<?,?> pruned = pruneNormalizedNode(yangInstanceIdentifier, normalizedNode);
+    final void pruneAndMergeNode(final YangInstanceIdentifier path, final NormalizedNode data) {
+        final NormalizedNode pruned = pruneNormalizedNode(path, data);
+        if (pruned != null) {
+            delegate.merge(path, pruned);
+        }
+    }
 
+    final void pruneAndWriteNode(final YangInstanceIdentifier path, final NormalizedNode data) {
+        final NormalizedNode pruned = pruneNormalizedNode(path, data);
         if (pruned != null) {
-            delegate.write(yangInstanceIdentifier, pruned);
+            delegate.write(path, pruned);
         }
     }
 
     @Override
-    public void ready() {
+    public final void ready() {
         try {
             delegate.ready();
         } catch (SchemaValidationFailedException e) {
@@ -124,33 +168,37 @@ public class PruningDataTreeModification extends ForwardingObject implements Dat
     }
 
     @Override
-    public void applyToCursor(final DataTreeModificationCursor dataTreeModificationCursor) {
+    public final void applyToCursor(final DataTreeModificationCursor dataTreeModificationCursor) {
         delegate.applyToCursor(dataTreeModificationCursor);
     }
 
     @Override
-    public Optional<NormalizedNode<?, ?>> readNode(final YangInstanceIdentifier yangInstanceIdentifier) {
+    public final Optional<NormalizedNode> readNode(final YangInstanceIdentifier yangInstanceIdentifier) {
         return delegate.readNode(yangInstanceIdentifier);
     }
 
     @Override
-    public DataTreeModification newModification() {
-        return new PruningDataTreeModification(delegate.newModification(), dataTree, schemaContext);
+    public final DataTreeModification newModification() {
+        return createNew(delegate.newModification(), dataTree, pruner.duplicate());
     }
 
     @VisibleForTesting
-    NormalizedNode<?, ?> pruneNormalizedNode(final YangInstanceIdentifier path, final NormalizedNode<?,?> input) {
-        NormalizedNodePruner pruner = new NormalizedNodePruner(path, schemaContext);
+    final NormalizedNode pruneNormalizedNode(final YangInstanceIdentifier path, final NormalizedNode input) {
+        pruner.initializeForPath(path);
         try {
             NormalizedNodeWriter.forStreamWriter(pruner).write(input);
         } catch (IOException ioe) {
             LOG.error("Unexpected IOException when pruning normalizedNode", ioe);
+            return null;
         }
 
-        return pruner.normalizedNode();
+        return pruner.getResult().orElse(null);
     }
 
-    private static class PruningDataTreeModificationCursor extends AbstractDataTreeModificationCursor {
+    abstract PruningDataTreeModification createNew(DataTreeModification delegate, DataTree dataTree,
+            ReusableNormalizedNodePruner pruner);
+
+    private static final class PruningDataTreeModificationCursor extends AbstractDataTreeModificationCursor {
         private final DataTreeModification toModification;
         private final PruningDataTreeModification pruningModification;
 
@@ -161,18 +209,18 @@ public class PruningDataTreeModification extends ForwardingObject implements Dat
         }
 
         @Override
-        public void write(final PathArgument child, final NormalizedNode<?, ?> data) {
-            YangInstanceIdentifier path = current().node(child);
-            NormalizedNode<?, ?> prunedNode = pruningModification.pruneNormalizedNode(path, data);
+        public void write(final PathArgument child, final NormalizedNode data) {
+            final YangInstanceIdentifier path = current().node(child);
+            final NormalizedNode prunedNode = pruningModification.pruneNormalizedNode(path, data);
             if (prunedNode != null) {
                 toModification.write(path, prunedNode);
             }
         }
 
         @Override
-        public void merge(final PathArgument child, final NormalizedNode<?, ?> data) {
-            YangInstanceIdentifier path = current().node(child);
-            NormalizedNode<?, ?> prunedNode = pruningModification.pruneNormalizedNode(path, data);
+        public void merge(final PathArgument child, final NormalizedNode data) {
+            final YangInstanceIdentifier path = current().node(child);
+            final NormalizedNode prunedNode = pruningModification.pruneNormalizedNode(path, data);
             if (prunedNode != null) {
                 toModification.merge(path, prunedNode);
             }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/RootScatterGather.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/RootScatterGather.java
new file mode 100644 (file)
index 0000000..d755adc
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.NonNullByDefault;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+
+/**
+ * Utility methods for dealing with datastore root {@link ContainerNode} with respect to module shards.
+ */
+public final class RootScatterGather {
+    @NonNullByDefault
+    public record ShardContainer<T>(T shard, ContainerNode container) {
+        public ShardContainer {
+            requireNonNull(shard);
+            requireNonNull(container);
+        }
+
+        @Override
+        public String toString() {
+            return MoreObjects.toStringHelper(this).add("shard", shard).toString();
+        }
+    }
+
+    private RootScatterGather() {
+        // Hidden on purpose
+    }
+
+    /**
+     * Check whether a {@link NormalizedNode} represents a root container and return it cast to {@link ContainerNode}.
+     *
+     * @param node a normalized node
+     * @return {@code node} cast to ContainerNode
+     * @throws NullPointerException if {@code node} is null
+     * @throws IllegalArgumentException if {@code node} is not a {@link ContainerNode}
+     */
+    public static @NonNull ContainerNode castRootNode(final NormalizedNode node) {
+        final var nonnull = requireNonNull(node);
+        checkArgument(nonnull instanceof ContainerNode, "Invalid root data %s", nonnull);
+        return (ContainerNode) nonnull;
+    }
+
+    /**
+     * Reconstruct root container from a set of constituents.
+     *
+     * @param actorUtils {@link ActorUtils} reference
+     * @param readFutures Consitutent read futures
+     * @return A composite future
+     */
+    public static @NonNull FluentFuture<Optional<NormalizedNode>> gather(final ActorUtils actorUtils,
+            final Stream<FluentFuture<Optional<NormalizedNode>>> readFutures) {
+        return FluentFuture.from(Futures.transform(
+            Futures.allAsList(readFutures.collect(ImmutableList.toImmutableList())), input -> {
+                try {
+                    return NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.of(), input,
+                        actorUtils.getSchemaContext(), actorUtils.getDatastoreContext().getLogicalStoreType());
+                } catch (DataValidationFailedException e) {
+                    throw new IllegalArgumentException("Failed to aggregate", e);
+                }
+            }, MoreExecutors.directExecutor()));
+    }
+
+    public static <T> @NonNull Stream<ShardContainer<T>> scatterAll(final ContainerNode rootNode,
+            final Function<PathArgument, T> childToShard, final Stream<T> allShards) {
+        final var builders = allShards
+            .collect(Collectors.toUnmodifiableMap(Function.identity(), unused -> ImmutableNodes.newContainerBuilder()));
+        for (var child : rootNode.body()) {
+            final var shard = childToShard.apply(child.name());
+            verifyNotNull(builders.get(shard), "Failed to find builder for %s", shard).addChild(child);
+        }
+        return streamContainers(rootNode.name(), builders);
+    }
+
+    /**
+     * Split root container into per-shard root containers.
+     *
+     * @param <T> Shard reference type
+     * @param rootNode Root container to be split up
+     * @param childToShard Mapping function from child {@link PathArgument} to shard reference
+     * @return Stream of {@link ShardContainer}s, one for each touched shard
+     */
+    public static <T> @NonNull Stream<ShardContainer<T>> scatterTouched(final ContainerNode rootNode,
+            final Function<PathArgument, T> childToShard) {
+        final var builders = new HashMap<T, ContainerNode.Builder>();
+        for (var child : rootNode.body()) {
+            builders.computeIfAbsent(childToShard.apply(child.name()), unused -> ImmutableNodes.newContainerBuilder())
+                .addChild(child);
+        }
+        return streamContainers(rootNode.name(), builders);
+    }
+
+    private static <T> @NonNull Stream<ShardContainer<T>> streamContainers(final NodeIdentifier rootId,
+            final Map<T, ContainerNode.Builder> builders) {
+        return builders.entrySet().stream()
+            .map(entry -> new ShardContainer<>(entry.getKey(), entry.getValue().withNodeIdentifier(rootId).build()));
+    }
+}
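
For readers new to this helper, the sketch below shows one way the scatter API above might be driven by caller code. It is a minimal sketch, not part of this change: the example class, the String shard keys and the childToShard mapping function are hypothetical; only the RootScatterGather calls come from the file above.

    // Minimal sketch, not part of this change: split a root container across hypothetical
    // String-named shards using only the RootScatterGather API shown above.
    import java.util.function.Function;
    import java.util.stream.Stream;
    import org.opendaylight.controller.cluster.datastore.utils.RootScatterGather;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
    import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class RootScatterExample {
        private RootScatterExample() {
            // Hidden on purpose
        }

        // Returns one ShardContainer per shard that actually owns some top-level child.
        static Stream<RootScatterGather.ShardContainer<String>> split(final NormalizedNode rootData,
                final Function<PathArgument, String> childToShard) {
            // castRootNode() rejects anything that is not the root ContainerNode
            final ContainerNode root = RootScatterGather.castRootNode(rootData);
            return RootScatterGather.scatterTouched(root, childToShard);
        }
    }

Using scatterAll() instead would emit a container for every shard in the supplied stream, including shards whose container ends up empty, whereas scatterTouched() only covers shards that received at least one child.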
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongBitmap.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongBitmap.java
new file mode 100644 (file)
index 0000000..ffef55d
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Maps;
+import com.google.common.primitives.UnsignedLong;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * A more efficient equivalent of {@code ImmutableMap<UnsignedLong, Boolean>}.
+ */
+@Beta
+public abstract class UnsignedLongBitmap implements Immutable {
+    @VisibleForTesting
+    static final class Regular extends UnsignedLongBitmap {
+        private final long[] keys;
+        private final boolean[] values;
+
+        Regular(final long[] keys, final boolean[] values) {
+            this.keys = requireNonNull(keys);
+            this.values = requireNonNull(values);
+            verify(keys.length == values.length);
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return keys.length == 0;
+        }
+
+        @Override
+        public int size() {
+            return keys.length;
+        }
+
+        @Override
+        void writeEntriesTo(final DataOutput out) throws IOException {
+            for (int i = 0; i < keys.length; ++i) {
+                writeEntry(out, keys[i], values[i]);
+            }
+        }
+
+        @Override
+        StringBuilder appendEntries(final StringBuilder sb) {
+            final int last = keys.length - 1;
+            for (int i = 0; i < last; ++i) {
+                appendEntry(sb, keys[i], values[i]).append(", ");
+            }
+            return appendEntry(sb, keys[last], values[last]);
+        }
+
+        @Override
+        void putEntries(final HashMap<UnsignedLong, Boolean> ret) {
+            for (int i = 0; i < keys.length; ++i) {
+                ret.put(UnsignedLong.fromLongBits(keys[i]), values[i]);
+            }
+        }
+
+        @Override
+        public int hashCode() {
+            return Arrays.hashCode(keys) ^ Arrays.hashCode(values);
+        }
+
+        @Override
+        public boolean equals(final Object obj) {
+            if (obj == this) {
+                return true;
+            }
+            if (!(obj instanceof Regular)) {
+                return false;
+            }
+            final var other = (Regular) obj;
+            return Arrays.equals(keys, other.keys) && Arrays.equals(values, other.values);
+        }
+    }
+
+    private static final class Singleton extends UnsignedLongBitmap {
+        private final long key;
+        private final boolean value;
+
+        Singleton(final long key, final boolean value) {
+            this.key = key;
+            this.value = value;
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return false;
+        }
+
+        @Override
+        public int size() {
+            return 1;
+        }
+
+        @Override
+        void writeEntriesTo(final DataOutput out) throws IOException {
+            writeEntry(out, key, value);
+        }
+
+        @Override
+        StringBuilder appendEntries(final StringBuilder sb) {
+            return sb.append(Long.toUnsignedString(key)).append('=').append(value);
+        }
+
+        @Override
+        void putEntries(final HashMap<UnsignedLong, Boolean> ret) {
+            ret.put(UnsignedLong.fromLongBits(key), value);
+        }
+
+        @Override
+        public int hashCode() {
+            return Long.hashCode(key) ^ Boolean.hashCode(value);
+        }
+
+        @Override
+        public boolean equals(final Object obj) {
+            if (obj == this) {
+                return true;
+            }
+            if (!(obj instanceof Singleton)) {
+                return false;
+            }
+            final var other = (Singleton) obj;
+            return key == other.key && value == other.value;
+        }
+    }
+
+    private static final @NonNull UnsignedLongBitmap EMPTY = new Regular(new long[0], new boolean[0]);
+
+    private UnsignedLongBitmap() {
+        // Hidden on purpose
+    }
+
+    public static @NonNull UnsignedLongBitmap of() {
+        return EMPTY;
+    }
+
+    public static @NonNull UnsignedLongBitmap of(final long keyBits, final boolean value) {
+        return new Singleton(keyBits, value);
+    }
+
+    public static @NonNull UnsignedLongBitmap copyOf(final Map<UnsignedLong, Boolean> map) {
+        final int size = map.size();
+        switch (size) {
+            case 0:
+                return of();
+            case 1:
+                final var entry = map.entrySet().iterator().next();
+                return of(entry.getKey().longValue(), entry.getValue());
+            default:
+                final var entries = new ArrayList<>(map.entrySet());
+                entries.sort(Comparator.comparing(Entry::getKey));
+
+                final var keys = new long[size];
+                final var values = new boolean[size];
+
+                int idx = 0;
+                for (var e : entries) {
+                    keys[idx] = e.getKey().longValue();
+                    values[idx] = e.getValue();
+                    ++idx;
+                }
+
+                return new Regular(keys, values);
+        }
+    }
+
+    public abstract boolean isEmpty();
+
+    public abstract int size();
+
+    public final @NonNull HashMap<UnsignedLong, Boolean> mutableCopy() {
+        final int size = size();
+        switch (size) {
+            case 0:
+                return new HashMap<>();
+            default:
+                final var ret = Maps.<UnsignedLong, Boolean>newHashMapWithExpectedSize(size);
+                putEntries(ret);
+                return ret;
+        }
+    }
+
+    public static @NonNull UnsignedLongBitmap readFrom(final @NonNull DataInput in, final int size) throws IOException {
+        switch (size) {
+            case 0:
+                return of();
+            case 1:
+                return new Singleton(WritableObjects.readLong(in), in.readBoolean());
+            default:
+                final var keys = new long[size];
+                final var values = new boolean[size];
+                for (int i = 0; i < size; ++i) {
+                    keys[i] = WritableObjects.readLong(in);
+                    values[i] = in.readBoolean();
+                }
+
+                // There should be no duplicates and the IDs need to be increasing
+                long prevKey = keys[0];
+                for (int i = 1; i < size; ++i) {
+                    final long key = keys[i];
+                    if (Long.compareUnsigned(prevKey, key) >= 0) {
+                        throw new IOException("Key " + Long.toUnsignedString(key) + " may not be used after key "
+                            + Long.toUnsignedString(prevKey));
+                    }
+                    prevKey = key;
+                }
+
+                return new Regular(keys, values);
+        }
+    }
+
+    public void writeEntriesTo(final @NonNull DataOutput out, final int size) throws IOException {
+        if (size != size()) {
+            throw new IOException("Mismatched size: expected " + size() + ", got " + size);
+        }
+        writeEntriesTo(out);
+    }
+
+    abstract void writeEntriesTo(@NonNull DataOutput out) throws IOException;
+
+    abstract StringBuilder appendEntries(StringBuilder sb);
+
+    abstract void putEntries(HashMap<UnsignedLong, Boolean> ret);
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>
+     * Implementations of this method return a deterministic value.
+     */
+    @Override
+    public abstract int hashCode();
+
+    @Override
+    public abstract boolean equals(Object obj);
+
+    @Override
+    public final String toString() {
+        return isEmpty() ? "{}" : appendEntries(new StringBuilder().append('{')).append('}').toString();
+    }
+
+    private static StringBuilder appendEntry(final StringBuilder sb, final long key, final boolean value) {
+        return sb.append(Long.toUnsignedString(key)).append('=').append(value);
+    }
+
+    private static void writeEntry(final @NonNull DataOutput out, final long key, final boolean value)
+            throws IOException {
+        // FIXME: This serialization format is what we inherited. We could do better by storing the boolean in
+        //        writeLong()'s flags. On the other hand, we could also be writing longs by twos, which might be
+        //        beneficial.
+        WritableObjects.writeLong(out, key);
+        out.writeBoolean(value);
+    }
+}
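
The serialization contract above (the entry count carried externally, entries written in increasing unsigned-key order) is easiest to see in a round trip. The following is a minimal sketch under that assumption; the class name and the sample values are hypothetical, only the UnsignedLongBitmap calls come from the file above.

    // Minimal sketch, not part of this change: build a bitmap and round-trip it through
    // the DataOutput/DataInput format defined above. The size is carried externally.
    import com.google.common.primitives.UnsignedLong;
    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Map;
    import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;

    final class UnsignedLongBitmapExample {
        private UnsignedLongBitmapExample() {
            // Hidden on purpose
        }

        static UnsignedLongBitmap roundTrip() throws IOException {
            final var original = UnsignedLongBitmap.copyOf(Map.of(
                UnsignedLong.valueOf(1), Boolean.TRUE,
                UnsignedLong.valueOf(2), Boolean.FALSE));

            final var bytes = new ByteArrayOutputStream();
            try (var out = new DataOutputStream(bytes)) {
                original.writeEntriesTo(out, original.size());
            }

            // readFrom() re-checks that keys are strictly increasing
            try (var in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                return UnsignedLongBitmap.readFrom(in, original.size());
            }
        }
    }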
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongRangeSet.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongRangeSet.java
deleted file mode 100644 (file)
index eeb39ea..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.utils;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableRangeSet;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
-import com.google.common.primitives.UnsignedLong;
-import org.opendaylight.yangtools.concepts.Mutable;
-
-/**
- * Utility {@link RangeSet}-like class, specialized for holding {@link UnsignedLong}. It does not directly implement
- * the {@link RangeSet} interface, but allows converting to and from it. Internal implementation takes advantage of
- * knowing that {@link UnsignedLong} is a discrete type and that it can be stored in a long.
- *
- * @author Robert Varga
- */
-@Beta
-public final class UnsignedLongRangeSet implements Mutable {
-    // FIXME: this is just to get us started
-    private final RangeSet<UnsignedLong> rangeset;
-
-    private UnsignedLongRangeSet(final RangeSet<UnsignedLong> rangeset) {
-        this.rangeset = Preconditions.checkNotNull(rangeset);
-    }
-
-    public static UnsignedLongRangeSet create() {
-        return new UnsignedLongRangeSet(TreeRangeSet.create());
-    }
-
-    public static UnsignedLongRangeSet create(final RangeSet<UnsignedLong> input) {
-        return new UnsignedLongRangeSet(TreeRangeSet.create(input));
-    }
-
-    public RangeSet<UnsignedLong> toImmutable() {
-        return ImmutableRangeSet.copyOf(rangeset);
-    }
-
-    public void add(final long longBits) {
-        add(UnsignedLong.fromLongBits(longBits));
-    }
-
-    public void add(final UnsignedLong value) {
-        rangeset.add(Range.closedOpen(value, UnsignedLong.ONE.plus(value)));
-    }
-
-    public boolean contains(final UnsignedLong value) {
-        return rangeset.contains(value);
-    }
-
-    public boolean contains(final long longBits) {
-        return contains(UnsignedLong.fromLongBits(longBits));
-    }
-
-    public UnsignedLongRangeSet copy() {
-        return new UnsignedLongRangeSet(TreeRangeSet.create(rangeset));
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongSet.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongSet.java
new file mode 100644 (file)
index 0000000..59393a3
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.RangeSet;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.concepts.Immutable;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+/**
+ * A class holding an equivalent of {@code Set<UnsignedLong>}. It is geared towards efficiently tracking ranges of
+ * objects, similar to what a {@link RangeSet} would do.
+ *
+ * <p>
+ * Unlike a {@code RangeSet}, though, this class takes advantage of knowing that an unsigned long is a discrete unit
+ * and can be stored in a simple {@code long}.
+ *
+ * @author Robert Varga
+ */
+abstract class UnsignedLongSet {
+    @Beta
+    @VisibleForTesting
+    public static final class Entry implements Comparable<Entry>, Immutable {
+        public final long lowerBits;
+        public final long upperBits;
+
+        private Entry(final long lowerBits, final long upperBits) {
+            this.lowerBits = lowerBits;
+            this.upperBits = upperBits;
+        }
+
+        static @NonNull Entry of(final long longBits) {
+            return of(longBits, longBits);
+        }
+
+        static @NonNull Entry of(final long lowerBits, final long upperBits) {
+            return new Entry(lowerBits, upperBits);
+        }
+
+        @NonNull Entry withLower(final long newLowerBits) {
+            return of(newLowerBits, upperBits);
+        }
+
+        @NonNull Entry withUpper(final long newUpperBits) {
+            return of(lowerBits, newUpperBits);
+        }
+
+        // These two methods provide the same serialization format as the one we've used to serialize
+        // Range<UnsignedLong>
+        static @NonNull Entry readUnsigned(final DataInput in) throws IOException {
+            final byte hdr = WritableObjects.readLongHeader(in);
+            final long first = WritableObjects.readFirstLong(in, hdr);
+            final long second = WritableObjects.readSecondLong(in, hdr) - 1;
+            if (Long.compareUnsigned(first, second) > 0) {
+                throw new IOException("Lower endpoint " + Long.toUnsignedString(first) + " is greater than upper "
+                    + "endpoint " + Long.toUnsignedString(second));
+            }
+
+            return new Entry(first, second);
+        }
+
+        void writeUnsigned(final @NonNull DataOutput out) throws IOException {
+            WritableObjects.writeLongs(out, lowerBits, upperBits + 1);
+        }
+
+        @Override
+        @SuppressWarnings("checkstyle:parameterName")
+        public int compareTo(final Entry o) {
+            return Long.compareUnsigned(lowerBits, o.lowerBits);
+        }
+
+        @Override
+        public int hashCode() {
+            return Long.hashCode(lowerBits) * 31 + Long.hashCode(upperBits);
+        }
+
+        @Override
+        public boolean equals(final Object obj) {
+            if (obj == this) {
+                return true;
+            }
+            if (!(obj instanceof Entry)) {
+                return false;
+            }
+            final var other = (Entry) obj;
+            return lowerBits == other.lowerBits && upperBits == other.upperBits;
+        }
+
+        @Override
+        public String toString() {
+            return "[" + Long.toUnsignedString(lowerBits) + ".." + Long.toUnsignedString(upperBits) + "]";
+        }
+    }
+
+    // The idea is rather simple: we track a NavigableSet of range entries, ordered by their lower bound. This means
+    // that for a contains() operation we just need the floor() entry. For insert operations we just update
+    // either the lower bound or the upper bound of an existing entry. When we do, we also look at the prev/next entry
+    // and if they are contiguous with the updated entry, we adjust the entry once more and remove the prev/next entry.
+    private final @NonNull NavigableSet<Entry> ranges;
+
+    UnsignedLongSet(final NavigableSet<Entry> ranges) {
+        this.ranges = requireNonNull(ranges);
+    }
+
+    public final boolean contains(final long longBits) {
+        final var head = ranges.floor(Entry.of(longBits));
+        return head != null
+            && Long.compareUnsigned(head.lowerBits, longBits) <= 0
+            && Long.compareUnsigned(head.upperBits, longBits) >= 0;
+    }
+
+    public final boolean isEmpty() {
+        return ranges.isEmpty();
+    }
+
+    public final int rangeSize() {
+        return ranges.size();
+    }
+
+    public abstract @NonNull ImmutableUnsignedLongSet immutableCopy();
+
+    public final @NonNull MutableUnsignedLongSet mutableCopy() {
+        return new MutableUnsignedLongSet(new TreeSet<>(ranges));
+    }
+
+    public final @NonNull NavigableSet<Entry> ranges() {
+        return Collections.unmodifiableNavigableSet(ranges);
+    }
+
+    final @NonNull NavigableSet<Entry> trustedRanges() {
+        return ranges;
+    }
+
+    @Override
+    public final int hashCode() {
+        return ranges.hashCode();
+    }
+
+    @Override
+    public final boolean equals(final Object obj) {
+        return obj == this || obj instanceof UnsignedLongSet && ranges.equals(((UnsignedLongSet) obj).ranges);
+    }
+
+    @Override
+    public final String toString() {
+        final var helper = MoreObjects.toStringHelper(this);
+
+        final int size = ranges.size();
+        switch (size) {
+            case 0:
+                break;
+            case 1:
+                helper.add("span", ranges.first());
+                break;
+            default:
+                helper.add("span", Entry.of(ranges.first().lowerBits, ranges.last().upperBits));
+        }
+
+        return helper.add("size", size).toString();
+    }
+}
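
The comment above describes a floor()-based containment check over ranges ordered by their unsigned lower bound. As a standalone illustration of the same idea (not controller code, all names hypothetical), it can be expressed over a plain NavigableMap:

    // Standalone sketch, not part of this change: the floor()-based containment idea on a
    // NavigableMap of [lower, upper] ranges ordered by unsigned lower bound.
    import java.util.NavigableMap;
    import java.util.TreeMap;

    final class UnsignedRangeSketch {
        private final NavigableMap<Long, Long> ranges = new TreeMap<>(Long::compareUnsigned);

        // Assumes callers add non-overlapping ranges; the real class also merges contiguous entries.
        void addRange(final long lowerBits, final long upperBits) {
            ranges.put(lowerBits, upperBits);
        }

        boolean contains(final long longBits) {
            // floorEntry() yields the range with the greatest lower bound not above longBits
            final var floor = ranges.floorEntry(longBits);
            return floor != null && Long.compareUnsigned(longBits, floor.getValue()) <= 0;
        }
    }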
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImpl.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImpl.java
deleted file mode 100644 (file)
index dc0fabf..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import akka.dispatch.Futures;
-import akka.dispatch.Mapper;
-import akka.dispatch.OnComplete;
-import akka.util.Timeout;
-import com.google.common.base.Optional;
-import java.util.Collection;
-import java.util.concurrent.CompletionStage;
-import java.util.concurrent.ConcurrentHashMap;
-import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
-import org.opendaylight.controller.cluster.datastore.messages.MakeLeaderLocal;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListenerRegistration;
-import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.compat.java8.FutureConverters;
-import scala.concurrent.Future;
-
-/**
- * Default {@link CDSShardAccess} implementation. Listens on leader location
- * change events and distributes them to registered listeners. Also updates
- * current information about leader location accordingly.
- *
- * <p>
- * Sends {@link MakeLeaderLocal} message to local shards and translates its result
- * on behalf users {@link #makeLeaderLocal()} calls.
- *
- * <p>
- * {@link org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer} that
- * creates instances of this class has to call {@link #close()} once it is no
- * longer valid.
- */
-final class CDSShardAccessImpl implements CDSShardAccess, LeaderLocationListener, AutoCloseable {
-    private static final Logger LOG = LoggerFactory.getLogger(CDSShardAccessImpl.class);
-
-    private final Collection<LeaderLocationListener> listeners = ConcurrentHashMap.newKeySet();
-    private final DOMDataTreeIdentifier prefix;
-    private final ActorUtils actorUtils;
-    private final Timeout makeLeaderLocalTimeout;
-
-    private ActorRef roleChangeListenerActor;
-
-    private volatile LeaderLocation currentLeader = LeaderLocation.UNKNOWN;
-    private volatile boolean closed = false;
-
-    CDSShardAccessImpl(final DOMDataTreeIdentifier prefix, final ActorUtils actorUtils) {
-        this.prefix = requireNonNull(prefix);
-        this.actorUtils = requireNonNull(actorUtils);
-        this.makeLeaderLocalTimeout =
-                new Timeout(actorUtils.getDatastoreContext().getShardLeaderElectionTimeout().duration().$times(2));
-
-        // register RoleChangeListenerActor
-        // TODO Maybe we should do this in async
-        final Optional<ActorRef> localShardReply =
-                actorUtils.findLocalShard(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
-        checkState(localShardReply.isPresent(),
-                "Local shard for {} not present. Cannot register RoleChangeListenerActor", prefix);
-        roleChangeListenerActor =
-                actorUtils.getActorSystem().actorOf(RoleChangeListenerActor.props(localShardReply.get(), this));
-    }
-
-    private void checkNotClosed() {
-        checkState(!closed, "CDSDataTreeProducer, that this CDSShardAccess is associated with, is no longer valid");
-    }
-
-    @Override
-    public DOMDataTreeIdentifier getShardIdentifier() {
-        checkNotClosed();
-        return prefix;
-    }
-
-    @Override
-    public LeaderLocation getLeaderLocation() {
-        checkNotClosed();
-        // TODO before getting first notification from roleChangeListenerActor
-        // we will always return UNKNOWN
-        return currentLeader;
-    }
-
-    @Override
-    public CompletionStage<Void> makeLeaderLocal() {
-        // TODO when we have running make leader local operation
-        // we should just return the same completion stage
-        checkNotClosed();
-
-        // TODO can we cache local shard actorRef?
-        final Future<ActorRef> localShardReply =
-                actorUtils.findLocalShardAsync(ClusterUtils.getCleanShardName(prefix.getRootIdentifier()));
-
-        // we have to tell local shard to make leader local
-        final scala.concurrent.Promise<Object> makeLeaderLocalAsk = Futures.promise();
-        localShardReply.onComplete(new OnComplete<ActorRef>() {
-            @Override
-            public void onComplete(final Throwable failure, final ActorRef actorRef) {
-                if (failure instanceof LocalShardNotFoundException) {
-                    LOG.debug("No local shard found for {} - Cannot request leadership transfer to local shard.",
-                            getShardIdentifier(), failure);
-                    makeLeaderLocalAsk.failure(failure);
-                } else if (failure != null) {
-                    // TODO should this be WARN?
-                    LOG.debug("Failed to find local shard for {} - Cannot request leadership transfer to local shard.",
-                            getShardIdentifier(), failure);
-                    makeLeaderLocalAsk.failure(failure);
-                } else {
-                    makeLeaderLocalAsk
-                            .completeWith(actorUtils
-                                    .executeOperationAsync(actorRef, MakeLeaderLocal.INSTANCE, makeLeaderLocalTimeout));
-                }
-            }
-        }, actorUtils.getClientDispatcher());
-
-        // we have to transform make leader local request result
-        Future<Void> makeLeaderLocalFuture = makeLeaderLocalAsk.future()
-                .transform(new Mapper<Object, Void>() {
-                    @Override
-                    public Void apply(final Object parameter) {
-                        return null;
-                    }
-                }, new Mapper<Throwable, Throwable>() {
-                    @Override
-                    public Throwable apply(final Throwable parameter) {
-                        if (parameter instanceof LeadershipTransferFailedException) {
-                            // do nothing with exception and just pass it as it is
-                            return parameter;
-                        }
-                        // wrap exception in LeadershipTransferFailedEx
-                        return new LeadershipTransferFailedException("Leadership transfer failed", parameter);
-                    }
-                }, actorUtils.getClientDispatcher());
-
-        return FutureConverters.toJava(makeLeaderLocalFuture);
-    }
-
-    @Override
-    public <L extends LeaderLocationListener> LeaderLocationListenerRegistration<L>
-            registerLeaderLocationListener(final L listener) {
-        checkNotClosed();
-        requireNonNull(listener);
-        checkArgument(!listeners.contains(listener), "Listener %s is already registered with ShardAccess %s", listener,
-            this);
-
-        LOG.debug("Registering LeaderLocationListener {}", listener);
-
-        listeners.add(listener);
-
-        return new LeaderLocationListenerRegistration<L>() {
-            @Override
-            public L getInstance() {
-                return listener;
-            }
-
-            @Override
-            public void close() {
-                listeners.remove(listener);
-            }
-        };
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void onLeaderLocationChanged(final LeaderLocation location) {
-        if (closed) {
-            // we are closed already. Do not dispatch any new leader location
-            // change events.
-            return;
-        }
-
-        LOG.debug("Received leader location change notification. New leader location: {}", location);
-        currentLeader = location;
-        listeners.forEach(listener -> {
-            try {
-                listener.onLeaderLocationChanged(location);
-            } catch (Exception e) {
-                LOG.warn("Ignoring uncaught exception thrown be LeaderLocationListener {} "
-                        + "during processing leader location change {}", listener, location, e);
-            }
-        });
-    }
-
-    @Override
-    public void close() {
-        // TODO should we also remove all listeners?
-        LOG.debug("Closing {} ShardAccess", prefix);
-        closed = true;
-
-        if (roleChangeListenerActor != null) {
-            // stop RoleChangeListenerActor
-            roleChangeListenerActor.tell(PoisonPill.getInstance(), noSender());
-            roleChangeListenerActor = null;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DOMDataTreeShardCreationFailedException.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DOMDataTreeShardCreationFailedException.java
deleted file mode 100644 (file)
index eda1153..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.annotations.Beta;
-import javax.annotation.Nonnull;
-
-/**
- * Exception thrown when there was a at any point during the creation of a shard via {@link DistributedShardFactory}.
- */
-@Beta
-public class DOMDataTreeShardCreationFailedException extends Exception {
-    private static final long serialVersionUID = 1L;
-
-    public DOMDataTreeShardCreationFailedException(@Nonnull final String message) {
-        super(message);
-    }
-
-    public DOMDataTreeShardCreationFailedException(@Nonnull final String message, @Nonnull final Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardChangePublisher.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardChangePublisher.java
deleted file mode 100644 (file)
index 5d1e4b5..0000000
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
-import org.opendaylight.mdsal.dom.spi.AbstractRegistrationTree;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeNode;
-import org.opendaylight.mdsal.dom.spi.shard.ChildShardContext;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DistributedShardChangePublisher
-        extends AbstractRegistrationTree<AbstractDOMDataTreeChangeListenerRegistration<?>>
-        implements DOMStoreTreeChangePublisher {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardChangePublisher.class);
-
-    private final AbstractDataStore distributedDataStore;
-    private final YangInstanceIdentifier shardPath;
-
-    private final Map<DOMDataTreeIdentifier, ChildShardContext> childShards;
-
-    @GuardedBy("this")
-    private final DataTree dataTree;
-
-    public DistributedShardChangePublisher(final DataStoreClient client,
-                                           final AbstractDataStore distributedDataStore,
-                                           final DOMDataTreeIdentifier prefix,
-                                           final Map<DOMDataTreeIdentifier, ChildShardContext> childShards) {
-        this.distributedDataStore = distributedDataStore;
-        // TODO keeping the whole dataTree thats contained in subshards doesn't seem like a good idea
-        // maybe the whole listener logic would be better in the backend shards where we have direct access to the
-        // dataTree and wont have to cache it redundantly.
-
-        final DataTreeConfiguration baseConfig;
-        switch (prefix.getDatastoreType()) {
-            case CONFIGURATION:
-                baseConfig = DataTreeConfiguration.DEFAULT_CONFIGURATION;
-                break;
-            case OPERATIONAL:
-                baseConfig = DataTreeConfiguration.DEFAULT_OPERATIONAL;
-                break;
-            default:
-                throw new UnsupportedOperationException("Unknown prefix type " + prefix.getDatastoreType());
-        }
-
-        this.dataTree = new InMemoryDataTreeFactory().create(new DataTreeConfiguration.Builder(baseConfig.getTreeType())
-                .setMandatoryNodesValidation(baseConfig.isMandatoryNodesValidationEnabled())
-                .setUniqueIndexes(baseConfig.isUniqueIndexEnabled())
-                .setRootPath(prefix.getRootIdentifier())
-                .build());
-
-        // XXX: can we guarantee that the root is present in the schemacontext?
-        this.dataTree.setSchemaContext(distributedDataStore.getActorUtils().getSchemaContext());
-        this.shardPath = prefix.getRootIdentifier();
-        this.childShards = childShards;
-    }
-
-    protected void registrationRemoved(final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
-        LOG.debug("Closing registration {}", registration);
-    }
-
-    @Override
-    public <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L>
-            registerTreeChangeListener(final YangInstanceIdentifier path, final L listener) {
-        takeLock();
-        try {
-            return setupListenerContext(path, listener);
-        } finally {
-            releaseLock();
-        }
-    }
-
-    private <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L>
-            setupListenerContext(final YangInstanceIdentifier listenerPath, final L listener) {
-        // we need to register the listener registration path based on the shards root
-        // we have to strip the shard path from the listener path and then register
-        YangInstanceIdentifier strippedIdentifier = listenerPath;
-        if (!shardPath.isEmpty()) {
-            strippedIdentifier = YangInstanceIdentifier.create(stripShardPath(shardPath, listenerPath));
-        }
-
-        final DOMDataTreeListenerWithSubshards subshardListener =
-                new DOMDataTreeListenerWithSubshards(strippedIdentifier, listener);
-        final AbstractDOMDataTreeChangeListenerRegistration<L> reg =
-                setupContextWithoutSubshards(listenerPath, strippedIdentifier, subshardListener);
-
-        for (final ChildShardContext maybeAffected : childShards.values()) {
-            if (listenerPath.contains(maybeAffected.getPrefix().getRootIdentifier())) {
-                // consumer has initialDataChangeEvent subshard somewhere on lower level
-                // register to the notification manager with snapshot and forward child notifications to parent
-                LOG.debug("Adding new subshard{{}} to listener at {}", maybeAffected.getPrefix(), listenerPath);
-                subshardListener.addSubshard(maybeAffected);
-            } else if (maybeAffected.getPrefix().getRootIdentifier().contains(listenerPath)) {
-                // bind path is inside subshard
-                // TODO can this happen? seems like in ShardedDOMDataTree we are
-                // already registering to the lowest shard possible
-                throw new UnsupportedOperationException("Listener should be registered directly "
-                        + "into initialDataChangeEvent subshard");
-            }
-        }
-
-        return reg;
-    }
-
-    private <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L>
-            setupContextWithoutSubshards(final YangInstanceIdentifier shardLookup,
-                                         final YangInstanceIdentifier listenerPath,
-                                         final DOMDataTreeListenerWithSubshards listener) {
-
-        LOG.debug("Registering root listener full path: {}, path inside shard: {}", shardLookup, listenerPath);
-
-        // register in the shard tree
-        final RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> node =
-                findNodeFor(listenerPath.getPathArguments());
-
-        // register listener in CDS
-        ListenerRegistration<DOMDataTreeChangeListener> listenerReg = distributedDataStore
-                .registerProxyListener(shardLookup, listenerPath, listener);
-
-        @SuppressWarnings("unchecked")
-        final AbstractDOMDataTreeChangeListenerRegistration<L> registration =
-            new AbstractDOMDataTreeChangeListenerRegistration<L>((L) listener) {
-                @Override
-                protected void removeRegistration() {
-                    listener.close();
-                    DistributedShardChangePublisher.this.removeRegistration(node, this);
-                    registrationRemoved(this);
-                    listenerReg.close();
-                }
-            };
-        addRegistration(node, registration);
-
-        return registration;
-    }
-
-    private static Iterable<PathArgument> stripShardPath(final YangInstanceIdentifier shardPath,
-                                                         final YangInstanceIdentifier listenerPath) {
-        if (shardPath.isEmpty()) {
-            return listenerPath.getPathArguments();
-        }
-
-        final List<PathArgument> listenerPathArgs = new ArrayList<>(listenerPath.getPathArguments());
-        final Iterator<PathArgument> shardIter = shardPath.getPathArguments().iterator();
-        final Iterator<PathArgument> listenerIter = listenerPathArgs.iterator();
-
-        while (shardIter.hasNext()) {
-            if (shardIter.next().equals(listenerIter.next())) {
-                listenerIter.remove();
-            } else {
-                break;
-            }
-        }
-
-        return listenerPathArgs;
-    }
-
-    synchronized DataTreeCandidate applyChanges(final YangInstanceIdentifier listenerPath,
-            final Collection<DataTreeCandidate> changes) throws DataValidationFailedException {
-        final DataTreeModification modification = dataTree.takeSnapshot().newModification();
-        for (final DataTreeCandidate change : changes) {
-            try {
-                DataTreeCandidates.applyToModification(modification, change);
-            } catch (SchemaValidationFailedException e) {
-                LOG.error("Validation failed {}", e);
-            }
-        }
-
-        modification.ready();
-
-        final DataTreeCandidate candidate;
-
-        dataTree.validate(modification);
-
-        // strip nodes we dont need since this listener doesn't have to be registered at the root of the DataTree
-        candidate = dataTree.prepare(modification);
-        dataTree.commit(candidate);
-
-
-        DataTreeCandidateNode modifiedChild = candidate.getRootNode();
-
-        for (final PathArgument pathArgument : listenerPath.getPathArguments()) {
-            modifiedChild = modifiedChild.getModifiedChild(pathArgument);
-        }
-
-        if (modifiedChild == null) {
-            modifiedChild = new EmptyDataTreeCandidateNode(dataTree.getRootPath().getLastPathArgument());
-        }
-
-        return DataTreeCandidates.newDataTreeCandidate(dataTree.getRootPath(), modifiedChild);
-    }
-
-
-    private final class DOMDataTreeListenerWithSubshards implements DOMDataTreeChangeListener {
-
-        private final YangInstanceIdentifier listenerPath;
-        private final DOMDataTreeChangeListener delegate;
-        private final Map<YangInstanceIdentifier, ListenerRegistration<DOMDataTreeChangeListener>> registrations =
-                new ConcurrentHashMap<>();
-
-        @GuardedBy("this")
-        private final Collection<DataTreeCandidate> stashedDataTreeCandidates = new LinkedList<>();
-
-        DOMDataTreeListenerWithSubshards(final YangInstanceIdentifier listenerPath,
-                                         final DOMDataTreeChangeListener delegate) {
-            this.listenerPath = Preconditions.checkNotNull(listenerPath);
-            this.delegate = Preconditions.checkNotNull(delegate);
-        }
-
-        @Override
-        public synchronized void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
-            LOG.debug("Received data changed {}", changes);
-
-            if (!stashedDataTreeCandidates.isEmpty()) {
-                LOG.debug("Adding stashed subshards' changes {}", stashedDataTreeCandidates);
-                changes.addAll(stashedDataTreeCandidates);
-                stashedDataTreeCandidates.clear();
-            }
-
-            try {
-                applyChanges(listenerPath, changes);
-            } catch (final DataValidationFailedException e) {
-                // TODO should we fail here? What if stashed changes
-                // (changes from subshards) got ahead more than one generation
-                // from current shard. Than we can fail to apply this changes
-                // upon current data tree, but once we get respective changes
-                // from current shard, we can apply also changes from
-                // subshards.
-                //
-                // However, we can loose ability to notice and report some
-                // errors then. For example, we cannot detect potential lost
-                // changes from current shard.
-                LOG.error("Validation failed for modification built from changes {}, current data tree: {}",
-                        changes, dataTree, e);
-                throw new RuntimeException("Notification validation failed", e);
-            }
-
-            delegate.onDataTreeChanged(changes);
-        }
-
-        synchronized void onDataTreeChanged(final YangInstanceIdentifier pathFromRoot,
-                                            final Collection<DataTreeCandidate> changes) {
-            final YangInstanceIdentifier changeId =
-                    YangInstanceIdentifier.create(stripShardPath(dataTree.getRootPath(), pathFromRoot));
-
-            final List<DataTreeCandidate> newCandidates = changes.stream()
-                    .map(candidate -> DataTreeCandidates.newDataTreeCandidate(changeId, candidate.getRootNode()))
-                    .collect(Collectors.toList());
-
-            try {
-                delegate.onDataTreeChanged(Collections.singleton(applyChanges(listenerPath, newCandidates)));
-            } catch (final DataValidationFailedException e) {
-                // We cannot apply changes from subshard to current data tree.
-                // Maybe changes from current shard haven't been applied to
-                // data tree yet. Postpone processing of these changes till we
-                // receive changes from current shard.
-                LOG.debug("Validation for modification built from subshard {} changes {} failed, current data tree {}.",
-                        pathFromRoot, changes, dataTree, e);
-                stashedDataTreeCandidates.addAll(newCandidates);
-            }
-        }
-
-        void addSubshard(final ChildShardContext context) {
-            Preconditions.checkState(context.getShard() instanceof DOMStoreTreeChangePublisher,
-                    "All subshards that are part of ListenerContext need to be listenable");
-
-            final DOMStoreTreeChangePublisher listenableShard = (DOMStoreTreeChangePublisher) context.getShard();
-            // since this is going into a subshard, we want to listen for ALL changes in the subshard
-            registrations.put(context.getPrefix().getRootIdentifier(),
-                    listenableShard.registerTreeChangeListener(
-                            context.getPrefix().getRootIdentifier(), changes -> onDataTreeChanged(
-                                    context.getPrefix().getRootIdentifier(), changes)));
-        }
-
-        void close() {
-            for (final ListenerRegistration<DOMDataTreeChangeListener> registration : registrations.values()) {
-                registration.close();
-            }
-            registrations.clear();
-        }
-    }
-
-    private static final class EmptyDataTreeCandidateNode implements DataTreeCandidateNode {
-
-        private final PathArgument identifier;
-
-        EmptyDataTreeCandidateNode(final PathArgument identifier) {
-            this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
-        }
-
-        @Nonnull
-        @Override
-        public PathArgument getIdentifier() {
-            return identifier;
-        }
-
-        @Nonnull
-        @Override
-        public Collection<DataTreeCandidateNode> getChildNodes() {
-            return Collections.emptySet();
-        }
-
-        @Nullable
-        @Override
-        @SuppressWarnings("checkstyle:hiddenField")
-        public DataTreeCandidateNode getModifiedChild(final PathArgument identifier) {
-            return null;
-        }
-
-        @Nonnull
-        @Override
-        public ModificationType getModificationType() {
-            return ModificationType.UNMODIFIED;
-        }
-
-        @Nonnull
-        @Override
-        public Optional<NormalizedNode<?, ?>> getDataAfter() {
-            return Optional.empty();
-        }
-
-        @Nonnull
-        @Override
-        public Optional<NormalizedNode<?, ?>> getDataBefore() {
-            return Optional.empty();
-        }
-    }
-}
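
The listener removed above stashes subshard changes that cannot be validated yet and replays them together with the next batch of changes from the parent shard. Below is a minimal, self-contained sketch of that stash-and-replay pattern (not part of the removed code); the generic change type and the Applier callback are hypothetical stand-ins for DataTreeCandidate and the applyChanges() call.

import java.util.ArrayList;
import java.util.List;

/**
 * Sketch of the stash-and-replay pattern used by DOMDataTreeListenerWithSubshards: changes
 * from a subshard that cannot be applied yet are stashed and retried together with the next
 * batch of changes from the parent shard. Applier is a hypothetical stand-in for applyChanges().
 */
final class StashingListener<C> {
    @FunctionalInterface
    interface Applier<T> {
        // Throws IllegalStateException when the changes cannot be applied yet.
        void apply(List<T> changes);
    }

    private final List<C> stashed = new ArrayList<>();
    private final Applier<C> applier;

    StashingListener(final Applier<C> applier) {
        this.applier = applier;
    }

    // Changes from the parent shard: append any stashed subshard changes and apply them all.
    synchronized void onParentChanges(final List<C> changes) {
        final List<C> batch = new ArrayList<>(changes);
        batch.addAll(stashed);
        stashed.clear();
        applier.apply(batch);
    }

    // Changes from a subshard: if they cannot be applied yet, keep them for the next parent batch.
    synchronized void onSubshardChanges(final List<C> changes) {
        try {
            applier.apply(changes);
        } catch (IllegalStateException e) {
            stashed.addAll(changes);
        }
    }
}
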
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardFactory.java
deleted file mode 100644 (file)
index f1cdcd8..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.annotations.Beta;
-import java.util.Collection;
-import java.util.concurrent.CompletionStage;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-
-/**
- * A factory that handles the addition of new clustered shards based on a prefix. This factory is a convenience class
- * that handles all the boilerplate that comes with registering a new clustered shard into the system and creating
- * the backend shard/replicas that come along with it.
- */
-@Beta
-public interface DistributedShardFactory {
-
-    /**
-     * Register a new shard that is rooted at the desired prefix with replicas on the provided members.
-     * Note that even a shard without replicas still needs at least one member to be provided.
-     *
-     * @param prefix         Shard root
-     * @param replicaMembers Members that this shard is replicated on; must contain at least one member even if the
-     *                       shard should not be replicated.
-     * @return A future that will be completed with a DistributedShardRegistration once the backend and frontend shards
-     *         are spawned.
-     * @throws DOMDataTreeShardingConflictException If the initial conflict check on the local node fails; in that
-     *         case the sharding configuration is not updated.
-     */
-    CompletionStage<DistributedShardRegistration>
-        createDistributedShard(DOMDataTreeIdentifier prefix, Collection<MemberName> replicaMembers)
-            throws DOMDataTreeShardingConflictException;
-
-    /**
-     * Registration of the CDS shard that allows you to remove the shard from the system by closing the registration.
-     * This removal is done asynchronously.
-     */
-    interface DistributedShardRegistration {
-
-        /**
-         * Removes the shard from the system. The removal is done asynchronously; the returned future completes once
-         * the backend shard is no longer present.
-         */
-        CompletionStage<Void> close();
-    }
-}
\ No newline at end of file
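
For reference, a hedged usage sketch of the removed interface (not part of the change itself): it registers a single-replica shard at the configuration datastore root and closes the registration afterwards. The MemberName.forName(...) factory, the example member name and the helper class are assumptions; the DOMDataTreeIdentifier constructor mirrors the one used elsewhere in this patch.

import java.util.Collections;
import java.util.concurrent.CompletionStage;
import org.opendaylight.controller.cluster.access.concepts.MemberName;
import org.opendaylight.controller.cluster.sharding.DistributedShardFactory;
import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

final class ShardRegistrationExample {
    private ShardRegistrationExample() {
        // static helper only
    }

    // Registers a shard rooted at the configuration datastore root with a single (assumed) member
    // and removes it again once the registration completes.
    static CompletionStage<Void> registerAndRemove(final DistributedShardFactory factory)
            throws DOMDataTreeShardingConflictException {
        final CompletionStage<DistributedShardRegistration> registration = factory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY),
                Collections.singletonList(MemberName.forName("member-1")));

        // close() is asynchronous as well: the returned stage completes once the backend shard is gone.
        return registration.thenCompose(DistributedShardRegistration::close);
    }
}
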
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardFrontend.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardFrontend.java
deleted file mode 100644 (file)
index 31bc8b6..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Nonnull;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
-import org.opendaylight.mdsal.dom.spi.shard.ChildShardContext;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardProducer;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.shard.ReadableWriteableDOMDataTreeShard;
-import org.opendaylight.mdsal.dom.spi.shard.SubshardProducerSpecification;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableDOMDataTreeShard;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Proxy implementation of a shard that creates forwarding producers to the backend shard.
- */
-class DistributedShardFrontend implements ReadableWriteableDOMDataTreeShard {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardFrontend.class);
-
-    private final DataStoreClient client;
-    private final DOMDataTreeIdentifier shardRoot;
-    @GuardedBy("this")
-    private final Map<DOMDataTreeIdentifier, ChildShardContext> childShards = new HashMap<>();
-    @GuardedBy("this")
-    private final List<ShardProxyProducer> producers = new ArrayList<>();
-
-    private final DistributedShardChangePublisher publisher;
-
-    DistributedShardFrontend(final AbstractDataStore distributedDataStore,
-                             final DataStoreClient client,
-                             final DOMDataTreeIdentifier shardRoot) {
-        this.client = Preconditions.checkNotNull(client);
-        this.shardRoot = Preconditions.checkNotNull(shardRoot);
-
-        publisher = new DistributedShardChangePublisher(client, Preconditions.checkNotNull(distributedDataStore),
-                shardRoot, childShards);
-    }
-
-    @Override
-    public synchronized DOMDataTreeShardProducer createProducer(final Collection<DOMDataTreeIdentifier> paths) {
-        for (final DOMDataTreeIdentifier prodPrefix : paths) {
-            Preconditions.checkArgument(shardRoot.contains(prodPrefix),
-                    "Prefix %s is not contained under shard root, producer paths: %s", prodPrefix, paths);
-        }
-
-        final ShardProxyProducer ret =
-                new ShardProxyProducer(shardRoot, paths, client, createModificationFactory(paths));
-        producers.add(ret);
-        return ret;
-    }
-
-    @Override
-    public synchronized void onChildAttached(final DOMDataTreeIdentifier prefix, final DOMDataTreeShard child) {
-        LOG.debug("{} : Child shard attached at {}", shardRoot, prefix);
-        Preconditions.checkArgument(child != this, "Attempted to attach child %s onto self", this);
-        addChildShard(prefix, child);
-        updateProducers();
-    }
-
-    @Override
-    public synchronized void onChildDetached(final DOMDataTreeIdentifier prefix, final DOMDataTreeShard child) {
-        LOG.debug("{} : Child shard detached at {}", shardRoot, prefix);
-        childShards.remove(prefix);
-        updateProducers();
-        // TODO we should grab the dataTreeSnapshot that's in the shard and apply it to this shard
-    }
-
-    private void addChildShard(final DOMDataTreeIdentifier prefix, final DOMDataTreeShard child) {
-        Preconditions.checkArgument(child instanceof WriteableDOMDataTreeShard);
-        childShards.put(prefix, new ChildShardContext(prefix, (WriteableDOMDataTreeShard) child));
-    }
-
-    DistributedShardModificationFactory createModificationFactory(final Collection<DOMDataTreeIdentifier> prefixes) {
-        // TODO this could be abstract
-        final Map<DOMDataTreeIdentifier, SubshardProducerSpecification> affectedSubshards = new HashMap<>();
-
-        for (final DOMDataTreeIdentifier producerPrefix : prefixes) {
-            for (final ChildShardContext maybeAffected : childShards.values()) {
-                final DOMDataTreeIdentifier bindPath;
-                if (producerPrefix.contains(maybeAffected.getPrefix())) {
-                    bindPath = maybeAffected.getPrefix();
-                } else if (maybeAffected.getPrefix().contains(producerPrefix)) {
-                    // Bound path is inside subshard
-                    bindPath = producerPrefix;
-                } else {
-                    continue;
-                }
-
-                SubshardProducerSpecification spec = affectedSubshards.computeIfAbsent(maybeAffected.getPrefix(),
-                    k -> new SubshardProducerSpecification(maybeAffected));
-                spec.addPrefix(bindPath);
-            }
-        }
-
-        final DistributedShardModificationFactoryBuilder builder =
-                new DistributedShardModificationFactoryBuilder(shardRoot);
-        for (final SubshardProducerSpecification spec : affectedSubshards.values()) {
-            final ForeignShardModificationContext foreignContext =
-                    new ForeignShardModificationContext(spec.getPrefix(), spec.createProducer());
-            builder.addSubshard(foreignContext);
-            builder.addSubshard(spec.getPrefix(), foreignContext);
-        }
-
-        return builder.build();
-    }
-
-    private void updateProducers() {
-        for (final ShardProxyProducer producer : producers) {
-            producer.setModificationFactory(createModificationFactory(producer.getPrefixes()));
-        }
-    }
-
-    @Nonnull
-    @Override
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
-            final YangInstanceIdentifier treeId, final L listener) {
-        return publisher.registerTreeChangeListener(treeId, listener);
-    }
-}
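
The nested loops in createModificationFactory() above decide, for each producer prefix and each child shard, where a subshard producer should be bound. The following stand-alone sketch isolates that decision; the Prefix interface is a hypothetical stand-in for DOMDataTreeIdentifier and its contains() check.

/**
 * Sketch of the bind-path selection performed by createModificationFactory(): the narrower of
 * the producer prefix and the subshard prefix becomes the binding point, and unrelated pairs
 * are skipped. Prefix is a hypothetical stand-in for DOMDataTreeIdentifier.
 */
final class BindPathSelection {
    interface Prefix {
        boolean contains(Prefix other);
    }

    private BindPathSelection() {
    }

    // Returns the path at which a subshard producer should be bound, or null when the
    // producer does not touch the subshard at all.
    static Prefix bindPath(final Prefix producerPrefix, final Prefix subshardPrefix) {
        if (producerPrefix.contains(subshardPrefix)) {
            // Producer spans the whole subshard: bind at the subshard root.
            return subshardPrefix;
        }
        if (subshardPrefix.contains(producerPrefix)) {
            // Producer is rooted inside the subshard: bind at the producer root.
            return producerPrefix;
        }
        // Unrelated prefixes: this subshard is not affected.
        return null;
    }
}
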
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModification.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModification.java
deleted file mode 100644 (file)
index 4a36c38..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.base.Preconditions;
-import java.util.Map;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.shard.WritableNodeOperation;
-import org.opendaylight.mdsal.dom.spi.shard.WriteCursorStrategy;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableModificationNode;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableNodeWithSubshard;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-
-/**
- * Shard modification that covers the whole shard context and provides cursors which correctly delegate to subshards,
- * if any are present.
- */
-public class DistributedShardModification extends WriteableNodeWithSubshard {
-
-    private final DistributedShardModificationContext context;
-    private final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards;
-
-    public DistributedShardModification(final DistributedShardModificationContext context,
-                                        final Map<PathArgument, WriteableModificationNode> subshards,
-                                        final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards) {
-        super(subshards);
-        this.context = Preconditions.checkNotNull(context);
-        this.childShards = Preconditions.checkNotNull(childShards);
-    }
-
-    @Override
-    public PathArgument getIdentifier() {
-        return context.getIdentifier().getRootIdentifier().getLastPathArgument();
-    }
-
-    @Override
-    public WriteCursorStrategy createOperation(final DOMDataTreeWriteCursor parentCursor) {
-        return new WritableNodeOperation(this, context.cursor()) {
-            @Override
-            public void exit() {
-                throw new IllegalStateException("Cannot exit data tree root");
-            }
-        };
-    }
-
-    void cursorClosed() {
-        context.closeCursor();
-    }
-
-    DOMStoreThreePhaseCommitCohort seal() {
-        childShards.values().stream().filter(ForeignShardModificationContext::isModified)
-                .forEach(ForeignShardModificationContext::ready);
-
-        return context.ready();
-    }
-
-    DOMDataTreeIdentifier getPrefix() {
-        return context.getIdentifier();
-    }
-
-    Map<DOMDataTreeIdentifier, ForeignShardModificationContext> getChildShards() {
-        return childShards;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationContext.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationContext.java
deleted file mode 100644 (file)
index 9d51684..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-
-/**
- * The context for a single shard's modification; keeps a ClientTransaction so it can route requests correctly.
- */
-public class DistributedShardModificationContext {
-
-    private ClientTransaction transaction;
-    private DOMDataTreeIdentifier identifier;
-    private DOMDataTreeWriteCursor cursor;
-
-    public DistributedShardModificationContext(final ClientTransaction transaction,
-                                               final DOMDataTreeIdentifier identifier) {
-        this.transaction = transaction;
-        this.identifier = identifier;
-    }
-
-    public DOMDataTreeIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    DOMDataTreeWriteCursor cursor() {
-        if (cursor == null) {
-            cursor = transaction.openCursor();
-        }
-
-        return cursor;
-    }
-
-    DOMStoreThreePhaseCommitCohort ready() {
-        if (cursor != null) {
-            cursor.close();
-            cursor = null;
-        }
-
-        return transaction.ready();
-    }
-
-    void closeCursor() {
-        if (cursor != null) {
-            cursor.close();
-            cursor = null;
-        }
-    }
-
-}
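
The context above opens its write cursor lazily and always closes it before handing the transaction off to ready(). A generic sketch of that open-lazily/close-before-ready pattern follows; the Cursor and Transaction types are hypothetical stand-ins for DOMDataTreeWriteCursor and ClientTransaction.

/**
 * Sketch of the lazy-cursor handling in DistributedShardModificationContext: the cursor is
 * opened on first use and must be closed before the transaction is readied.
 */
final class LazyCursorContext {
    interface Cursor extends AutoCloseable {
        @Override
        void close();
    }

    interface Transaction {
        Cursor openCursor();

        Object ready();
    }

    private final Transaction transaction;
    private Cursor cursor;

    LazyCursorContext(final Transaction transaction) {
        this.transaction = transaction;
    }

    // Open the cursor only when it is actually needed.
    Cursor cursor() {
        if (cursor == null) {
            cursor = transaction.openCursor();
        }
        return cursor;
    }

    // The cursor must not stay open across ready(), hence it is closed (and dropped) first.
    Object ready() {
        closeCursor();
        return transaction.ready();
    }

    void closeCursor() {
        if (cursor != null) {
            cursor.close();
            cursor = null;
        }
    }
}
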
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationCursor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationCursor.java
deleted file mode 100644 (file)
index 37ccf60..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import org.opendaylight.mdsal.dom.spi.shard.AbstractDataModificationCursor;
-import org.opendaylight.mdsal.dom.spi.shard.WriteCursorStrategy;
-
-/**
- * Internal cursor implementation consisting of WriteCursorStrategies, which forward writes to foreign modifications
- * if any are present.
- */
-public class DistributedShardModificationCursor extends AbstractDataModificationCursor<DistributedShardModification> {
-
-    private ShardProxyTransaction parent;
-
-    public DistributedShardModificationCursor(final DistributedShardModification root,
-                                              final ShardProxyTransaction parent) {
-        super(root);
-        this.parent = parent;
-    }
-
-    @Override
-    protected WriteCursorStrategy getRootOperation(final DistributedShardModification root) {
-        return root.createOperation(null);
-    }
-
-    @Override
-    public void close() {
-        parent.cursorClosed();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationFactory.java
deleted file mode 100644 (file)
index 8fc1f48..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import java.util.Map;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.shard.WriteableModificationNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-
-/**
- * Factory for {@link DistributedShardModification}.
- */
-public final class DistributedShardModificationFactory {
-    private final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards;
-    private final Map<PathArgument, WriteableModificationNode> children;
-    private final DOMDataTreeIdentifier root;
-
-    DistributedShardModificationFactory(final DOMDataTreeIdentifier root,
-                                        final Map<PathArgument, WriteableModificationNode> children,
-                                        final Map<DOMDataTreeIdentifier, ForeignShardModificationContext> childShards) {
-        this.root = Preconditions.checkNotNull(root);
-        this.children = ImmutableMap.copyOf(children);
-        this.childShards = ImmutableMap.copyOf(childShards);
-    }
-
-    @VisibleForTesting
-    Map<PathArgument, WriteableModificationNode> getChildren() {
-        return children;
-    }
-
-    @VisibleForTesting
-    Map<DOMDataTreeIdentifier, ForeignShardModificationContext> getChildShards() {
-        return childShards;
-    }
-
-    DistributedShardModification createModification(final ClientTransaction transaction) {
-        return new DistributedShardModification(
-                new DistributedShardModificationContext(transaction, root), children, childShards);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationFactoryBuilder.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardModificationFactoryBuilder.java
deleted file mode 100644 (file)
index 15459ce..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.store.inmemory.ShardDataModificationFactoryBuilder;
-
-/**
- * Builder for {@link DistributedShardModificationFactory}.
- */
-public class DistributedShardModificationFactoryBuilder
-        extends ShardDataModificationFactoryBuilder<DistributedShardModificationFactory> {
-
-
-    public DistributedShardModificationFactoryBuilder(final DOMDataTreeIdentifier root) {
-        super(root);
-    }
-
-    @Override
-    public DistributedShardModificationFactory build() {
-        return new DistributedShardModificationFactory(root, buildChildren(), childShards);
-    }
-}
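
A hedged sketch (not part of the change) of how the removed builder and factory fit together, using only calls that appear in this patch: addSubshard(), build() and createModification(). It is placed in the same package because createModification() is package-private; the helper name and parameters are illustrative.

package org.opendaylight.controller.cluster.sharding;

import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;

final class ModificationFactoryExample {
    private ModificationFactoryExample() {
    }

    // Builds a factory for a shard rooted at shardRoot with a single known subshard and uses it
    // to create a modification for the given client transaction.
    static DistributedShardModification newModification(final DOMDataTreeIdentifier shardRoot,
            final DOMDataTreeIdentifier subshardPrefix, final ForeignShardModificationContext subshard,
            final ClientTransaction transaction) {
        final DistributedShardModificationFactoryBuilder builder =
                new DistributedShardModificationFactoryBuilder(shardRoot);
        // Mirrors DistributedShardFrontend.createModificationFactory(): the subshard is registered
        // both by context and by prefix, as in the removed code.
        builder.addSubshard(subshard);
        builder.addSubshard(subshardPrefix, subshard);

        return builder.build().createModification(transaction);
    }
}
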
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTree.java
deleted file mode 100644 (file)
index df21b90..0000000
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
- * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.dispatch.Mapper;
-import akka.dispatch.OnComplete;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.AbstractMap.SimpleEntry;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.EnumMap;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.CompletionStage;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import javax.annotation.Nonnull;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.GuardedBy;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.messages.CreateShard;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor.ShardedDataTreeActorCreator;
-import org.opendaylight.controller.cluster.sharding.messages.InitConfigListener;
-import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeLoopException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeServiceExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService;
-import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
-import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
-import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTable;
-import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.prefix.shard.configuration.rev170110.PrefixShards;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.compat.java8.FutureConverters;
-import scala.concurrent.Future;
-import scala.concurrent.Promise;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * A layer on top of DOMDataTreeService that distributes producer/shard registrations to remote nodes via
- * {@link ShardedDataTreeActor}. Also provides a convenience method for adding a prefix-based clustered shard into
- * the system.
- */
-public class DistributedShardedDOMDataTree implements DOMDataTreeService, DOMDataTreeShardingService,
-        DistributedShardFactory {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTree.class);
-
-    private static final int MAX_ACTOR_CREATION_RETRIES = 100;
-    private static final int ACTOR_RETRY_DELAY = 100;
-    private static final TimeUnit ACTOR_RETRY_TIME_UNIT = TimeUnit.MILLISECONDS;
-    private static final int LOOKUP_TASK_MAX_RETRIES = 100;
-    static final FiniteDuration SHARD_FUTURE_TIMEOUT_DURATION =
-            new FiniteDuration(LOOKUP_TASK_MAX_RETRIES * LOOKUP_TASK_MAX_RETRIES * 3, TimeUnit.SECONDS);
-    static final Timeout SHARD_FUTURE_TIMEOUT = new Timeout(SHARD_FUTURE_TIMEOUT_DURATION);
-
-    static final String ACTOR_ID = "ShardedDOMDataTreeFrontend";
-
-    private final ShardedDOMDataTree shardedDOMDataTree;
-    private final ActorSystem actorSystem;
-    private final AbstractDataStore distributedOperDatastore;
-    private final AbstractDataStore distributedConfigDatastore;
-
-    private final ActorRef shardedDataTreeActor;
-    private final MemberName memberName;
-
-    @GuardedBy("shards")
-    private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shards =
-            DOMDataTreePrefixTable.create();
-
-    private final EnumMap<LogicalDatastoreType, Entry<DataStoreClient, ActorRef>> configurationShardMap =
-            new EnumMap<>(LogicalDatastoreType.class);
-
-    private final EnumMap<LogicalDatastoreType, PrefixedShardConfigWriter> writerMap =
-            new EnumMap<>(LogicalDatastoreType.class);
-
-    private final PrefixedShardConfigUpdateHandler updateHandler;
-
-    public DistributedShardedDOMDataTree(final ActorSystemProvider actorSystemProvider,
-                                         final AbstractDataStore distributedOperDatastore,
-                                         final AbstractDataStore distributedConfigDatastore) {
-        this.actorSystem = Preconditions.checkNotNull(actorSystemProvider).getActorSystem();
-        this.distributedOperDatastore = Preconditions.checkNotNull(distributedOperDatastore);
-        this.distributedConfigDatastore = Preconditions.checkNotNull(distributedConfigDatastore);
-        shardedDOMDataTree = new ShardedDOMDataTree();
-
-        shardedDataTreeActor = createShardedDataTreeActor(actorSystem,
-                new ShardedDataTreeActorCreator()
-                        .setShardingService(this)
-                        .setActorSystem(actorSystem)
-                        .setClusterWrapper(distributedConfigDatastore.getActorUtils().getClusterWrapper())
-                        .setDistributedConfigDatastore(distributedConfigDatastore)
-                        .setDistributedOperDatastore(distributedOperDatastore)
-                        .setLookupTaskMaxRetries(LOOKUP_TASK_MAX_RETRIES),
-                ACTOR_ID);
-
-        this.memberName = distributedConfigDatastore.getActorUtils().getCurrentMemberName();
-
-        updateHandler = new PrefixedShardConfigUpdateHandler(shardedDataTreeActor,
-                distributedConfigDatastore.getActorUtils().getCurrentMemberName());
-
-        LOG.debug("{} - Starting prefix configuration shards", memberName);
-        createPrefixConfigShard(distributedConfigDatastore);
-        createPrefixConfigShard(distributedOperDatastore);
-    }
-
-    private static void createPrefixConfigShard(final AbstractDataStore dataStore) {
-        Configuration configuration = dataStore.getActorUtils().getConfiguration();
-        Collection<MemberName> memberNames = configuration.getUniqueMemberNamesForAllShards();
-        CreateShard createShardMessage =
-                new CreateShard(new ModuleShardConfiguration(PrefixShards.QNAME.getNamespace(),
-                        "prefix-shard-configuration", ClusterUtils.PREFIX_CONFIG_SHARD_ID, ModuleShardStrategy.NAME,
-                        memberNames),
-                        Shard.builder(), dataStore.getActorUtils().getDatastoreContext());
-
-        dataStore.getActorUtils().getShardManager().tell(createShardMessage, noSender());
-    }
-
-    /**
-     * This will try to initialize prefix configuration shards upon their
-     * successful start. We need to create writers to these shards, so we can
-     * satisfy future {@link #createDistributedShard} and
-     * {@link #resolveShardAdditions} requests and update prefix configuration
-     * shards accordingly.
-     *
-     * <p>
-     * We also need to initialize listeners on these shards, so we can react
-     * on changes made on them by other cluster members or even by ourselves.
-     *
-     * <p>
-     * Finally, we need to be sure that default shards for both operational and
-     * configuration data stores are up and running and that we have distributed
-     * shard frontends created for them.
-     *
-     * <p>
-     * This is intended to be invoked by blueprint as initialization method.
-     */
-    public void init() {
-        // create our writers to the configuration
-        try {
-            LOG.debug("{} - starting config shard lookup.", memberName);
-
-            // We have to wait for prefix config shards to be up and running
-            // so we can create datastore clients for them
-            handleConfigShardLookup().get(SHARD_FUTURE_TIMEOUT_DURATION.length(), SHARD_FUTURE_TIMEOUT_DURATION.unit());
-        } catch (InterruptedException | ExecutionException | TimeoutException e) {
-            throw new IllegalStateException("Prefix config shards not found", e);
-        }
-
-        try {
-            LOG.debug("{}: Prefix configuration shards ready - creating clients", memberName);
-            configurationShardMap.put(LogicalDatastoreType.CONFIGURATION,
-                    createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
-                            distributedConfigDatastore.getActorUtils()));
-        } catch (final DOMDataTreeShardCreationFailedException e) {
-            throw new IllegalStateException(
-                    "Unable to create datastoreClient for config DS prefix configuration shard.", e);
-        }
-
-        try {
-            configurationShardMap.put(LogicalDatastoreType.OPERATIONAL,
-                    createDatastoreClient(ClusterUtils.PREFIX_CONFIG_SHARD_ID,
-                            distributedOperDatastore.getActorUtils()));
-
-        } catch (final DOMDataTreeShardCreationFailedException e) {
-            throw new IllegalStateException(
-                        "Unable to create datastoreClient for oper DS prefix configuration shard.", e);
-        }
-
-        writerMap.put(LogicalDatastoreType.CONFIGURATION, new PrefixedShardConfigWriter(
-                configurationShardMap.get(LogicalDatastoreType.CONFIGURATION).getKey()));
-
-        writerMap.put(LogicalDatastoreType.OPERATIONAL, new PrefixedShardConfigWriter(
-                configurationShardMap.get(LogicalDatastoreType.OPERATIONAL).getKey()));
-
-        updateHandler.initListener(distributedConfigDatastore, LogicalDatastoreType.CONFIGURATION);
-        updateHandler.initListener(distributedOperDatastore, LogicalDatastoreType.OPERATIONAL);
-
-        distributedConfigDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
-        distributedOperDatastore.getActorUtils().getShardManager().tell(InitConfigListener.INSTANCE, noSender());
-
-
-        // create shard registration for DEFAULT_SHARD
-        initDefaultShard(LogicalDatastoreType.CONFIGURATION);
-        initDefaultShard(LogicalDatastoreType.OPERATIONAL);
-    }
-
-    private ListenableFuture<List<Void>> handleConfigShardLookup() {
-
-        final ListenableFuture<Void> configFuture = lookupConfigShard(LogicalDatastoreType.CONFIGURATION);
-        final ListenableFuture<Void> operFuture = lookupConfigShard(LogicalDatastoreType.OPERATIONAL);
-
-        return Futures.allAsList(configFuture, operFuture);
-    }
-
-    private ListenableFuture<Void> lookupConfigShard(final LogicalDatastoreType type) {
-        final SettableFuture<Void> future = SettableFuture.create();
-
-        final Future<Object> ask =
-                Patterns.ask(shardedDataTreeActor, new StartConfigShardLookup(type), SHARD_FUTURE_TIMEOUT);
-
-        ask.onComplete(new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable throwable, final Object result) {
-                if (throwable != null) {
-                    future.setException(throwable);
-                } else {
-                    future.set(null);
-                }
-            }
-        }, actorSystem.dispatcher());
-
-        return future;
-    }
-
-    @Nonnull
-    @Override
-    public <T extends DOMDataTreeListener> ListenerRegistration<T> registerListener(
-            final T listener, final Collection<DOMDataTreeIdentifier> subtrees,
-            final boolean allowRxMerges, final Collection<DOMDataTreeProducer> producers)
-            throws DOMDataTreeLoopException {
-        return shardedDOMDataTree.registerListener(listener, subtrees, allowRxMerges, producers);
-    }
-
-    @Override
-    public ClassToInstanceMap<DOMDataTreeServiceExtension> getExtensions() {
-        return ImmutableClassToInstanceMap.of();
-    }
-
-    @Nonnull
-    @Override
-    public DOMDataTreeProducer createProducer(@Nonnull final Collection<DOMDataTreeIdentifier> subtrees) {
-        LOG.debug("{} - Creating producer for {}", memberName, subtrees);
-        final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(subtrees);
-
-        final Object response = distributedConfigDatastore.getActorUtils()
-                .executeOperation(shardedDataTreeActor, new ProducerCreated(subtrees));
-        if (response == null) {
-            LOG.debug("{} - Received success from remote nodes, creating producer:{}", memberName, subtrees);
-            return new ProxyProducer(producer, subtrees, shardedDataTreeActor,
-                    distributedConfigDatastore.getActorUtils(), shards);
-        }
-
-        closeProducer(producer);
-
-        if (response instanceof Throwable) {
-            Throwables.throwIfUnchecked((Throwable) response);
-            throw new RuntimeException((Throwable) response);
-        }
-        throw new RuntimeException("Unexpected response to create producer received: " + response);
-    }
-
-    @Override
-    public CompletionStage<DistributedShardRegistration> createDistributedShard(
-            final DOMDataTreeIdentifier prefix, final Collection<MemberName> replicaMembers)
-            throws DOMDataTreeShardingConflictException {
-
-        synchronized (shards) {
-            final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
-                    shards.lookup(prefix);
-            if (lookup != null && lookup.getValue().getPrefix().equals(prefix)) {
-                throw new DOMDataTreeShardingConflictException(
-                        "Prefix " + prefix + " is already occupied by another shard.");
-            }
-        }
-
-        final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
-
-        final ListenableFuture<Void> writeFuture =
-                writer.writeConfig(prefix.getRootIdentifier(), replicaMembers);
-
-        final Promise<DistributedShardRegistration> shardRegistrationPromise = akka.dispatch.Futures.promise();
-        Futures.addCallback(writeFuture, new FutureCallback<Void>() {
-            @Override
-            public void onSuccess(@Nullable final Void result) {
-
-                final Future<Object> ask =
-                        Patterns.ask(shardedDataTreeActor, new LookupPrefixShard(prefix), SHARD_FUTURE_TIMEOUT);
-
-                shardRegistrationPromise.completeWith(ask.transform(
-                        new Mapper<Object, DistributedShardRegistration>() {
-                            @Override
-                            public DistributedShardRegistration apply(final Object parameter) {
-                                return new DistributedShardRegistrationImpl(
-                                        prefix, shardedDataTreeActor, DistributedShardedDOMDataTree.this);
-                            }
-                        },
-                        new Mapper<Throwable, Throwable>() {
-                            @Override
-                            public Throwable apply(final Throwable throwable) {
-                                return new DOMDataTreeShardCreationFailedException(
-                                        "Unable to create a cds shard.", throwable);
-                            }
-                        }, actorSystem.dispatcher()));
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                shardRegistrationPromise.failure(
-                        new DOMDataTreeShardCreationFailedException("Unable to create a cds shard.", throwable));
-            }
-        }, MoreExecutors.directExecutor());
-
-        return FutureConverters.toJava(shardRegistrationPromise.future());
-    }
-
-    void resolveShardAdditions(final Set<DOMDataTreeIdentifier> additions) {
-        LOG.debug("{}: Resolving additions : {}", memberName, additions);
-        // we need to register the shards from top to bottom, so at the very least make sure the ordering reflects that
-        additions
-            .stream()
-            .sorted(Comparator.comparingInt(o -> o.getRootIdentifier().getPathArguments().size()))
-            .forEachOrdered(this::createShardFrontend);
-    }
-
-    void resolveShardRemovals(final Set<DOMDataTreeIdentifier> removals) {
-        LOG.debug("{}: Resolving removals : {}", memberName, removals);
-
-        // do we need to go from bottom to top?
-        removals.forEach(this::despawnShardFrontend);
-    }
-
-    private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
-        LOG.debug("{}: Creating CDS shard for prefix: {}", memberName, prefix);
-        final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
-        final AbstractDataStore distributedDataStore =
-                prefix.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
-                        ? distributedConfigDatastore : distributedOperDatastore;
-
-        try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
-            final Entry<DataStoreClient, ActorRef> entry =
-                    createDatastoreClient(shardName, distributedDataStore.getActorUtils());
-
-            final DistributedShardFrontend shard =
-                    new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);
-
-            final DOMDataTreeShardRegistration<DOMDataTreeShard> reg =
-                    shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
-
-            synchronized (shards) {
-                shards.store(prefix, reg);
-            }
-
-        } catch (final DOMDataTreeShardingConflictException e) {
-            LOG.error("{}: Prefix {} is already occupied by another shard",
-                    distributedConfigDatastore.getActorUtils().getClusterWrapper().getCurrentMemberName(), prefix, e);
-        } catch (DOMDataTreeProducerException e) {
-            LOG.error("Unable to close producer", e);
-        } catch (DOMDataTreeShardCreationFailedException e) {
-            LOG.error("Unable to create datastore client for shard {}", prefix, e);
-        }
-    }
-
-    private void despawnShardFrontend(final DOMDataTreeIdentifier prefix) {
-        LOG.debug("{}: Removing CDS shard for prefix: {}", memberName, prefix);
-        final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup;
-        synchronized (shards) {
-            lookup = shards.lookup(prefix);
-        }
-
-        if (lookup == null || !lookup.getValue().getPrefix().equals(prefix)) {
-            LOG.debug("{}: Received despawn for non-existent CDS shard frontend, prefix: {}, ignoring",
-                    memberName, prefix);
-            return;
-        }
-
-        lookup.getValue().close();
-        // need to remove from our local table that's used for tracking
-        synchronized (shards) {
-            shards.remove(prefix);
-        }
-
-        final PrefixedShardConfigWriter writer = writerMap.get(prefix.getDatastoreType());
-        final ListenableFuture<Void> future = writer.removeConfig(prefix.getRootIdentifier());
-
-        Futures.addCallback(future, new FutureCallback<Void>() {
-            @Override
-            public void onSuccess(@Nullable final Void result) {
-                LOG.debug("{} - Successfully removed shard for {}", memberName, prefix);
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                LOG.error("Removal of shard {} from configuration failed.", prefix, throwable);
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookupShardFrontend(
-            final DOMDataTreeIdentifier prefix) {
-        synchronized (shards) {
-            return shards.lookup(prefix);
-        }
-    }
-
-    DOMDataTreeProducer localCreateProducer(final Collection<DOMDataTreeIdentifier> prefix) {
-        return shardedDOMDataTree.createProducer(prefix);
-    }
-
-    @Nonnull
-    @Override
-    public <T extends DOMDataTreeShard> ListenerRegistration<T> registerDataTreeShard(
-            @Nonnull final DOMDataTreeIdentifier prefix,
-            @Nonnull final T shard,
-            @Nonnull final DOMDataTreeProducer producer)
-            throws DOMDataTreeShardingConflictException {
-
-        LOG.debug("Registering shard[{}] at prefix: {}", shard, prefix);
-
-        if (producer instanceof ProxyProducer) {
-            return shardedDOMDataTree.registerDataTreeShard(prefix, shard, ((ProxyProducer) producer).delegate());
-        }
-
-        return shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private Entry<DataStoreClient, ActorRef> createDatastoreClient(final String shardName, final ActorUtils actorUtils)
-            throws DOMDataTreeShardCreationFailedException {
-
-        LOG.debug("{}: Creating distributed datastore client for shard {}", memberName, shardName);
-        final Props distributedDataStoreClientProps =
-                SimpleDataStoreClientActor.props(memberName, "Shard-" + shardName, actorUtils, shardName);
-
-        final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
-        try {
-            return new SimpleEntry<>(SimpleDataStoreClientActor
-                    .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS), clientActor);
-        } catch (final Exception e) {
-            LOG.error("{}: Failed to get actor for {}", memberName, distributedDataStoreClientProps, e);
-            clientActor.tell(PoisonPill.getInstance(), noSender());
-            throw new DOMDataTreeShardCreationFailedException(
-                    "Unable to create datastore client for shard{" + shardName + "}", e);
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void initDefaultShard(final LogicalDatastoreType logicalDatastoreType) {
-
-        final PrefixedShardConfigWriter writer = writerMap.get(logicalDatastoreType);
-
-        if (writer.checkDefaultIsPresent()) {
-            LOG.debug("{}: Default shard for {} is already present in the config. Possibly saved in snapshot.",
-                    memberName, logicalDatastoreType);
-        } else {
-            try {
-                // Currently the default shard configuration is present in the out-of-box modules.conf and is
-                // expected to be present. So look up the local default shard here and create the frontend.
-
-                // TODO we don't have to do it for config and operational default shard separately. Just one of them
-                // should be enough
-                final ActorUtils actorUtils = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION
-                        ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
-
-                final Optional<ActorRef> defaultLocalShardOptional =
-                        actorUtils.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
-
-                if (defaultLocalShardOptional.isPresent()) {
-                    LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName,
-                            logicalDatastoreType);
-                    createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY));
-                }
-
-                // The local shard isn't present - we assume that means the local member isn't in the replica list
-                // and will be dynamically created later via an explicit add-shard-replica request. This is the
-                // bootstrapping mechanism to add a new node into an existing cluster. The following code to create
-                // the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
-                // the default shard is a module-based shard by default, it makes sense to always treat it as such,
-                // ie bootstrap it in the same manner as the special prefix-configuration and EOS shards.
-//                final Collection<MemberName> names = distributedConfigDatastore.getActorUtils().getConfiguration()
-//                        .getUniqueMemberNamesForAllShards();
-//                Await.result(FutureConverters.toScala(createDistributedShard(
-//                        new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
-//                        SHARD_FUTURE_TIMEOUT_DURATION);
-//            } catch (DOMDataTreeShardingConflictException e) {
-//                LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
-//                        memberName, logicalDatastoreType);
-            } catch (Exception e) {
-                LOG.error("{}: Default shard initialization for {} failed", memberName, logicalDatastoreType, e);
-                throw new RuntimeException(e);
-            }
-        }
-    }
-
-    private static void closeProducer(final DOMDataTreeProducer producer) {
-        try {
-            producer.close();
-        } catch (final DOMDataTreeProducerException e) {
-            LOG.error("Unable to close producer", e);
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private static ActorRef createShardedDataTreeActor(final ActorSystem actorSystem,
-                                                       final ShardedDataTreeActorCreator creator,
-                                                       final String shardDataTreeActorId) {
-        Exception lastException = null;
-
-        for (int i = 0; i < MAX_ACTOR_CREATION_RETRIES; i++) {
-            try {
-                return actorSystem.actorOf(creator.props(), shardDataTreeActorId);
-            } catch (final Exception e) {
-                lastException = e;
-                Uninterruptibles.sleepUninterruptibly(ACTOR_RETRY_DELAY, ACTOR_RETRY_TIME_UNIT);
-                LOG.debug("Could not create actor {} because of {} -"
-                                + " waiting for some time before retrying (retry count = {})",
-                        shardDataTreeActorId, e.getMessage(), i);
-            }
-        }
-
-        throw new IllegalStateException("Failed to create actor for ShardedDOMDataTree", lastException);
-    }
-
-    private class DistributedShardRegistrationImpl implements DistributedShardRegistration {
-
-        private final DOMDataTreeIdentifier prefix;
-        private final ActorRef shardedDataTreeActor;
-        private final DistributedShardedDOMDataTree distributedShardedDOMDataTree;
-
-        DistributedShardRegistrationImpl(final DOMDataTreeIdentifier prefix,
-                                         final ActorRef shardedDataTreeActor,
-                                         final DistributedShardedDOMDataTree distributedShardedDOMDataTree) {
-            this.prefix = prefix;
-            this.shardedDataTreeActor = shardedDataTreeActor;
-            this.distributedShardedDOMDataTree = distributedShardedDOMDataTree;
-        }
-
-        @Override
-        public CompletionStage<Void> close() {
-            // first despawn on the local node
-            distributedShardedDOMDataTree.despawnShardFrontend(prefix);
-            // update the config so the remote nodes are updated
-            final Future<Object> ask =
-                    Patterns.ask(shardedDataTreeActor, new PrefixShardRemovalLookup(prefix), SHARD_FUTURE_TIMEOUT);
-
-            final Future<Void> closeFuture = ask.transform(
-                    new Mapper<Object, Void>() {
-                        @Override
-                        public Void apply(final Object parameter) {
-                            return null;
-                        }
-                    },
-                    new Mapper<Throwable, Throwable>() {
-                        @Override
-                        public Throwable apply(final Throwable throwable) {
-                            return throwable;
-                        }
-                    }, actorSystem.dispatcher());
-
-            return FutureConverters.toJava(closeFuture);
-        }
-    }
-
-    // TODO what about producers created by this producer?
-    // They should also be CDSProducers
-    private static final class ProxyProducer extends ForwardingObject implements CDSDataTreeProducer {
-
-        private final DOMDataTreeProducer delegate;
-        private final Collection<DOMDataTreeIdentifier> subtrees;
-        private final ActorRef shardDataTreeActor;
-        private final ActorUtils actorUtils;
-        @GuardedBy("shardAccessMap")
-        private final Map<DOMDataTreeIdentifier, CDSShardAccessImpl> shardAccessMap = new HashMap<>();
-
-        // We don't have to guard access to shardTable in ProxyProducer.
-        // ShardTable's entries relevant to this ProxyProducer shouldn't
-        // change during producer's lifetime.
-        private final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shardTable;
-
-        ProxyProducer(final DOMDataTreeProducer delegate,
-                      final Collection<DOMDataTreeIdentifier> subtrees,
-                      final ActorRef shardDataTreeActor,
-                      final ActorUtils actorUtils,
-                      final DOMDataTreePrefixTable<DOMDataTreeShardRegistration<DOMDataTreeShard>> shardLayout) {
-            this.delegate = requireNonNull(delegate);
-            this.subtrees = requireNonNull(subtrees);
-            this.shardDataTreeActor = requireNonNull(shardDataTreeActor);
-            this.actorUtils = requireNonNull(actorUtils);
-            this.shardTable = requireNonNull(shardLayout);
-        }
-
-        @Nonnull
-        @Override
-        public DOMDataTreeCursorAwareTransaction createTransaction(final boolean isolated) {
-            return delegate.createTransaction(isolated);
-        }
-
-        @Nonnull
-        @Override
-        @SuppressWarnings("checkstyle:hiddenField")
-        public DOMDataTreeProducer createProducer(@Nonnull final Collection<DOMDataTreeIdentifier> subtrees) {
-            // TODO we probably don't need to distribute this to the remote nodes, since once we have this producer
-            // open we surely have the rights to all the subtrees.
-            return delegate.createProducer(subtrees);
-        }
-
-        @Override
-        @SuppressWarnings("checkstyle:IllegalCatch")
-        public void close() throws DOMDataTreeProducerException {
-            delegate.close();
-
-            synchronized (shardAccessMap) {
-                shardAccessMap.values().forEach(CDSShardAccessImpl::close);
-            }
-
-            final Object o = actorUtils.executeOperation(shardDataTreeActor, new ProducerRemoved(subtrees));
-            if (o instanceof DOMDataTreeProducerException) {
-                throw (DOMDataTreeProducerException) o;
-            } else if (o instanceof Throwable) {
-                throw new DOMDataTreeProducerException("Unable to close producer", (Throwable) o);
-            }
-        }
-
-        @Override
-        protected DOMDataTreeProducer delegate() {
-            return delegate;
-        }
-
-        @Nonnull
-        @Override
-        public CDSShardAccess getShardAccess(@Nonnull final DOMDataTreeIdentifier subtree) {
-            checkArgument(subtrees.stream().anyMatch(dataTreeIdentifier -> dataTreeIdentifier.contains(subtree)),
-                "Subtree %s is not controlled by this producer %s", subtree, this);
-
-            final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> lookup =
-                    shardTable.lookup(subtree);
-            checkState(lookup != null, "Subtree %s is not contained in any registered shard.", subtree);
-
-            final DOMDataTreeIdentifier lookupId = lookup.getValue().getPrefix();
-
-            synchronized (shardAccessMap) {
-                if (shardAccessMap.get(lookupId) != null) {
-                    return shardAccessMap.get(lookupId);
-                }
-
-                // TODO Maybe we could have a static factory method and return the same instance
-                // for the same subtrees. But this may not be needed, since only one producer can be
-                // attached to a subtree at a time. It is also unclear how we would close the
-                // ShardAccess in that case.
-                final CDSShardAccessImpl shardAccess = new CDSShardAccessImpl(lookupId, actorUtils);
-                shardAccessMap.put(lookupId, shardAccess);
-                return shardAccess;
-            }
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/LookupTask.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/LookupTask.java
deleted file mode 100644 (file)
index 232983e..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-
-import akka.actor.ActorRef;
-import akka.actor.Status;
-import javax.annotation.Nullable;
-import javax.annotation.concurrent.NotThreadSafe;
-
-/**
- * Base class for lookup tasks. Lookup tasks are supposed to run repeatedly
- * until the lookup succeeds or the maximum number of retries is reached.
- */
-@NotThreadSafe
-abstract class LookupTask implements Runnable {
-    private final int maxRetries;
-    private final ActorRef replyTo;
-    private int retried = 0;
-
-    LookupTask(final ActorRef replyTo, final int maxRetries) {
-        this.replyTo = replyTo;
-        this.maxRetries = maxRetries;
-    }
-
-    abstract void reschedule(int retries);
-
-    void tryReschedule(@Nullable final Throwable throwable) {
-        if (retried <= maxRetries) {
-            retried++;
-            reschedule(retried);
-        } else {
-            fail(throwable);
-        }
-    }
-
-    void fail(@Nullable final Throwable throwable) {
-        if (throwable == null) {
-            replyTo.tell(new Status.Failure(
-                    new DOMDataTreeShardCreationFailedException("Unable to find the backend shard."
-                            + " Failing.")), noSender());
-        } else {
-            replyTo.tell(new Status.Failure(
-                    new DOMDataTreeShardCreationFailedException("Unable to find the backend shard."
-                            + " Failing.", throwable)), noSender());
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/PrefixedShardConfigUpdateHandler.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/PrefixedShardConfigUpdateHandler.java
deleted file mode 100644 (file)
index 1e66728..0000000
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static org.opendaylight.controller.cluster.datastore.utils.ClusterUtils.SHARD_PREFIX_QNAME;
-import static org.opendaylight.controller.cluster.datastore.utils.ClusterUtils.SHARD_REPLICAS_QNAME;
-import static org.opendaylight.controller.cluster.datastore.utils.ClusterUtils.SHARD_REPLICA_QNAME;
-
-import akka.actor.ActorRef;
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.List;
-import java.util.stream.Collectors;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.PrefixShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Listens for changes to prefix-shard-configuration. Resolves the changes and
- * notifies the handling actor with {@link PrefixShardCreated} and
- * {@link PrefixShardRemoved} messages.
- */
-public class PrefixedShardConfigUpdateHandler {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PrefixedShardConfigUpdateHandler.class);
-    private final ActorRef handlingActor;
-    private final MemberName memberName;
-
-    private final EnumMap<LogicalDatastoreType,ListenerRegistration<DOMDataTreeChangeListener>> registrations =
-            new EnumMap<>(LogicalDatastoreType.class);
-
-    public PrefixedShardConfigUpdateHandler(final ActorRef handlingActor, final MemberName memberName) {
-        this.handlingActor = Preconditions.checkNotNull(handlingActor);
-        this.memberName = Preconditions.checkNotNull(memberName);
-    }
-
-    public void initListener(final AbstractDataStore dataStore, final LogicalDatastoreType type) {
-        registrations.put(type, dataStore.registerShardConfigListener(
-                ClusterUtils.SHARD_LIST_PATH, new ShardConfigHandler(memberName, type, handlingActor)));
-    }
-
-    public void close() {
-        registrations.values().forEach(ListenerRegistration::close);
-        registrations.clear();
-    }
-
-    public static final class ShardConfigHandler implements ClusteredDOMDataTreeChangeListener {
-
-        private final MemberName memberName;
-        private final LogicalDatastoreType type;
-        private final ActorRef handlingActor;
-        private final String logName;
-
-        public ShardConfigHandler(final MemberName memberName,
-                           final LogicalDatastoreType type,
-                           final ActorRef handlingActor) {
-            this.memberName = memberName;
-            this.type = type;
-            this.handlingActor = handlingActor;
-            logName = memberName.getName() + "-" + type;
-        }
-
-        @Override
-        public void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
-            changes.forEach(this::resolveChange);
-        }
-
-        private void resolveChange(final DataTreeCandidate candidate) {
-            switch (candidate.getRootNode().getModificationType()) {
-                case UNMODIFIED:
-                    break;
-                case APPEARED:
-                case DELETE:
-                case DISAPPEARED:
-                case SUBTREE_MODIFIED:
-                case WRITE:
-                    resolveModifiedRoot(candidate.getRootNode());
-                    break;
-                default:
-                    break;
-            }
-        }
-
-        private void resolveModifiedRoot(final DataTreeCandidateNode rootNode) {
-
-            LOG.debug("{}: New config received {}", logName, rootNode);
-            LOG.debug("{}: Data after: {}", logName, rootNode.getDataAfter());
-
-            // we're in the shards list, iterate the children and resolve
-            for (final DataTreeCandidateNode childNode : rootNode.getChildNodes()) {
-                switch (childNode.getModificationType()) {
-                    case UNMODIFIED:
-                        break;
-                    case SUBTREE_MODIFIED:
-                    case APPEARED:
-                    case WRITE:
-                        resolveWrittenShard(childNode);
-                        break;
-                    case DELETE:
-                    case DISAPPEARED:
-                        resolveDeletedShard(childNode);
-                        break;
-                    default:
-                        break;
-                }
-            }
-        }
-
-        @SuppressWarnings("unchecked")
-        private void resolveWrittenShard(final DataTreeCandidateNode childNode) {
-            final MapEntryNode entryNode = (MapEntryNode) childNode.getDataAfter().get();
-            final LeafNode<YangInstanceIdentifier> prefix =
-                    (LeafNode<YangInstanceIdentifier>) entryNode.getChild(new NodeIdentifier(SHARD_PREFIX_QNAME)).get();
-
-            final YangInstanceIdentifier identifier = prefix.getValue();
-
-            LOG.debug("{}: Deserialized {} from datastore", logName, identifier);
-
-            final ContainerNode replicas =
-                    (ContainerNode) entryNode.getChild(new NodeIdentifier(SHARD_REPLICAS_QNAME)).get();
-
-            final LeafSetNode<String> replicaList =
-                    (LeafSetNode<String>) replicas.getChild(new NodeIdentifier(SHARD_REPLICA_QNAME)).get();
-
-            final List<MemberName> retReplicas = replicaList.getValue().stream()
-                    .map(child -> MemberName.forName(child.getValue()))
-                    .collect(Collectors.toList());
-
-            LOG.debug("{}: Replicas read from ds {}", logName, retReplicas.toString());
-
-            final PrefixShardConfiguration newConfig =
-                    new PrefixShardConfiguration(new DOMDataTreeIdentifier(type, identifier),
-                            PrefixShardStrategy.NAME, retReplicas);
-
-            LOG.debug("{}: Resulting config {} - sending PrefixShardCreated to {}", logName, newConfig, handlingActor);
-
-            handlingActor.tell(new PrefixShardCreated(newConfig), noSender());
-        }
-
-        private void resolveDeletedShard(final DataTreeCandidateNode childNode) {
-
-            final MapEntryNode entryNode = (MapEntryNode) childNode.getDataBefore().get();
-
-            final LeafNode<YangInstanceIdentifier> prefix =
-                    (LeafNode<YangInstanceIdentifier>) entryNode.getChild(new NodeIdentifier(SHARD_PREFIX_QNAME)).get();
-
-            final YangInstanceIdentifier deleted = prefix.getValue();
-            LOG.debug("{}: Removing shard at {}.", memberName, deleted);
-
-            final DOMDataTreeIdentifier domDataTreeIdentifier = new DOMDataTreeIdentifier(type, deleted);
-            final PrefixShardRemoved message = new PrefixShardRemoved(domDataTreeIdentifier);
-
-            handlingActor.tell(message, noSender());
-        }
-
-        @Override
-        public String toString() {
-            return "ShardConfigHandler [logName=" + logName + ", handlingActor=" + handlingActor + "]";
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/PrefixedShardConfigWriter.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/PrefixedShardConfigWriter.java
deleted file mode 100644 (file)
index 5e7f83e..0000000
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collection;
-import java.util.concurrent.ExecutionException;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.ListNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Writes and removes prefix-based shards' configuration
- * to prefix-shard-configuration. This class is meant to be utilized
- * by {@link DistributedShardedDOMDataTree} for updating
- * prefix-shard-configuration upon creating and de-spawning prefix-based shards.
- */
-class PrefixedShardConfigWriter {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PrefixedShardConfigWriter.class);
-
-    private final ClientLocalHistory history;
-
-    PrefixedShardConfigWriter(final DataStoreClient client) {
-        history = client.createLocalHistory();
-        writeInitialParent();
-    }
-
-    ListenableFuture<Void> writeConfig(final YangInstanceIdentifier path, final Collection<MemberName> replicas) {
-        LOG.debug("Writing config for {}, replicas {}", path, replicas);
-
-        return doSubmit(doWrite(path, replicas));
-    }
-
-    ListenableFuture<Void> removeConfig(final YangInstanceIdentifier path) {
-        LOG.debug("Removing config for {}.", path);
-
-        return doSubmit(doDelete(path));
-    }
-
-    private void writeInitialParent() {
-        final ClientTransaction tx = history.createTransaction();
-
-        final DOMDataTreeWriteCursor cursor = tx.openCursor();
-
-        final ContainerNode root = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(ClusterUtils.PREFIX_SHARDS_QNAME))
-                .withChild(ImmutableMapNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(ClusterUtils.SHARD_LIST_QNAME))
-                        .build())
-                .build();
-
-        cursor.merge(ClusterUtils.PREFIX_SHARDS_PATH.getLastPathArgument(), root);
-        cursor.close();
-
-        final DOMStoreThreePhaseCommitCohort cohort = tx.ready();
-
-        submitBlocking(cohort);
-    }
-
-    private static void submitBlocking(final DOMStoreThreePhaseCommitCohort cohort) {
-        try {
-            doSubmit(cohort).get();
-        } catch (final InterruptedException | ExecutionException e) {
-            LOG.error("Unable to write initial shard config parent.", e);
-        }
-    }
-
-    private static ListenableFuture<Void> doSubmit(final DOMStoreThreePhaseCommitCohort cohort) {
-        final AsyncFunction<Boolean, Void> validateFunction = input -> cohort.preCommit();
-        final AsyncFunction<Void, Void> prepareFunction = input -> cohort.commit();
-
-        final ListenableFuture<Void> prepareFuture = Futures.transformAsync(cohort.canCommit(), validateFunction,
-            MoreExecutors.directExecutor());
-        return Futures.transformAsync(prepareFuture, prepareFunction, MoreExecutors.directExecutor());
-    }
-
-    boolean checkDefaultIsPresent() {
-        final NodeIdentifierWithPredicates pag =
-                new NodeIdentifierWithPredicates(ClusterUtils.SHARD_LIST_QNAME, ClusterUtils.SHARD_PREFIX_QNAME,
-                YangInstanceIdentifier.EMPTY);
-
-        final YangInstanceIdentifier defaultId = ClusterUtils.SHARD_LIST_PATH.node(pag);
-
-        final ClientSnapshot snapshot = history.takeSnapshot();
-        try {
-            return snapshot.exists(defaultId).get();
-        } catch (InterruptedException | ExecutionException e) {
-            LOG.error("Presence check of default shard in configuration failed.", e);
-            return false;
-        } finally {
-            snapshot.abort();
-        }
-    }
-
-    private DOMStoreThreePhaseCommitCohort doWrite(final YangInstanceIdentifier path,
-                                                   final Collection<MemberName> replicas) {
-
-        final ListNodeBuilder<Object, LeafSetEntryNode<Object>> replicaListBuilder =
-                ImmutableLeafSetNodeBuilder.create().withNodeIdentifier(
-                        new NodeIdentifier(ClusterUtils.SHARD_REPLICA_QNAME));
-
-        replicas.forEach(name -> replicaListBuilder.withChild(
-                ImmutableLeafSetEntryNodeBuilder.create()
-                        .withNodeIdentifier(new NodeWithValue<>(ClusterUtils.SHARD_REPLICA_QNAME, name.getName()))
-                        .withValue(name.getName())
-                        .build()));
-
-        final MapEntryNode newEntry = ImmutableMapEntryNodeBuilder.create()
-                .withNodeIdentifier(
-                        new NodeIdentifierWithPredicates(ClusterUtils.SHARD_LIST_QNAME, ClusterUtils.SHARD_PREFIX_QNAME,
-                                path))
-                .withChild(ImmutableLeafNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(ClusterUtils.SHARD_PREFIX_QNAME))
-                        .withValue(path)
-                        .build())
-                .withChild(ImmutableContainerNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(ClusterUtils.SHARD_REPLICAS_QNAME))
-                        .withChild(replicaListBuilder.build())
-                        .build())
-                .build();
-
-        final ClientTransaction tx = history.createTransaction();
-        final DOMDataTreeWriteCursor cursor = tx.openCursor();
-
-        ClusterUtils.SHARD_LIST_PATH.getPathArguments().forEach(cursor::enter);
-
-        cursor.write(newEntry.getIdentifier(), newEntry);
-        cursor.close();
-
-        return tx.ready();
-    }
-
-    private DOMStoreThreePhaseCommitCohort doDelete(final YangInstanceIdentifier path) {
-
-        final ClientTransaction tx = history.createTransaction();
-        final DOMDataTreeWriteCursor cursor = tx.openCursor();
-
-        ClusterUtils.SHARD_LIST_PATH.getPathArguments().forEach(cursor::enter);
-
-        cursor.delete(
-                new NodeIdentifierWithPredicates(ClusterUtils.SHARD_LIST_QNAME, ClusterUtils.SHARD_PREFIX_QNAME, path));
-        cursor.close();
-
-        return tx.ready();
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/RoleChangeListenerActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/RoleChangeListenerActor.java
deleted file mode 100644 (file)
index d33ad50..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
-import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
-import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
-
-/**
- * Proxy actor which acts as a facade for user-provided
- * {@link LeaderLocationListener}. It subscribes to {@link LeaderStateChanged}
- * notifications in its preStart hook and translates them to
- * {@link LeaderLocationListener#onLeaderLocationChanged(LeaderLocation)}
- * events.
- */
-public final class RoleChangeListenerActor extends AbstractUntypedActor {
-    private final LeaderLocationListener leaderLocationListener;
-    private final ActorRef roleChangeNotifier;
-
-    private RoleChangeListenerActor(final ActorRef roleChangeNotifier, final LeaderLocationListener listener) {
-        this.roleChangeNotifier = Preconditions.checkNotNull(roleChangeNotifier);
-        this.leaderLocationListener = Preconditions.checkNotNull(listener);
-    }
-
-    @Override
-    public void preStart() throws Exception {
-        super.preStart();
-        roleChangeNotifier.tell(new RegisterRoleChangeListener(), getSelf());
-    }
-
-    @Override
-    protected void handleReceive(final Object message) {
-        if (message instanceof RoleChangeNotification) {
-            ignoreMessage(message);
-        } else if (message instanceof LeaderStateChanged) {
-            onLeaderStateChanged((LeaderStateChanged) message);
-        } else {
-            unknownMessage(message);
-        }
-    }
-
-    private void onLeaderStateChanged(final LeaderStateChanged message) {
-        final LeaderLocation newLocation;
-        if (message.getLeaderId() == null) {
-            newLocation = LeaderLocation.UNKNOWN;
-        } else if (message.getMemberId().equals(message.getLeaderId())) {
-            newLocation = LeaderLocation.LOCAL;
-        } else {
-            newLocation = LeaderLocation.REMOTE;
-        }
-
-        // TODO should we wrap this in a try/catch block?
-        leaderLocationListener.onLeaderLocationChanged(newLocation);
-    }
-
-    public static Props props(final ActorRef roleChangeNotifier, final LeaderLocationListener listener) {
-        return Props.create(RoleChangeListenerActor.class, roleChangeNotifier, listener);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardProxyProducer.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardProxyProducer.java
deleted file mode 100644 (file)
index b78836b..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import java.util.Collection;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardProducer;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardWriteTransaction;
-
-/**
- * Proxy producer implementation that creates transactions that forward all calls to {@link DataStoreClient}.
- */
-class ShardProxyProducer implements DOMDataTreeShardProducer {
-    private final DOMDataTreeIdentifier shardRoot;
-    private final Collection<DOMDataTreeIdentifier> prefixes;
-    private final ClientLocalHistory history;
-    private DistributedShardModificationFactory modificationFactory;
-
-    ShardProxyProducer(final DOMDataTreeIdentifier shardRoot,
-                       final Collection<DOMDataTreeIdentifier> prefixes,
-                       final DataStoreClient client,
-                       final DistributedShardModificationFactory modificationFactory) {
-        this.shardRoot = Preconditions.checkNotNull(shardRoot);
-        this.prefixes = ImmutableList.copyOf(Preconditions.checkNotNull(prefixes));
-        this.modificationFactory = Preconditions.checkNotNull(modificationFactory);
-        history = Preconditions.checkNotNull(client).createLocalHistory();
-    }
-
-    @Nonnull
-    @Override
-    public Collection<DOMDataTreeIdentifier> getPrefixes() {
-        return prefixes;
-    }
-
-    @Override
-    public DOMDataTreeShardWriteTransaction createTransaction() {
-        return new ShardProxyTransaction(shardRoot, prefixes,
-                modificationFactory.createModification(history.createTransaction()));
-    }
-
-    DistributedShardModificationFactory getModificationFactory() {
-        return modificationFactory;
-    }
-
-    void setModificationFactory(final DistributedShardModificationFactory modificationFactory) {
-        this.modificationFactory = Preconditions.checkNotNull(modificationFactory);
-    }
-}
-
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardProxyTransaction.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardProxyTransaction.java
deleted file mode 100644 (file)
index dcb74fa..0000000
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.Optional;
-import java.util.stream.Collectors;
-import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.spi.shard.DOMDataTreeShardWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.shard.ForeignShardModificationContext;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.store.inmemory.ForeignShardThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Proxy {@link DOMDataTreeShardWriteTransaction} that creates a proxy cursor that translates all calls into
- * {@link ClientTransaction} calls.
- */
-class ShardProxyTransaction implements DOMDataTreeShardWriteTransaction {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ShardProxyTransaction.class);
-
-    private final DOMDataTreeIdentifier shardRoot;
-    private final Collection<DOMDataTreeIdentifier> prefixes;
-    private final DistributedShardModification modification;
-    private ClientTransaction currentTx;
-    private final List<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>();
-
-    private DOMDataTreeWriteCursor cursor = null;
-
-    ShardProxyTransaction(final DOMDataTreeIdentifier shardRoot,
-                          final Collection<DOMDataTreeIdentifier> prefixes,
-                          final DistributedShardModification modification) {
-        this.shardRoot = Preconditions.checkNotNull(shardRoot);
-        this.prefixes = Preconditions.checkNotNull(prefixes);
-        this.modification = Preconditions.checkNotNull(modification);
-    }
-
-    private DOMDataTreeWriteCursor getCursor() {
-        if (cursor == null) {
-            cursor = new DistributedShardModificationCursor(modification, this);
-        }
-        return cursor;
-    }
-
-    @Nonnull
-    @Override
-    public DOMDataTreeWriteCursor createCursor(@Nonnull final DOMDataTreeIdentifier prefix) {
-        checkAvailable(prefix);
-        final YangInstanceIdentifier relativePath = toRelative(prefix.getRootIdentifier());
-        final DOMDataTreeWriteCursor ret = getCursor();
-        ret.enter(relativePath.getPathArguments());
-        return ret;
-    }
-
-    void cursorClosed() {
-        cursor = null;
-        modification.cursorClosed();
-    }
-
-    private void checkAvailable(final DOMDataTreeIdentifier prefix) {
-        for (final DOMDataTreeIdentifier p : prefixes) {
-            if (p.contains(prefix)) {
-                return;
-            }
-        }
-        throw new IllegalArgumentException("Prefix[" + prefix + "] not available for this transaction. "
-                + "Available prefixes: " + prefixes);
-    }
-
-    private YangInstanceIdentifier toRelative(final YangInstanceIdentifier path) {
-        final Optional<YangInstanceIdentifier> relative =
-                path.relativeTo(modification.getPrefix().getRootIdentifier());
-        Preconditions.checkArgument(relative.isPresent());
-        return relative.get();
-    }
-
-    @Override
-    public void ready() {
-        LOG.debug("Readying transaction for shard {}", shardRoot);
-
-        Preconditions.checkNotNull(modification, "Attempting to ready an empty transaction.");
-
-        cohorts.add(modification.seal());
-        for (Entry<DOMDataTreeIdentifier, ForeignShardModificationContext> entry
-                : modification.getChildShards().entrySet()) {
-            cohorts.add(new ForeignShardThreePhaseCommitCohort(entry.getKey(), entry.getValue()));
-        }
-    }
-
-    @Override
-    public void close() {
-        cohorts.forEach(DOMStoreThreePhaseCommitCohort::abort);
-        cohorts.clear();
-
-        if (currentTx != null) {
-            currentTx.abort();
-            currentTx = null;
-        }
-    }
-
-    @Override
-    public ListenableFuture<Void> submit() {
-        LOG.debug("Submitting transaction for shard {}", shardRoot);
-
-        checkTransactionReadied();
-
-        final AsyncFunction<Boolean, Void> validateFunction = input -> prepare();
-        final AsyncFunction<Void, Void> prepareFunction = input -> commit();
-
-        // transform validate into prepare
-        final ListenableFuture<Void> prepareFuture = Futures.transformAsync(validate(), validateFunction,
-            MoreExecutors.directExecutor());
-        // transform prepare into commit and return as submit result
-        return Futures.transformAsync(prepareFuture, prepareFunction, MoreExecutors.directExecutor());
-    }
-
-    private void checkTransactionReadied() {
-        Preconditions.checkState(!cohorts.isEmpty(), "Transaction not readied yet");
-    }
-
-    @Override
-    public ListenableFuture<Boolean> validate() {
-        LOG.debug("Validating transaction for shard {}", shardRoot);
-
-        checkTransactionReadied();
-        final List<ListenableFuture<Boolean>> futures =
-                cohorts.stream().map(DOMStoreThreePhaseCommitCohort::canCommit).collect(Collectors.toList());
-        final SettableFuture<Boolean> ret = SettableFuture.create();
-
-        Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Boolean>>() {
-            @Override
-            public void onSuccess(final List<Boolean> result) {
-                ret.set(true);
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                ret.setException(throwable);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return ret;
-    }
-
-    @Override
-    public ListenableFuture<Void> prepare() {
-        LOG.debug("Preparing transaction for shard {}", shardRoot);
-
-        checkTransactionReadied();
-        final List<ListenableFuture<Void>> futures =
-                cohorts.stream().map(DOMStoreThreePhaseCommitCohort::preCommit).collect(Collectors.toList());
-        final SettableFuture<Void> ret = SettableFuture.create();
-
-        Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Void>>() {
-            @Override
-            public void onSuccess(final List<Void> result) {
-                ret.set(null);
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                ret.setException(throwable);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return ret;
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        LOG.debug("Committing transaction for shard {}", shardRoot);
-
-        checkTransactionReadied();
-        final List<ListenableFuture<Void>> futures =
-                cohorts.stream().map(DOMStoreThreePhaseCommitCohort::commit).collect(Collectors.toList());
-        final SettableFuture<Void> ret = SettableFuture.create();
-
-        Futures.addCallback(Futures.allAsList(futures), new FutureCallback<List<Void>>() {
-            @Override
-            public void onSuccess(final List<Void> result) {
-                ret.set(null);
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                ret.setException(throwable);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return ret;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardedDataTreeActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardedDataTreeActor.java
deleted file mode 100644 (file)
index 679055f..0000000
+++ /dev/null
@@ -1,828 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.actor.Status;
-import akka.actor.Status.Success;
-import akka.cluster.ClusterEvent;
-import akka.cluster.ClusterEvent.MemberExited;
-import akka.cluster.ClusterEvent.MemberRemoved;
-import akka.cluster.ClusterEvent.MemberUp;
-import akka.cluster.ClusterEvent.MemberWeaklyUp;
-import akka.cluster.ClusterEvent.ReachableMember;
-import akka.cluster.ClusterEvent.UnreachableMember;
-import akka.cluster.Member;
-import akka.dispatch.OnComplete;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
-import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-import org.opendaylight.controller.cluster.sharding.messages.LookupPrefixShard;
-import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerCreated;
-import org.opendaylight.controller.cluster.sharding.messages.NotifyProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardCreated;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemovalLookup;
-import org.opendaylight.controller.cluster.sharding.messages.PrefixShardRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerCreated;
-import org.opendaylight.controller.cluster.sharding.messages.ProducerRemoved;
-import org.opendaylight.controller.cluster.sharding.messages.StartConfigShardLookup;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShard;
-import org.opendaylight.mdsal.dom.broker.DOMDataTreeShardRegistration;
-import org.opendaylight.mdsal.dom.spi.DOMDataTreePrefixTableEntry;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.compat.java8.FutureConverters;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Actor that tracks currently open producers/shards on remote nodes and handles notifying remote nodes
- * of producers/shards newly opened on the local node.
- */
-public class ShardedDataTreeActor extends AbstractUntypedPersistentActor {
-
-    private static final Logger LOG = LoggerFactory.getLogger(ShardedDataTreeActor.class);
-
-    private static final String PERSISTENCE_ID = "sharding-service-actor";
-    private static final Timeout DEFAULT_ASK_TIMEOUT = new Timeout(15, TimeUnit.SECONDS);
-
-    static final FiniteDuration SHARD_LOOKUP_TASK_INTERVAL = new FiniteDuration(1L, TimeUnit.SECONDS);
-
-    private final DistributedShardedDOMDataTree shardingService;
-    private final ActorSystem actorSystem;
-    private final ClusterWrapper clusterWrapper;
-    // helper ActorUtils used only for static calls to executeAsync etc.;
-    // for calls that need a specific actor context tied to a datastore, use the one provided in the DistributedDataStore
-    private final ActorUtils actorUtils;
-    private final ShardingServiceAddressResolver resolver;
-    private final AbstractDataStore distributedConfigDatastore;
-    private final AbstractDataStore distributedOperDatastore;
-    private final int lookupTaskMaxRetries;
-
-    private final Map<DOMDataTreeIdentifier, ActorProducerRegistration> idToProducer = new HashMap<>();
-
-    ShardedDataTreeActor(final ShardedDataTreeActorCreator builder) {
-        LOG.debug("Creating ShardedDataTreeActor on {}", builder.getClusterWrapper().getCurrentMemberName());
-
-        shardingService = builder.getShardingService();
-        actorSystem = builder.getActorSystem();
-        clusterWrapper = builder.getClusterWrapper();
-        distributedConfigDatastore = builder.getDistributedConfigDatastore();
-        distributedOperDatastore = builder.getDistributedOperDatastore();
-        lookupTaskMaxRetries = builder.getLookupTaskMaxRetries();
-        actorUtils = distributedConfigDatastore.getActorUtils();
-        resolver = new ShardingServiceAddressResolver(
-                DistributedShardedDOMDataTree.ACTOR_ID, clusterWrapper.getCurrentMemberName());
-
-        clusterWrapper.subscribeToMemberEvents(self());
-    }
-
-    @Override
-    public void preStart() {
-    }
-
-    @Override
-    protected void handleRecover(final Object message) {
-        LOG.debug("Received a recover message {}", message);
-    }
-
-    @Override
-    protected void handleCommand(final Object message) {
-        LOG.debug("{} : Received {}", clusterWrapper.getCurrentMemberName(), message);
-        if (message instanceof ClusterEvent.MemberUp) {
-            memberUp((ClusterEvent.MemberUp) message);
-        } else if (message instanceof ClusterEvent.MemberWeaklyUp) {
-            memberWeaklyUp((ClusterEvent.MemberWeaklyUp) message);
-        } else if (message instanceof ClusterEvent.MemberExited) {
-            memberExited((ClusterEvent.MemberExited) message);
-        } else if (message instanceof ClusterEvent.MemberRemoved) {
-            memberRemoved((ClusterEvent.MemberRemoved) message);
-        } else if (message instanceof ClusterEvent.UnreachableMember) {
-            memberUnreachable((ClusterEvent.UnreachableMember) message);
-        } else if (message instanceof ClusterEvent.ReachableMember) {
-            memberReachable((ClusterEvent.ReachableMember) message);
-        } else if (message instanceof ProducerCreated) {
-            onProducerCreated((ProducerCreated) message);
-        } else if (message instanceof NotifyProducerCreated) {
-            onNotifyProducerCreated((NotifyProducerCreated) message);
-        } else if (message instanceof ProducerRemoved) {
-            onProducerRemoved((ProducerRemoved) message);
-        } else if (message instanceof NotifyProducerRemoved) {
-            onNotifyProducerRemoved((NotifyProducerRemoved) message);
-        } else if (message instanceof PrefixShardCreated) {
-            onPrefixShardCreated((PrefixShardCreated) message);
-        } else if (message instanceof LookupPrefixShard) {
-            onLookupPrefixShard((LookupPrefixShard) message);
-        } else if (message instanceof PrefixShardRemovalLookup) {
-            onPrefixShardRemovalLookup((PrefixShardRemovalLookup) message);
-        } else if (message instanceof PrefixShardRemoved) {
-            onPrefixShardRemoved((PrefixShardRemoved) message);
-        } else if (message instanceof StartConfigShardLookup) {
-            onStartConfigShardLookup((StartConfigShardLookup) message);
-        }
-    }
-
-    @Override
-    public String persistenceId() {
-        return PERSISTENCE_ID;
-    }
-
-    private void memberUp(final MemberUp message) {
-        final MemberName memberName = memberToName(message.member());
-
-        LOG.info("{}: Received MemberUp: memberName: {}, address: {}", persistenceId(), memberName,
-                message.member().address());
-
-        resolver.addPeerAddress(memberName, message.member().address());
-    }
-
-    private void memberWeaklyUp(final MemberWeaklyUp message) {
-        final MemberName memberName = memberToName(message.member());
-
-        LOG.info("{}: Received MemberWeaklyUp: memberName: {}, address: {}", persistenceId(), memberName,
-                message.member().address());
-
-        resolver.addPeerAddress(memberName, message.member().address());
-    }
-
-    private void memberExited(final MemberExited message) {
-        final MemberName memberName = memberToName(message.member());
-
-        LOG.info("{}: Received MemberExited: memberName: {}, address: {}", persistenceId(), memberName,
-                message.member().address());
-
-        resolver.removePeerAddress(memberName);
-    }
-
-    private void memberRemoved(final MemberRemoved message) {
-        final MemberName memberName = memberToName(message.member());
-
-        LOG.info("{}: Received MemberRemoved: memberName: {}, address: {}", persistenceId(), memberName,
-                message.member().address());
-
-        resolver.removePeerAddress(memberName);
-    }
-
-    private void memberUnreachable(final UnreachableMember message) {
-        final MemberName memberName = memberToName(message.member());
-        LOG.debug("Received UnreachableMember: memberName {}, address: {}", memberName, message.member().address());
-
-        resolver.removePeerAddress(memberName);
-    }
-
-    private void memberReachable(final ReachableMember message) {
-        final MemberName memberName = memberToName(message.member());
-        LOG.debug("Received ReachableMember: memberName {}, address: {}", memberName, message.member().address());
-
-        resolver.addPeerAddress(memberName, message.member().address());
-    }
-
-    private void onProducerCreated(final ProducerCreated message) {
-        LOG.debug("Received ProducerCreated: {}", message);
-
-        // fastpath if we have no peers
-        if (resolver.getShardingServicePeerActorAddresses().isEmpty()) {
-            getSender().tell(new Status.Success(null), ActorRef.noSender());
-        }
-
-        final ActorRef sender = getSender();
-        final Collection<DOMDataTreeIdentifier> subtrees = message.getSubtrees();
-
-        final List<CompletableFuture<Object>> futures = new ArrayList<>();
-
-        for (final String address : resolver.getShardingServicePeerActorAddresses()) {
-            final ActorSelection actorSelection = actorSystem.actorSelection(address);
-            futures.add(
-                    FutureConverters.toJava(
-                            actorUtils.executeOperationAsync(
-                                    actorSelection, new NotifyProducerCreated(subtrees), DEFAULT_ASK_TIMEOUT))
-                    .toCompletableFuture());
-        }
-
-        final CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
-                futures.toArray(new CompletableFuture[futures.size()]));
-
-        combinedFuture
-                .thenRun(() -> sender.tell(new Success(null), ActorRef.noSender()))
-                .exceptionally(throwable -> {
-                    sender.tell(new Status.Failure(throwable), self());
-                    return null;
-                });
-    }
-
-    private void onNotifyProducerCreated(final NotifyProducerCreated message) {
-        LOG.debug("Received NotifyProducerCreated: {}", message);
-
-        final Collection<DOMDataTreeIdentifier> subtrees = message.getSubtrees();
-
-        try {
-            final ActorProducerRegistration registration =
-                    new ActorProducerRegistration(shardingService.localCreateProducer(subtrees), subtrees);
-            subtrees.forEach(id -> idToProducer.put(id, registration));
-            sender().tell(new Status.Success(null), self());
-        } catch (final IllegalArgumentException e) {
-            sender().tell(new Status.Failure(e), getSelf());
-        }
-    }
-
-    private void onProducerRemoved(final ProducerRemoved message) {
-        LOG.debug("Received ProducerRemoved: {}", message);
-
-        final List<CompletableFuture<Object>> futures = new ArrayList<>();
-
-        for (final String address : resolver.getShardingServicePeerActorAddresses()) {
-            final ActorSelection selection = actorSystem.actorSelection(address);
-
-            futures.add(FutureConverters.toJava(
-                    actorUtils.executeOperationAsync(selection, new NotifyProducerRemoved(message.getSubtrees())))
-                    .toCompletableFuture());
-        }
-
-        final CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
-                futures.toArray(new CompletableFuture[futures.size()]));
-
-        final ActorRef respondTo = getSender();
-
-        combinedFuture
-                .thenRun(() -> respondTo.tell(new Status.Success(null), self()))
-                .exceptionally(e -> {
-                    respondTo.tell(new Status.Failure(null), self());
-                    return null;
-                });
-
-    }
-
-    private void onNotifyProducerRemoved(final NotifyProducerRemoved message) {
-        LOG.debug("Received NotifyProducerRemoved: {}", message);
-
-        final ActorProducerRegistration registration = idToProducer.remove(message.getSubtrees().iterator().next());
-        if (registration == null) {
-            LOG.warn("The notification contained a path on which no producer is registered, discarding it");
-            getSender().tell(new Status.Success(null), ActorRef.noSender());
-            return;
-        }
-
-        try {
-            registration.close();
-            getSender().tell(new Status.Success(null), ActorRef.noSender());
-        } catch (final DOMDataTreeProducerException e) {
-            LOG.error("Unable to close producer", e);
-            getSender().tell(new Status.Failure(e), ActorRef.noSender());
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void onLookupPrefixShard(final LookupPrefixShard message) {
-        LOG.debug("Member: {}, Received LookupPrefixShard: {}", clusterWrapper.getCurrentMemberName(), message);
-
-        final DOMDataTreeIdentifier prefix = message.getPrefix();
-
-        final ActorUtils context = prefix.getDatastoreType() == LogicalDatastoreType.CONFIGURATION
-                        ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
-
-        // schedule a notification task for the reply
-        actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
-                new ShardCreationLookupTask(actorSystem, getSender(), clusterWrapper,
-                        context, shardingService, prefix, lookupTaskMaxRetries), actorSystem.dispatcher());
-    }
-
-    private void onPrefixShardCreated(final PrefixShardCreated message) {
-        LOG.debug("Member: {}, Received PrefixShardCreated: {}", clusterWrapper.getCurrentMemberName(), message);
-
-        final PrefixShardConfiguration config = message.getConfiguration();
-
-        shardingService.resolveShardAdditions(Collections.singleton(config.getPrefix()));
-    }
-
-    private void onPrefixShardRemovalLookup(final PrefixShardRemovalLookup message) {
-        LOG.debug("Member: {}, Received PrefixShardRemovalLookup: {}", clusterWrapper.getCurrentMemberName(), message);
-
-        final ShardRemovalLookupTask removalTask =
-                new ShardRemovalLookupTask(actorSystem, getSender(),
-                        actorUtils, message.getPrefix(), lookupTaskMaxRetries);
-
-        actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL, removalTask, actorSystem.dispatcher());
-    }
-
-    private void onPrefixShardRemoved(final PrefixShardRemoved message) {
-        LOG.debug("Received PrefixShardRemoved: {}", message);
-
-        shardingService.resolveShardRemovals(Collections.singleton(message.getPrefix()));
-    }
-
-    private void onStartConfigShardLookup(final StartConfigShardLookup message) {
-        LOG.debug("Received StartConfigShardLookup: {}", message);
-
-        final ActorUtils context =
-                message.getType().equals(LogicalDatastoreType.CONFIGURATION)
-                        ? distributedConfigDatastore.getActorUtils() : distributedOperDatastore.getActorUtils();
-
-        // schedule a notification task for the reply
-        actorSystem.scheduler().scheduleOnce(SHARD_LOOKUP_TASK_INTERVAL,
-                new ConfigShardLookupTask(
-                        actorSystem, getSender(), context, message, lookupTaskMaxRetries),
-                actorSystem.dispatcher());
-    }
-
-    private static MemberName memberToName(final Member member) {
-        return MemberName.forName(member.roles().iterator().next());
-    }
-
-    private class ActorProducerRegistration {
-
-        private final DOMDataTreeProducer producer;
-        private final Collection<DOMDataTreeIdentifier> subtrees;
-
-        ActorProducerRegistration(final DOMDataTreeProducer producer,
-                                  final Collection<DOMDataTreeIdentifier> subtrees) {
-            this.producer = producer;
-            this.subtrees = subtrees;
-        }
-
-        void close() throws DOMDataTreeProducerException {
-            producer.close();
-            subtrees.forEach(idToProducer::remove);
-        }
-    }
-
-    private static class ShardFrontendRegistration extends
-            AbstractObjectRegistration<ListenerRegistration<DistributedShardFrontend>> {
-
-        private final ActorRef clientActor;
-        private final ListenerRegistration<DistributedShardFrontend> shardRegistration;
-
-        ShardFrontendRegistration(final ActorRef clientActor,
-                                  final ListenerRegistration<DistributedShardFrontend> shardRegistration) {
-            super(shardRegistration);
-            this.clientActor = clientActor;
-            this.shardRegistration = shardRegistration;
-        }
-
-        @Override
-        protected void removeRegistration() {
-            shardRegistration.close();
-            clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
-        }
-    }
-
-    /**
-     * Handles the lookup step of cds shard creation once the configuration is updated.
-     */
-    private static class ShardCreationLookupTask extends LookupTask {
-
-        private final ActorSystem system;
-        private final ActorRef replyTo;
-        private final ClusterWrapper clusterWrapper;
-        private final ActorUtils context;
-        private final DistributedShardedDOMDataTree shardingService;
-        private final DOMDataTreeIdentifier toLookup;
-        private final int lookupMaxRetries;
-
-        ShardCreationLookupTask(final ActorSystem system,
-                                final ActorRef replyTo,
-                                final ClusterWrapper clusterWrapper,
-                                final ActorUtils context,
-                                final DistributedShardedDOMDataTree shardingService,
-                                final DOMDataTreeIdentifier toLookup,
-                                final int lookupMaxRetries) {
-            super(replyTo, lookupMaxRetries);
-            this.system = system;
-            this.replyTo = replyTo;
-            this.clusterWrapper = clusterWrapper;
-            this.context = context;
-            this.shardingService = shardingService;
-            this.toLookup = toLookup;
-            this.lookupMaxRetries = lookupMaxRetries;
-        }
-
-        @Override
-        public void run() {
-            final Future<ActorRef> localShardFuture =
-                    context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
-
-            localShardFuture.onComplete(new OnComplete<ActorRef>() {
-                @Override
-                public void onComplete(Throwable throwable, ActorRef actorRef) {
-                    if (throwable != null) {
-                        tryReschedule(throwable);
-                    } else {
-                        LOG.debug("Local backend for shard[{}] lookup successful, starting leader lookup..", toLookup);
-
-                        system.scheduler().scheduleOnce(
-                                SHARD_LOOKUP_TASK_INTERVAL,
-                                new ShardLeaderLookupTask(system, replyTo, context, clusterWrapper, actorRef,
-                                        shardingService, toLookup, lookupMaxRetries),
-                                system.dispatcher());
-                    }
-                }
-            }, system.dispatcher());
-        }
-
-        @Override
-        void reschedule(int retries) {
-            LOG.debug("Local backend for shard[{}] not found, try: {}, rescheduling..", toLookup, retries);
-            system.scheduler().scheduleOnce(
-                    SHARD_LOOKUP_TASK_INTERVAL, ShardCreationLookupTask.this, system.dispatcher());
-        }
-    }
-
-    /**
-     * Handles the readiness step by waiting for a leader of the created shard.
-     */
-    private static class ShardLeaderLookupTask extends LookupTask {
-
-        private final ActorSystem system;
-        private final ActorRef replyTo;
-        private final ActorUtils context;
-        private final ClusterWrapper clusterWrapper;
-        private final ActorRef shard;
-        private final DistributedShardedDOMDataTree shardingService;
-        private final DOMDataTreeIdentifier toLookup;
-        private final int lookupMaxRetries;
-
-        ShardLeaderLookupTask(final ActorSystem system,
-                              final ActorRef replyTo,
-                              final ActorUtils context,
-                              final ClusterWrapper clusterWrapper,
-                              final ActorRef shard,
-                              final DistributedShardedDOMDataTree shardingService,
-                              final DOMDataTreeIdentifier toLookup,
-                              final int lookupMaxRetries) {
-            super(replyTo, lookupMaxRetries);
-            this.system = system;
-            this.replyTo = replyTo;
-            this.context = context;
-            this.clusterWrapper = clusterWrapper;
-            this.shard = shard;
-            this.shardingService = shardingService;
-            this.toLookup = toLookup;
-            this.lookupMaxRetries = lookupMaxRetries;
-        }
-
-        @Override
-        public void run() {
-
-            final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
-
-            ask.onComplete(new OnComplete<Object>() {
-                @Override
-                public void onComplete(final Throwable throwable, final Object findLeaderReply) {
-                    if (throwable != null) {
-                        tryReschedule(throwable);
-                    } else {
-                        final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
-                        final java.util.Optional<String> leaderActor = findLeader.getLeaderActor();
-                        if (leaderActor.isPresent()) {
-                            // leader is found, backend seems ready, check if the frontend is ready
-                            LOG.debug("{} - Leader for shard[{}] backend ready, starting frontend lookup..",
-                                    clusterWrapper.getCurrentMemberName(), toLookup);
-                            system.scheduler().scheduleOnce(
-                                    SHARD_LOOKUP_TASK_INTERVAL,
-                                    new FrontendLookupTask(
-                                            system, replyTo, shardingService, toLookup, lookupMaxRetries),
-                                    system.dispatcher());
-                        } else {
-                            tryReschedule(null);
-                        }
-                    }
-                }
-            }, system.dispatcher());
-
-        }
-
-        @Override
-        void reschedule(int retries) {
-            LOG.debug("{} - Leader for shard[{}] backend not found on try: {}, retrying..",
-                    clusterWrapper.getCurrentMemberName(), toLookup, retries);
-            system.scheduler().scheduleOnce(
-                    SHARD_LOOKUP_TASK_INTERVAL, ShardLeaderLookupTask.this, system.dispatcher());
-        }
-    }
-
-    /**
-     * After the backend is ready, this handles the last step: checking whether we have a frontend shard for the
-     * backend. That should already be the case by the time the backend is created, so this is just a sanity check
-     * in case they race. Once it passes, the future for the cds shard creation completes and the shard is ready.
-     */
-    private static final class FrontendLookupTask extends LookupTask {
-
-        private final ActorSystem system;
-        private final ActorRef replyTo;
-        private final DistributedShardedDOMDataTree shardingService;
-        private final DOMDataTreeIdentifier toLookup;
-
-        FrontendLookupTask(final ActorSystem system,
-                           final ActorRef replyTo,
-                           final DistributedShardedDOMDataTree shardingService,
-                           final DOMDataTreeIdentifier toLookup,
-                           final int lookupMaxRetries) {
-            super(replyTo, lookupMaxRetries);
-            this.system = system;
-            this.replyTo = replyTo;
-            this.shardingService = shardingService;
-            this.toLookup = toLookup;
-        }
-
-        @Override
-        public void run() {
-            final DOMDataTreePrefixTableEntry<DOMDataTreeShardRegistration<DOMDataTreeShard>> entry =
-                    shardingService.lookupShardFrontend(toLookup);
-
-            if (entry != null && tableEntryIdCheck(entry, toLookup) && entry.getValue() != null) {
-                replyTo.tell(new Success(null), ActorRef.noSender());
-            } else {
-                tryReschedule(null);
-            }
-        }
-
-        private boolean tableEntryIdCheck(final DOMDataTreePrefixTableEntry<?> entry,
-                                          final DOMDataTreeIdentifier prefix) {
-            if (entry == null) {
-                return false;
-            }
-
-            if (YangInstanceIdentifier.EMPTY.equals(prefix.getRootIdentifier())) {
-                return true;
-            }
-
-            if (entry.getIdentifier().equals(toLookup.getRootIdentifier().getLastPathArgument())) {
-                return true;
-            }
-
-            return false;
-        }
-
-        @Override
-        void reschedule(int retries) {
-            LOG.debug("Frontend for shard[{}] not found on try: {}, retrying..", toLookup, retries);
-            system.scheduler().scheduleOnce(
-                    SHARD_LOOKUP_TASK_INTERVAL, FrontendLookupTask.this, system.dispatcher());
-        }
-    }
-
-    /**
-     * Task that is run once a cds shard registration is closed and completes once the backend shard is removed from the
-     * configuration.
-     */
-    private static class ShardRemovalLookupTask extends LookupTask {
-
-        private final ActorSystem system;
-        private final ActorRef replyTo;
-        private final ActorUtils context;
-        private final DOMDataTreeIdentifier toLookup;
-
-        ShardRemovalLookupTask(final ActorSystem system,
-                               final ActorRef replyTo,
-                               final ActorUtils context,
-                               final DOMDataTreeIdentifier toLookup,
-                               final int lookupMaxRetries) {
-            super(replyTo, lookupMaxRetries);
-            this.system = system;
-            this.replyTo = replyTo;
-            this.context = context;
-            this.toLookup = toLookup;
-        }
-
-        @Override
-        public void run() {
-            final Future<ActorRef> localShardFuture =
-                    context.findLocalShardAsync(ClusterUtils.getCleanShardName(toLookup.getRootIdentifier()));
-
-            localShardFuture.onComplete(new OnComplete<ActorRef>() {
-                @Override
-                public void onComplete(Throwable throwable, ActorRef actorRef) {
-                    if (throwable != null) {
-                        //TODO Shouldn't we check why findLocalShard failed?
-                        LOG.debug("Backend shard[{}] removal lookup successful notifying the registration future",
-                                toLookup);
-                        replyTo.tell(new Success(null), ActorRef.noSender());
-                    } else {
-                        tryReschedule(null);
-                    }
-                }
-            }, system.dispatcher());
-        }
-
-        @Override
-        void reschedule(int retries) {
-            LOG.debug("Backend shard[{}] removal lookup failed, shard is still present, try: {}, rescheduling..",
-                    toLookup, retries);
-            system.scheduler().scheduleOnce(
-                    SHARD_LOOKUP_TASK_INTERVAL, ShardRemovalLookupTask.this, system.dispatcher());
-        }
-    }
-
-    /**
-     * Task for handling the lookup of the backend for the configuration shard.
-     */
-    private static class ConfigShardLookupTask extends LookupTask {
-
-        private final ActorSystem system;
-        private final ActorRef replyTo;
-        private final ActorUtils context;
-
-        ConfigShardLookupTask(final ActorSystem system,
-                              final ActorRef replyTo,
-                              final ActorUtils context,
-                              final StartConfigShardLookup message,
-                              final int lookupMaxRetries) {
-            super(replyTo, lookupMaxRetries);
-            this.system = system;
-            this.replyTo = replyTo;
-            this.context = context;
-        }
-
-        @Override
-        void reschedule(int retries) {
-            LOG.debug("Local backend for prefix configuration shard not found, try: {}, rescheduling..", retries);
-            system.scheduler().scheduleOnce(
-                    SHARD_LOOKUP_TASK_INTERVAL, ConfigShardLookupTask.this, system.dispatcher());
-        }
-
-        @Override
-        public void run() {
-            final Optional<ActorRef> localShard =
-                    context.findLocalShard(ClusterUtils.PREFIX_CONFIG_SHARD_ID);
-
-            if (!localShard.isPresent()) {
-                tryReschedule(null);
-            } else {
-                LOG.debug("Local backend for prefix configuration shard lookup successful");
-                replyTo.tell(new Status.Success(null), ActorRef.noSender());
-            }
-        }
-    }
-
-    /**
-     * Task for handling the readiness state of the config shard. Reports success once the leader is elected.
-     */
-    private static class ConfigShardReadinessTask extends LookupTask {
-
-        private final ActorSystem system;
-        private final ActorRef replyTo;
-        private final ActorUtils context;
-        private final ClusterWrapper clusterWrapper;
-        private final ActorRef shard;
-
-        ConfigShardReadinessTask(final ActorSystem system,
-                                 final ActorRef replyTo,
-                                 final ActorUtils context,
-                                 final ClusterWrapper clusterWrapper,
-                                 final ActorRef shard,
-                                 final int lookupMaxRetries) {
-            super(replyTo, lookupMaxRetries);
-            this.system = system;
-            this.replyTo = replyTo;
-            this.context = context;
-            this.clusterWrapper = clusterWrapper;
-            this.shard = shard;
-        }
-
-        @Override
-        void reschedule(int retries) {
-            LOG.debug("{} - Leader for config shard not found on try: {}, retrying..",
-                    clusterWrapper.getCurrentMemberName(), retries);
-            system.scheduler().scheduleOnce(
-                    SHARD_LOOKUP_TASK_INTERVAL, ConfigShardReadinessTask.this, system.dispatcher());
-        }
-
-        @Override
-        public void run() {
-            final Future<Object> ask = Patterns.ask(shard, FindLeader.INSTANCE, context.getOperationTimeout());
-
-            ask.onComplete(new OnComplete<Object>() {
-                @Override
-                public void onComplete(final Throwable throwable, final Object findLeaderReply) {
-                    if (throwable != null) {
-                        tryReschedule(throwable);
-                    } else {
-                        final FindLeaderReply findLeader = (FindLeaderReply) findLeaderReply;
-                        final java.util.Optional<String> leaderActor = findLeader.getLeaderActor();
-                        if (leaderActor.isPresent()) {
-                            // leader is found, backend seems ready, check if the frontend is ready
-                            LOG.debug("{} - Leader for config shard is ready. Ending lookup.",
-                                    clusterWrapper.getCurrentMemberName());
-                            replyTo.tell(new Status.Success(null), ActorRef.noSender());
-                        } else {
-                            tryReschedule(null);
-                        }
-                    }
-                }
-            }, system.dispatcher());
-        }
-    }
-
-    public static class ShardedDataTreeActorCreator {
-
-        private DistributedShardedDOMDataTree shardingService;
-        private AbstractDataStore distributedConfigDatastore;
-        private AbstractDataStore distributedOperDatastore;
-        private ActorSystem actorSystem;
-        private ClusterWrapper cluster;
-        private int maxRetries;
-
-        public DistributedShardedDOMDataTree getShardingService() {
-            return shardingService;
-        }
-
-        public ShardedDataTreeActorCreator setShardingService(final DistributedShardedDOMDataTree shardingService) {
-            this.shardingService = shardingService;
-            return this;
-        }
-
-        public ActorSystem getActorSystem() {
-            return actorSystem;
-        }
-
-        public ShardedDataTreeActorCreator setActorSystem(final ActorSystem actorSystem) {
-            this.actorSystem = actorSystem;
-            return this;
-        }
-
-        public ShardedDataTreeActorCreator setClusterWrapper(final ClusterWrapper clusterWrapper) {
-            this.cluster = clusterWrapper;
-            return this;
-        }
-
-        public ClusterWrapper getClusterWrapper() {
-            return cluster;
-        }
-
-        public AbstractDataStore getDistributedConfigDatastore() {
-            return distributedConfigDatastore;
-        }
-
-        public ShardedDataTreeActorCreator setDistributedConfigDatastore(
-                final AbstractDataStore distributedConfigDatastore) {
-            this.distributedConfigDatastore = distributedConfigDatastore;
-            return this;
-        }
-
-        public AbstractDataStore getDistributedOperDatastore() {
-            return distributedOperDatastore;
-        }
-
-        public ShardedDataTreeActorCreator setDistributedOperDatastore(
-                final AbstractDataStore distributedOperDatastore) {
-            this.distributedOperDatastore = distributedOperDatastore;
-            return this;
-        }
-
-        public ShardedDataTreeActorCreator setLookupTaskMaxRetries(final int newMaxRetries) {
-            this.maxRetries = newMaxRetries;
-            return this;
-        }
-
-        public int getLookupTaskMaxRetries() {
-            return maxRetries;
-        }
-
-        private void verify() {
-            Preconditions.checkNotNull(shardingService);
-            Preconditions.checkNotNull(actorSystem);
-            Preconditions.checkNotNull(cluster);
-            Preconditions.checkNotNull(distributedConfigDatastore);
-            Preconditions.checkNotNull(distributedOperDatastore);
-        }
-
-        public Props props() {
-            verify();
-            return Props.create(ShardedDataTreeActor.class, this);
-        }
-    }
-}
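
For reference, the ShardedDataTreeActorCreator removed above is a plain fluent builder: callers populate the sharding service, actor system, cluster wrapper and both datastores, then hand the resulting props() to ActorSystem.actorOf(). A minimal usage sketch; the variable names, retry count and actor name below are illustrative and not taken from the removed code:

    ActorRef shardedDataTreeActor = actorSystem.actorOf(
            new ShardedDataTreeActor.ShardedDataTreeActorCreator()
                    .setShardingService(shardingService)            // DistributedShardedDOMDataTree
                    .setActorSystem(actorSystem)
                    .setClusterWrapper(clusterWrapper)
                    .setDistributedConfigDatastore(configDatastore)
                    .setDistributedOperDatastore(operDatastore)
                    .setLookupTaskMaxRetries(10)                    // illustrative value
                    .props(),
            "ShardedDataTreeActor");                                // illustrative actor name
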
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardingServiceAddressResolver.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/ShardingServiceAddressResolver.java
deleted file mode 100644 (file)
index b443a69..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import akka.actor.Address;
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-
-/**
- * Resolver for remote {@link ShardedDataTreeActor}s.
- */
-public class ShardingServiceAddressResolver {
-
-    private final ConcurrentMap<MemberName, Address> memberNameToAddress = new ConcurrentHashMap<>();
-    private final String shardingServiceActorIdentifier;
-    private final MemberName localMemberName;
-
-    public ShardingServiceAddressResolver(final String shardingServiceActorIdentifier,
-                                          final MemberName localMemberName) {
-        this.shardingServiceActorIdentifier = shardingServiceActorIdentifier;
-        this.localMemberName = localMemberName;
-    }
-
-    void addPeerAddress(final MemberName memberName, final Address address) {
-        memberNameToAddress.put(memberName, address);
-    }
-
-    void removePeerAddress(final MemberName memberName) {
-        memberNameToAddress.remove(memberName);
-    }
-
-    Address getPeerAddress(final MemberName memberName) {
-        return memberNameToAddress.get(memberName);
-    }
-
-    StringBuilder getActorPathBuilder(final Address address) {
-        return new StringBuilder().append(address.toString()).append("/user/").append(shardingServiceActorIdentifier);
-    }
-
-    Collection<String> getShardingServicePeerActorAddresses() {
-        final Collection<String> peerAddresses =
-                memberNameToAddress
-                        .entrySet()
-                        .stream()
-                        .filter(entry -> !localMemberName.equals(entry.getKey()))
-                        .map(entry -> getActorPathBuilder(entry.getValue()).toString())
-                        .collect(Collectors.toList());
-
-        return peerAddresses;
-    }
-
-    public String resolve(final MemberName memberName) {
-        Preconditions.checkNotNull(memberName);
-        final Address address = memberNameToAddress.get(memberName);
-        Preconditions.checkNotNull(address, "Requested member[%s] is not present in the resolver ",
-                memberName.toString());
-
-        return getActorPathBuilder(address).toString();
-    }
-}
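
For reference, the resolver removed above maps cluster MemberNames to actor system Addresses and builds remote actor paths of the form <address>/user/<shardingServiceActorIdentifier>. A hedged sketch of its use; the identifier string, member names and address URI are made-up examples:

    MemberName localMemberName = MemberName.forName("member-1");
    ShardingServiceAddressResolver resolver =
            new ShardingServiceAddressResolver("shardingservice", localMemberName);

    // Record the address learned for a peer, e.g. from a cluster MemberUp event.
    resolver.addPeerAddress(MemberName.forName("member-2"),
            AddressFromURIString.parse("akka.tcp://opendaylight-cluster-data@10.0.0.2:2550"));

    // Builds "akka.tcp://opendaylight-cluster-data@10.0.0.2:2550/user/shardingservice".
    String peerActorPath = resolver.resolve(MemberName.forName("member-2"));
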
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/InitConfigListener.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/InitConfigListener.java
deleted file mode 100644 (file)
index fbdf3f8..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-/**
- * Message sent to the local ShardManager, once the shard configuration shard is ready and the ShardManager should
- * start its listener.
- */
-public final class InitConfigListener {
-
-    public static final InitConfigListener INSTANCE = new InitConfigListener();
-
-    private InitConfigListener() {
-
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/LookupPrefixShard.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/LookupPrefixShard.java
deleted file mode 100644 (file)
index 9ea641c..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Sent to the local {@link ShardedDataTreeActor} when a shard has been created
- * on the local node. The local actor should notify the remote actors with {@link PrefixShardCreated}, which should
- * create the required frontend/backend shards.
- */
-@Beta
-public class LookupPrefixShard implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    private final DOMDataTreeIdentifier prefix;
-
-    public LookupPrefixShard(final DOMDataTreeIdentifier prefix) {
-        this.prefix = Preconditions.checkNotNull(prefix);
-    }
-
-    public DOMDataTreeIdentifier getPrefix() {
-        return prefix;
-    }
-
-
-    @Override
-    public String toString() {
-        return "LookupPrefixShard{"
-                + "prefix="
-                + prefix
-                + '}';
-    }
-}
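
The lookup messages in this package were typically sent to the local ShardedDataTreeActor with the ask pattern and answered with akka.actor.Status.Success once the corresponding lookup task finished. A rough sketch under that assumption; the actor reference, prefix, timeout and logger below are illustrative:

    final Future<Object> lookupDone = Patterns.ask(shardedDataTreeActor,
            new LookupPrefixShard(prefix), Timeout.apply(30, TimeUnit.SECONDS));

    lookupDone.onComplete(new OnComplete<Object>() {
        @Override
        public void onComplete(final Throwable failure, final Object reply) {
            if (failure != null) {
                LOG.warn("Prefix shard lookup for {} failed", prefix, failure);
            } else {
                // reply is a Status.Success once the backend shard, its leader and the
                // frontend registration have all been found.
                LOG.debug("Prefix shard {} is ready", prefix);
            }
        }
    }, actorSystem.dispatcher());
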
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/NotifyProducerCreated.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/NotifyProducerCreated.java
deleted file mode 100644 (file)
index fb7f017..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to remote {@link ShardedDataTreeActor}s when attempting
- * to create a producer. The remote node should attempt to create a producer in the local sharding service and reply
- * with success/failure based on the attempt result.
- */
-@Beta
-public class NotifyProducerCreated implements Serializable {
-    private static final long serialVersionUID = 1L;
-    private final Collection<DOMDataTreeIdentifier> subtrees;
-
-    public NotifyProducerCreated(final Collection<DOMDataTreeIdentifier> subtrees) {
-        this.subtrees = ImmutableList.copyOf(Preconditions.checkNotNull(subtrees));
-    }
-
-    public Collection<DOMDataTreeIdentifier> getSubtrees() {
-        return subtrees;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/NotifyProducerRemoved.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/NotifyProducerRemoved.java
deleted file mode 100644 (file)
index 24d871e..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import java.io.Serializable;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to remote {@link ShardedDataTreeActor}s when attempting
- * to close a producer. The remote node should attempt to close a producer in the local sharding service and reply
- * with success/failure based on the result. If the producer does not exist on this node, Success is reported.
- */
-@Beta
-public class NotifyProducerRemoved implements Serializable {
-    private static final long serialVersionUID = 1L;
-    private final Collection<DOMDataTreeIdentifier> subtrees;
-
-    public NotifyProducerRemoved(final Collection<DOMDataTreeIdentifier> subtrees) {
-        this.subtrees = ImmutableList.copyOf(Preconditions.checkNotNull(subtrees));
-    }
-
-    public Collection<DOMDataTreeIdentifier> getSubtrees() {
-        return subtrees;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardCreated.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardCreated.java
deleted file mode 100644 (file)
index d468992..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import org.opendaylight.controller.cluster.datastore.config.PrefixShardConfiguration;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-
-/**
- * Message sent to the local {@link ShardedDataTreeActor} when a clustered
- * shard was created locally. The backend shards/replicas will be handled by the ShardManager but the
- * {@link ShardedDataTreeActor} needs to handle the registration of the
- * frontends into the {@link org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService}. The configuration only
- * contains the member nodes to which it has not yet been distributed. The last node will receive a
- * PrefixShardConfiguration with only its own member present.
- */
-@Beta
-public class PrefixShardCreated {
-    private final PrefixShardConfiguration configuration;
-
-    public PrefixShardCreated(final PrefixShardConfiguration configuration) {
-        this.configuration = configuration;
-    }
-
-    public PrefixShardConfiguration getConfiguration() {
-        return configuration;
-    }
-
-    @Override
-    public String toString() {
-        return "PrefixShardCreated{"
-                + "configuration=" + configuration
-                + '}';
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardRemovalLookup.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardRemovalLookup.java
deleted file mode 100644 (file)
index d6a4319..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Sent to the local {@link ShardedDataTreeActor} to initiate the lookup of the shard. Once the shard is removed from
- * the system entirely, the actor responds with a success.
- */
-public class PrefixShardRemovalLookup {
-
-    private final DOMDataTreeIdentifier prefix;
-
-    public PrefixShardRemovalLookup(final DOMDataTreeIdentifier prefix) {
-
-        this.prefix = Preconditions.checkNotNull(prefix);
-    }
-
-    public DOMDataTreeIdentifier getPrefix() {
-        return prefix;
-    }
-
-    @Override
-    public String toString() {
-        return "PrefixShardRemovalLookup{"
-                + "prefix=" + prefix
-                + '}';
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardRemoved.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/PrefixShardRemoved.java
deleted file mode 100644 (file)
index 1890b64..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import java.io.Serializable;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to remote {@link ShardedDataTreeActor}s when there is an attempt to remove the shard.
- * The ShardedDataTreeActor should remove the shard from the current configuration so that the change is picked up
- * in the backend ShardManager.
- */
-@Beta
-public class PrefixShardRemoved implements Serializable {
-    private static final long serialVersionUID = 1L;
-
-    private final DOMDataTreeIdentifier prefix;
-
-    public PrefixShardRemoved(final DOMDataTreeIdentifier prefix) {
-        this.prefix = prefix;
-    }
-
-    public DOMDataTreeIdentifier getPrefix() {
-        return prefix;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/ProducerCreated.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/ProducerCreated.java
deleted file mode 100644 (file)
index efdf557..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to local {@link ShardedDataTreeActor}s when there is an
- * attempt to create a producer on the local node.
- */
-@Beta
-public class ProducerCreated {
-    private final Collection<DOMDataTreeIdentifier> subtrees;
-
-    public ProducerCreated(final Collection<DOMDataTreeIdentifier> subtrees) {
-        this.subtrees = subtrees;
-    }
-
-    public Collection<DOMDataTreeIdentifier> getSubtrees() {
-        return subtrees;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/ProducerRemoved.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/ProducerRemoved.java
deleted file mode 100644 (file)
index 1cc33a0..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import com.google.common.annotations.Beta;
-import java.util.Collection;
-import org.opendaylight.controller.cluster.sharding.ShardedDataTreeActor;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-
-/**
- * Message sent to local {@link ShardedDataTreeActor}s when there is an
- * attempt to close a producer on the local node.
- */
-@Beta
-public class ProducerRemoved {
-
-    private final Collection<DOMDataTreeIdentifier> subtrees;
-
-    public ProducerRemoved(final Collection<DOMDataTreeIdentifier> subtrees) {
-        this.subtrees = subtrees;
-    }
-
-    public Collection<DOMDataTreeIdentifier> getSubtrees() {
-        return subtrees;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/StartConfigShardLookup.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/sharding/messages/StartConfigShardLookup.java
deleted file mode 100644 (file)
index 22e5dbf..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding.messages;
-
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-
-/**
- * Message that should be sent to ShardedDataTreeActor when the lookup of the prefix config shard should begin.
- * Replied to with Success once the shard has a leader.
- */
-public class StartConfigShardLookup {
-
-    private LogicalDatastoreType type;
-
-    public StartConfigShardLookup(final LogicalDatastoreType type) {
-        this.type = type;
-    }
-
-    public LogicalDatastoreType getType() {
-        return type;
-    }
-
-    @Override
-    public String toString() {
-        return "StartConfigShardLookup{type=" + type + '}';
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/OSGI-INF/blueprint/clustered-datastore.xml b/opendaylight/md-sal/sal-distributed-datastore/src/main/resources/OSGI-INF/blueprint/clustered-datastore.xml
deleted file mode 100644 (file)
index 13515cd..0000000
+++ /dev/null
@@ -1,213 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.2.0">
-
-  <cm:property-placeholder persistent-id="org.opendaylight.controller.cluster.datastore.broker" update-strategy="none">
-    <cm:default-properties>
-      <cm:property name="max-data-broker-future-callback-queue-size" value="1000"/>
-      <cm:property name="max-data-broker-future-callback-pool-size" value="20"/>
-    </cm:default-properties>
-  </cm:property-placeholder>
-
-  <odl:static-reference id="schemaService" interface="org.opendaylight.mdsal.dom.api.DOMSchemaService" />
-
-  <reference id="classLoadingStrategy" interface="org.opendaylight.mdsal.binding.generator.api.ClassLoadingStrategy" />
-
-  <!-- ActorSystemProvider -->
-
-  <bean id="bundleClassLoader" class="org.opendaylight.controller.cluster.akka.osgi.impl.BundleClassLoaderFactory"
-          factory-method="createClassLoader">
-    <argument ref="blueprintBundleContext" />
-  </bean>
-
-  <bean id="actorSystemProps" class="org.opendaylight.controller.cluster.akka.osgi.impl.QuarantinedMonitorActorPropsFactory"
-          factory-method="createProps">
-    <argument ref="blueprintBundleContext" />
-  </bean>
-
-  <bean id="akkaReader" class="org.opendaylight.controller.cluster.common.actor.FileAkkaConfigurationReader"/>
-  <bean id="akkaConfig" class="org.opendaylight.controller.cluster.akka.impl.AkkaConfigFactory"
-          factory-method="createAkkaConfig">
-    <argument ref="akkaReader" />
-  </bean>
-
-  <bean id="actorSystemProvider" class="org.opendaylight.controller.cluster.akka.impl.ActorSystemProviderImpl"
-          destroy-method="close">
-    <argument ref="bundleClassLoader" />
-    <argument ref="actorSystemProps"/>
-    <argument ref="akkaConfig"/>
-  </bean>
-
-  <service ref="actorSystemProvider" interface="org.opendaylight.controller.cluster.ActorSystemProvider"/>
-
-  <!-- Datastore properties -->
-  <cm:cm-properties id="datastoreProps" persistent-id="org.opendaylight.controller.cluster.datastore"/>
-
-  <!-- Distributed Config Datastore -->
-  <bean id="datastoreSnapshotRestore" class="org.opendaylight.controller.cluster.datastore.DatastoreSnapshotRestore"
-          factory-method="instance">
-    <argument value="./clustered-datastore-restore"/>
-  </bean>
-
-  <bean id="introspectorFactory" class="org.opendaylight.controller.cluster.datastore.DatastoreContextIntrospectorFactory">
-    <argument ref="schemaService"/>
-    <argument ref="classLoadingStrategy"/>
-  </bean>
-
-  <bean id="introspectorConfig" factory-ref="introspectorFactory" factory-method="newInstance">
-    <argument type="org.opendaylight.mdsal.common.api.LogicalDatastoreType" value="CONFIGURATION"/>
-  </bean>
-
-  <bean id="updaterConfig" class="org.opendaylight.controller.cluster.datastore.DatastoreContextPropertiesUpdater">
-    <cm:managed-properties persistent-id="org.opendaylight.controller.cluster.datastore" update-strategy="component-managed" update-method="update"/>
-    <argument ref="introspectorConfig"/>
-    <argument ref="datastoreProps"/>
-  </bean>
-
-  <bean id="configDatastore" class="org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory"
-          factory-method="createInstance" destroy-method="close">
-    <argument ref="schemaService"/>
-    <argument>
-      <bean factory-ref="introspectorConfig" factory-method="getContext" />
-    </argument>
-    <argument ref="datastoreSnapshotRestore"/>
-    <argument ref="actorSystemProvider"/>
-    <argument ref="introspectorConfig"/>
-    <argument ref="updaterConfig"/>
-  </bean>
-
-  <service ref="configDatastore" odl:type="distributed-config">
-    <interfaces>
-      <value>org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface</value>
-    </interfaces>
-  </service>
-
-  <!-- Distributed Operational Datastore -->
-  <bean id="fileModuleShardConfigProvider" class="org.opendaylight.controller.cluster.datastore.config.FileModuleShardConfigProvider">
-    <argument value="./configuration/initial/module-shards.conf"/>
-    <argument value="./configuration/initial/modules.conf"/>
-  </bean>
-
-  <bean id="configurationImpl" class="org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl">
-    <argument ref="fileModuleShardConfigProvider"/>
-  </bean>
-
-  <bean id="introspectorOper" factory-ref="introspectorFactory" factory-method="newInstance">
-    <argument type="org.opendaylight.mdsal.common.api.LogicalDatastoreType" value="OPERATIONAL"/>
-  </bean>
-
-  <bean id="updaterOper" class="org.opendaylight.controller.cluster.datastore.DatastoreContextPropertiesUpdater">
-    <cm:managed-properties persistent-id="org.opendaylight.controller.cluster.datastore" update-strategy="component-managed" update-method="update"/>
-    <argument ref="introspectorOper"/>
-    <argument ref="datastoreProps"/>
-  </bean>
-
-  <bean id="operDatastore" class="org.opendaylight.controller.cluster.datastore.DistributedDataStoreFactory"
-          factory-method="createInstance" destroy-method="close">
-    <argument ref="schemaService"/>
-    <argument>
-      <bean factory-ref="introspectorOper" factory-method="getContext" />
-    </argument>
-    <argument ref="datastoreSnapshotRestore"/>
-    <argument ref="actorSystemProvider"/>
-    <argument ref="introspectorOper"/>
-    <argument ref="updaterOper"/>
-    <argument ref="configurationImpl" />
-  </bean>
-
-  <service ref="operDatastore" odl:type="distributed-operational">
-    <interfaces>
-      <value>org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface</value>
-    </interfaces>
-  </service>
-
-  <!-- Concurrent DOMDataBroker -->
-
-  <bean id="listenableFutureExecutor" class="org.opendaylight.yangtools.util.concurrent.SpecialExecutors"
-          factory-method="newBlockingBoundedCachedThreadPool">
-    <argument value="${max-data-broker-future-callback-pool-size}"/>
-    <argument value="${max-data-broker-future-callback-queue-size}"/>
-    <argument value="CommitFutures"/>
-    <argument>
-    <!-- We should use a more specific class -->
-      <bean factory-ref="operDatastore" factory-method="getClass"/>
-    </argument>
-  </bean>
-
-  <bean id="commitStatsTracker" class="org.opendaylight.yangtools.util.DurationStatisticsTracker"
-          factory-method="createConcurrent"/>
-
-  <bean id="clusteredDOMDataBroker" class="org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker"
-          destroy-method="close">
-    <argument>
-      <map>
-        <entry key="CONFIGURATION" value-ref="configDatastore"/>
-        <entry key="OPERATIONAL" value-ref="operDatastore"/>
-      </map>
-    </argument>
-    <argument ref="listenableFutureExecutor"/>
-    <argument ref="commitStatsTracker"/>
-  </bean>
-
-  <service ref="clusteredDOMDataBroker" interface="org.opendaylight.mdsal.dom.api.DOMDataBroker"
-          odl:type="default"/>
-
-  <bean id="legacyDOMDataBroker" class="org.opendaylight.controller.sal.core.compat.LegacyDOMDataBrokerAdapter">
-    <argument ref="clusteredDOMDataBroker"/>
-  </bean>
-
-  <service ref="legacyDOMDataBroker" interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"
-          odl:type="default"/>
-
-  <!-- JMX beans for the data broker -->
-
-  <bean id="commitStatsMXBean" class="org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl"
-          init-method="register" destroy-method="unregister">
-    <argument ref="commitStatsTracker"/>
-    <argument value="DOMDataBroker"/>
-  </bean>
-
-  <bean id="threadStatsMXBean" class="org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl"
-          factory-method="create" destroy-method="unregister">
-    <argument ref="listenableFutureExecutor"/>
-    <argument value="CommitFutureExecutorStats"/>
-    <argument value="DOMDataBroker"/>
-  </bean>
-
-  <!-- Distributed EntityOwnershipService -->
-  <cm:cm-properties id="strategiesProps" persistent-id="org.opendaylight.controller.cluster.entity.owner.selection.strategies" />
-
-  <bean id="selectionStrategyConfig" class="org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfigReader"
-          factory-method="loadStrategyWithConfig">
-    <argument ref="strategiesProps"/>
-  </bean>
-
-  <bean id="distributedEntityOwnershipService" class="org.opendaylight.controller.cluster.datastore.entityownership.DistributedEntityOwnershipService"
-          factory-method="start" destroy-method="close">
-    <argument>
-      <bean factory-ref="operDatastore" factory-method="getActorUtils"/>
-    </argument>
-    <argument ref="selectionStrategyConfig"/>
-  </bean>
-
-  <service ref="distributedEntityOwnershipService" interface="org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService"
-        odl:type="default"/>
-
-  <!-- CDS shard manager -->
-  <bean id="cdsNodeManager" class="org.opendaylight.controller.cluster.sharding.DistributedShardedDOMDataTree"
-          init-method="init">
-    <argument ref="actorSystemProvider"/>
-    <argument ref="operDatastore"/>
-    <argument ref="configDatastore"/>
-  </bean>
-
-  <service ref="cdsNodeManager" odl:type="default">
-    <interfaces>
-      <value>org.opendaylight.mdsal.dom.api.DOMDataTreeShardingService</value>
-      <value>org.opendaylight.mdsal.dom.api.DOMDataTreeService</value>
-      <value>org.opendaylight.controller.cluster.sharding.DistributedShardFactory</value>
-    </interfaces>
-  </service>
-
-</blueprint>
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/yang/distributed-datastore-provider.yang b/opendaylight/md-sal/sal-distributed-datastore/src/main/yang/distributed-datastore-provider.yang
index af37589f9cfcda9d0b9c9eafebfc144583c62d78..21307e532cee418189e3601354b22464973808bd 100644 (file)
@@ -1,13 +1,16 @@
 // vi: set smarttab et sw=4 tabstop=4:
 module distributed-datastore-provider {
-
     yang-version 1;
     namespace "urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider";
     prefix "distributed-datastore-provider";
 
     description
         "This module contains the base YANG definitions for
-        the distributed datastore provider implementation";
+         the distributed datastore provider implementation";
+
+    revision "2023-12-29" {
+        description "Remote use-tell-based-protocol and shard-snapshot-chunk-size leaves";
+    }
 
     revision "2014-06-12" {
         description
@@ -39,30 +42,6 @@ module distributed-datastore-provider {
     }
 
     grouping data-store-properties {
-        leaf max-shard-data-change-executor-queue-size {
-            default 1000;
-            type non-zero-uint32-type;
-            description "The maximum queue size for each shard's data store data change notification executor.";
-        }
-
-        leaf max-shard-data-change-executor-pool-size {
-            default 20;
-            type non-zero-uint32-type;
-            description "The maximum thread pool size for each shard's data store data change notification executor.";
-        }
-
-        leaf max-shard-data-change-listener-queue-size {
-            default 1000;
-            type non-zero-uint32-type;
-            description "The maximum queue size for each shard's data store data change listener.";
-        }
-
-        leaf max-shard-data-store-executor-queue-size {
-            default 5000;
-            type non-zero-uint32-type;
-            description "The maximum queue size for each shard's data store executor.";
-        }
-
         leaf shard-transaction-idle-timeout-in-minutes {
             default 10;
             type non-zero-uint32-type;
@@ -78,7 +57,17 @@ module distributed-datastore-provider {
         leaf shard-snapshot-data-threshold-percentage {
             default 12;
             type percentage;
-            description "The percentage of Runtime.maxMemory() used by the in-memory journal log before a snapshot is to be taken";
+            description "The percentage of Runtime.maxMemory() used by the in-memory journal log before a snapshot is to be taken.
+                         Disabled if direct threshold is enabled.";
+        }
+
+        leaf shard-snapshot-data-threshold {
+            default 0;
+            type uint32 {
+                range "0..max";
+            }
+            description "The threshold of in-memory journal size before a snapshot is to be taken. If set to 0, direct threshold
+                         is disabled and percentage is used instead.";
         }
 
 
@@ -144,6 +133,19 @@ module distributed-datastore-provider {
                           an operation (eg transaction create).";
         }
 
+        leaf initial-settle-timeout-multiplier {
+            default 3;
+            type uint32;
+            description "Multiplier for the maximum amount of time to wait for a shard to elect a leader.
+                         Zero value means wait indefinitely (as long as it takes).";
+        }
+
+        leaf recovery-snapshot-interval-seconds {
+            default 0;
+            type uint32;
+            description "Interval after which a snapshot should be taken during the recovery process.";
+        }
+
         leaf shard-batched-modification-count {
             default 1000;
             type non-zero-uint32-type;
@@ -171,6 +173,12 @@ module distributed-datastore-provider {
             description "Enable or disable data persistence";
         }
 
+        leaf snapshotOnRootOverwrite {
+            default false;
+            type boolean;
+            description "Enable or disable capturing snapshots on DataTree root overwrites";
+        }
+
         leaf shard-isolated-leader-check-interval-in-millis {
             default 5000;
             type heartbeat-interval-type;
@@ -204,28 +212,13 @@ module distributed-datastore-provider {
                          cannot be found then the default raft behavior will be applied";
         }
 
-        leaf shard-snapshot-chunk-size {
-            status deprecated;
-            default 2048000;
-            type non-zero-uint32-type;
-            description "When sending a snapshot to a follower, this is the maximum size in bytes for
-                         a chunk of data.";
-        }
-
         leaf maximum-message-slice-size {
-            default 2048000;
+            default 491520;
             type non-zero-uint32-type;
             description "When fragmenting messages thru the akka remoting framework, this is the
                          maximum size in bytes for a message slice.";
         }
 
-        leaf use-tell-based-protocol {
-            default false;
-            type boolean;
-            description "Use a newer protocol between the frontend and backend. This feature is considered
-                         experimental at this point.";
-        }
-
         leaf file-backed-streaming-threshold-in-megabytes {
             default 128;
             type non-zero-uint32-type;
@@ -267,6 +260,30 @@ module distributed-datastore-provider {
             type non-zero-uint32-type;
             description "The initial buffer capacity, in bytes, to use when serializing message payloads.";
         }
+
+        leaf use-lz4-compression {
+            default false;
+            type boolean;
+            description "Use lz4 compression for snapshots, sent from leader to follower, for snapshots stored
+                        by LocalSnapshotStore, use akka.conf configuration.";
+        }
+
+        leaf export-on-recovery {
+            default off;
+            type enumeration {
+                enum off;
+                enum json;
+            }
+            description "Export snapshot and journal during recovery. Possible modes: off(default),
+                        json(export to json files). Note that in case of large snapshot,
+                        export will take a lot of time.";
+        }
+
+        leaf recovery-export-base-dir {
+            default persistence-export;
+            type string;
+            description "Directory name for snapshot and journal dumps.";
+        }
     }
 
     container data-store-properties-container {
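
Assuming these leaves are still surfaced through the org.opendaylight.controller.cluster.datastore configuration (as the removed blueprint above did via DatastoreContextPropertiesUpdater), the new knobs can be overridden with plain key/value properties, conventionally in etc/org.opendaylight.controller.cluster.datastore.cfg. The file location and the values below are illustrative only; the defaults are the ones declared in the YANG model above:

    shard-snapshot-data-threshold=0
    initial-settle-timeout-multiplier=3
    recovery-snapshot-interval-seconds=0
    snapshotOnRootOverwrite=false
    maximum-message-slice-size=491520
    use-lz4-compression=true
    export-on-recovery=json
    recovery-export-base-dir=persistence-export
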
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/yang/entity-owners.yang b/opendaylight/md-sal/sal-distributed-datastore/src/main/yang/entity-owners.yang
deleted file mode 100644 (file)
index 0f37e13..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-module entity-owners {
-    yang-version 1;
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:clustering:entity-owners";
-    prefix "entity-owners";
-
-    description
-        "This module contains the base YANG definitions for
-        an implementation of the EntityOwnershipService which stores
-        entity ownership information in the data store";
-
-    revision "2015-08-04" {
-        description "Initial revision.";
-    }
-
-    container entity-owners {
-
-        // A list of all entities grouped by type
-        list entity-type {
-            key type;
-            leaf type {
-                type string;
-            }
-
-            list entity {
-                key id;
-
-                leaf id {
-                    type instance-identifier;
-                }
-
-                leaf owner {
-                    type string;
-                }
-
-                // A list of all the candidates that would like to own the entity
-                list candidate {
-                    key name;
-                    ordered-by user;
-
-                    leaf name {
-                        type string;
-                    }
-                }
-            }
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/yang/prefix-shard-configuration.yang b/opendaylight/md-sal/sal-distributed-datastore/src/main/yang/prefix-shard-configuration.yang
deleted file mode 100644 (file)
index 02d5c30..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-module prefix-shard-configuration {
-    yang-version 1;
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:clustering:prefix-shard-configuration";
-    prefix "prefix-config";
-
-    description
-        "This module contains the base YANG definitions for
-        shards based on prefix configuration";
-
-    revision "2017-01-10" {
-        description "Initial revision.";
-    }
-
-    container prefix-shards {
-
-        list shard {
-            key prefix;
-            leaf prefix {
-                type instance-identifier;
-                description "Prefix that this shard is rooted at.";
-            }
-
-            container replicas {
-                leaf-list replica {
-                    type string;
-                }
-
-                description "List of cluster member nodes that this shard is replicated on";
-            }
-
-            description "List of prefix-based shards configured.";
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransactionTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBrokerWriteTransactionTest.java
deleted file mode 100644 (file)
index 1680a79..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doThrow;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.concurrent.ExecutionException;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-
-public class AbstractDOMBrokerWriteTransactionTest {
-
-    @Mock
-    private AbstractDOMTransactionFactory abstractDOMTransactionFactory;
-
-    @Mock
-    private DOMStoreWriteTransaction domStoreWriteTransaction;
-
-    private class AbstractDOMBrokerWriteTransactionTestImpl
-            extends AbstractDOMBrokerWriteTransaction<DOMStoreWriteTransaction> {
-
-        AbstractDOMBrokerWriteTransactionTestImpl() {
-            super(new Object(), Collections.emptyMap(), abstractDOMTransactionFactory);
-        }
-
-        @Override
-        protected DOMStoreWriteTransaction createTransaction(LogicalDatastoreType key) {
-            return null;
-        }
-
-        @Override
-        protected Collection<DOMStoreWriteTransaction> getSubtransactions() {
-            return Collections.singletonList(domStoreWriteTransaction);
-        }
-    }
-
-    @Before
-    public void setup() {
-        MockitoAnnotations.initMocks(this);
-    }
-
-    @Test
-    public void readyRuntimeExceptionAndCancel() throws InterruptedException {
-        RuntimeException thrown = new RuntimeException();
-        doThrow(thrown).when(domStoreWriteTransaction).ready();
-        AbstractDOMBrokerWriteTransactionTestImpl abstractDOMBrokerWriteTransactionTestImpl =
-                new AbstractDOMBrokerWriteTransactionTestImpl();
-
-        FluentFuture<? extends CommitInfo> submitFuture = abstractDOMBrokerWriteTransactionTestImpl.commit();
-        try {
-            submitFuture.get();
-            Assert.fail("TransactionCommitFailedException expected");
-        } catch (ExecutionException e) {
-            assertTrue(e.getCause() instanceof TransactionCommitFailedException);
-            assertTrue(e.getCause().getCause() == thrown);
-            abstractDOMBrokerWriteTransactionTestImpl.cancel();
-        }
-    }
-
-    @Test
-    public void submitRuntimeExceptionAndCancel() throws InterruptedException {
-        RuntimeException thrown = new RuntimeException();
-        doThrow(thrown).when(abstractDOMTransactionFactory).commit(any(), any());
-        AbstractDOMBrokerWriteTransactionTestImpl abstractDOMBrokerWriteTransactionTestImpl
-                = new AbstractDOMBrokerWriteTransactionTestImpl();
-
-        FluentFuture<? extends CommitInfo> submitFuture = abstractDOMBrokerWriteTransactionTestImpl.commit();
-        try {
-            submitFuture.get();
-            Assert.fail("TransactionCommitFailedException expected");
-        } catch (ExecutionException e) {
-            assertTrue(e.getCause() instanceof TransactionCommitFailedException);
-            assertTrue(e.getCause().getCause() == thrown);
-            abstractDOMBrokerWriteTransactionTestImpl.cancel();
-        }
-    }
-}
index bcdbb98d1c6cf04cdb45aa7489fba5457c99d2cb..15fe8a417f066d95f0588a71d27d289446cd45f0 100644 (file)
@@ -8,16 +8,21 @@
 package org.opendaylight.controller.cluster.databroker;
 
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 
-import org.junit.AfterClass;
+import akka.util.Timeout;
+import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.Uninterruptibles;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.TimeUnit;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
@@ -30,13 +35,10 @@ import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransacti
 import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.common.Empty;
+import scala.concurrent.duration.FiniteDuration;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedDataStoreTest {
 
     private static final ClientIdentifier UNKNOWN_ID = ClientIdentifier.create(
@@ -46,88 +48,98 @@ public class ClientBackedDataStoreTest {
             MemberName.forName("member"), FrontendType.forName("frontend"));
     private static final ClientIdentifier CLIENT_IDENTIFIER = ClientIdentifier.create(FRONTEND_IDENTIFIER, 0);
 
-    private static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0);
-    private static final TransactionIdentifier TRANSACTION_IDENTIFIER = new TransactionIdentifier(HISTORY_ID, 0);
-
-    private static SchemaContext SCHEMA_CONTEXT;
+    private static final TransactionIdentifier TRANSACTION_IDENTIFIER =
+        new TransactionIdentifier(new LocalHistoryIdentifier(CLIENT_IDENTIFIER, 0), 0);
 
     @Mock
     private DataStoreClient clientActor;
-
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private Timeout shardElectionTimeout;
     @Mock
     private ActorUtils actorUtils;
-
     @Mock
     private ClientLocalHistory clientLocalHistory;
-
     @Mock
     private ClientTransaction clientTransaction;
-
     @Mock
     private ClientSnapshot clientSnapshot;
 
-    @BeforeClass
-    public static void beforeClass() {
-        SCHEMA_CONTEXT = TestModel.createTestContext();
-    }
-
-    @AfterClass
-    public static void afterClass() {
-        SCHEMA_CONTEXT = null;
-    }
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        when(actorUtils.getSchemaContext()).thenReturn(SCHEMA_CONTEXT);
-        when(actorUtils.getDatastoreContext()).thenReturn(DatastoreContext.newBuilder().build());
-        when(clientTransaction.getIdentifier()).thenReturn(TRANSACTION_IDENTIFIER);
-        when(clientSnapshot.getIdentifier()).thenReturn(TRANSACTION_IDENTIFIER);
+        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
+        doReturn(TRANSACTION_IDENTIFIER).when(clientTransaction).getIdentifier();
+        doReturn(TRANSACTION_IDENTIFIER).when(clientSnapshot).getIdentifier();
 
-        when(clientActor.getIdentifier()).thenReturn(CLIENT_IDENTIFIER);
-        when(clientActor.createTransaction()).thenReturn(clientTransaction);
-        when(clientActor.createLocalHistory()).thenReturn(clientLocalHistory);
-        when(clientActor.createSnapshot()).thenReturn(clientSnapshot);
+        doReturn(clientTransaction).when(clientActor).createTransaction();
+        doReturn(clientLocalHistory).when(clientActor).createLocalHistory();
+        doReturn(clientSnapshot).when(clientActor).createSnapshot();
     }
 
     @Test
     public void testCreateTransactionChain() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreTransactionChain txChain = clientBackedDataStore.createTransactionChain();
-            assertNotNull(txChain);
-            verify(clientActor, Mockito.times(1)).createLocalHistory();
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.createTransactionChain());
+            verify(clientActor, times(1)).createLocalHistory();
         }
     }
 
     @Test
     public void testNewReadOnlyTransaction() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreReadTransaction tx = clientBackedDataStore.newReadOnlyTransaction();
-            assertNotNull(tx);
-            verify(clientActor, Mockito.times(1)).createSnapshot();
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.newReadOnlyTransaction());
+            verify(clientActor, times(1)).createSnapshot();
         }
     }
 
     @Test
     public void testNewWriteOnlyTransaction() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreWriteTransaction tx = clientBackedDataStore.newWriteOnlyTransaction();
-            assertNotNull(tx);
-            verify(clientActor, Mockito.times(1)).createTransaction();
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.newWriteOnlyTransaction());
+            verify(clientActor, times(1)).createTransaction();
         }
     }
 
     @Test
     public void testNewReadWriteTransaction() {
-        try (ClientBackedDataStore clientBackedDataStore = new ClientBackedDataStore(
-                actorUtils, UNKNOWN_ID, clientActor)) {
-            final DOMStoreReadWriteTransaction tx = clientBackedDataStore.newReadWriteTransaction();
-            assertNotNull(tx);
-            verify(clientActor, Mockito.times(1)).createTransaction();
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            assertNotNull(clientBackedDataStore.newReadWriteTransaction());
+            verify(clientActor, times(1)).createTransaction();
+        }
+    }
+
+    @Test
+    public void testWaitTillReadyBlocking() {
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
+        doReturn(1).when(datastoreContext).getInitialSettleTimeoutMultiplier();
+        doReturn(FiniteDuration.apply(50, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            final var sw = Stopwatch.createStarted();
+            clientBackedDataStore.waitTillReady();
+            final var elapsedMillis = sw.stop().elapsed(TimeUnit.MILLISECONDS);
+
+            assertTrue("Expected to be blocked for 50 millis", elapsedMillis >= 50);
+        }
+    }
+
+    @Test
+    public void testWaitTillReadyCountDown() {
+        try (var clientBackedDataStore = new ClientBackedDataStore(actorUtils, UNKNOWN_ID, clientActor)) {
+            doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+
+            ForkJoinPool.commonPool().submit(() -> {
+                Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
+                clientBackedDataStore.readinessFuture().set(Empty.value());
+            });
+
+            final var sw = Stopwatch.createStarted();
+            clientBackedDataStore.waitTillReady();
+            final var elapsedMillis = sw.stop().elapsed(TimeUnit.MILLISECONDS);
+
+            assertTrue("Expected to be released in 500 millis", elapsedMillis < 5000);
         }
     }
 }
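
The edits above follow one pattern repeated across these tests: MockitoAnnotations.initMocks plus when(...).thenReturn(...) is replaced by the strict-stubs runner and doReturn(...).when(...), so any stubbing a test never exercises fails that test. A minimal, self-contained illustration of the style; the List mock is purely illustrative and not controller code:

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.doReturn;

    import java.util.List;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.mockito.Mock;
    import org.mockito.junit.MockitoJUnitRunner;

    @RunWith(MockitoJUnitRunner.StrictStubs.class)
    public class StrictStubsExampleTest {
        @Mock
        private List<String> items;

        @Test
        public void stubbedCallIsExercised() {
            // doReturn().when() replaces when().thenReturn(); the strict runner would
            // report this stub as unnecessary if size() were never invoked below.
            doReturn(1).when(items).size();
            assertEquals(1, items.size());
        }
    }
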
index 2d09073228b6636ef511e3c56670f098bafe735f..08316b83911c5bb763962009bfafe869d9083112 100644 (file)
@@ -8,26 +8,27 @@
 package org.opendaylight.controller.cluster.databroker;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
+import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateFluentFuture;
+import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
 
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedReadTransactionTest extends ClientBackedTransactionTest<ClientBackedReadTransaction> {
     private ClientBackedReadTransaction object;
 
     @Mock
-    private NormalizedNode<?, ?> data;
+    private ContainerNode data;
     @Mock
     private ClientActorContext clientContext;
     @Mock
@@ -40,28 +41,21 @@ public class ClientBackedReadTransactionTest extends ClientBackedTransactionTest
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        doReturn(CLIENT_ID).when(clientContext).getIdentifier();
         doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
 
-        doReturn(Futures.immediateCheckedFuture(Boolean.TRUE)).when(delegate).exists(YangInstanceIdentifier.EMPTY);
-        doReturn(Futures.immediateCheckedFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.EMPTY);
+        doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.of());
+        doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.of());
 
         object = new ClientBackedReadTransaction(delegate, null, null);
     }
 
     @Test
     public void testRead() throws Exception {
-        final ListenableFuture<Optional<NormalizedNode<?, ?>>> result = object().read(YangInstanceIdentifier.EMPTY);
-        final Optional<NormalizedNode<?, ?>> resultData = result.get();
-        assertTrue(resultData.isPresent());
-        assertEquals(data, resultData.get());
+        assertEquals(Optional.of(data), object().read(YangInstanceIdentifier.of()).get());
     }
 
     @Test
     public void testExists() throws Exception {
-        final ListenableFuture<Boolean> result = object().exists(YangInstanceIdentifier.EMPTY);
-        assertEquals(Boolean.TRUE, result.get());
+        assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.of()).get());
     }
 }
index 56968390321e4996909b34fca2e485e9340e39b4..11301cf3c7ea11495edd233e3c9d60a2c8a7ab91 100644 (file)
@@ -8,21 +8,21 @@
 package org.opendaylight.controller.cluster.databroker;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
+import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateFluentFuture;
+import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
 
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
 import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedReadWriteTransactionTest
         extends ClientBackedTransactionTest<ClientBackedReadWriteTransaction> {
     private ClientBackedReadWriteTransaction object;
@@ -30,9 +30,7 @@ public class ClientBackedReadWriteTransactionTest
     @Mock
     private ClientTransaction delegate;
     @Mock
-    private NormalizedNode<?, ?> data;
-    @Mock
-    private DOMStoreThreePhaseCommitCohort readyCohort;
+    private ContainerNode data;
 
     @Override
     ClientBackedReadWriteTransaction object() {
@@ -41,27 +39,21 @@ public class ClientBackedReadWriteTransactionTest
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
         doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
-        doReturn(readyCohort).when(delegate).ready();
 
-        doReturn(Futures.immediateCheckedFuture(Boolean.TRUE)).when(delegate).exists(YangInstanceIdentifier.EMPTY);
-        doReturn(Futures.immediateCheckedFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.EMPTY);
+        doReturn(immediateTrueFluentFuture()).when(delegate).exists(YangInstanceIdentifier.of());
+        doReturn(immediateFluentFuture(Optional.of(data))).when(delegate).read(YangInstanceIdentifier.of());
 
         object = new ClientBackedReadWriteTransaction(delegate, null);
     }
 
     @Test
     public void testRead() throws Exception {
-        final FluentFuture<Optional<NormalizedNode<?, ?>>> result = object().read(YangInstanceIdentifier.EMPTY);
-        final Optional<NormalizedNode<?, ?>> resultData = result.get();
-        assertTrue(resultData.isPresent());
-        assertEquals(data, resultData.get());
+        assertEquals(Optional.of(data), object().read(YangInstanceIdentifier.of()).get());
     }
 
     @Test
     public void testExists() throws Exception {
-        assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.EMPTY).get());
+        assertEquals(Boolean.TRUE, object().exists(YangInstanceIdentifier.of()).get());
     }
 }
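
Two smaller migrations recur in these hunks: the root path is now spelled YangInstanceIdentifier.of() (the EMPTY constant is gone), and immediate results for exists()/read() stubs come from FluentFutures rather than Futures.immediateCheckedFuture. A minimal sketch using only those calls, not controller code:

    import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateFluentFuture;
    import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;

    import com.google.common.util.concurrent.FluentFuture;
    import java.util.Optional;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    // Sketch only: shows the replacement utilities used when stubbing read/exists.
    public final class FluentFuturesExample {
        private FluentFuturesExample() {
            // no instances
        }

        // An always-true "exists" result, as stubbed for the root path in the tests.
        public static FluentFuture<Boolean> rootExists() {
            return immediateTrueFluentFuture();
        }

        // An immediate "read" result wrapping the root path itself as sample data.
        public static FluentFuture<Optional<YangInstanceIdentifier>> readRoot() {
            return immediateFluentFuture(Optional.of(YangInstanceIdentifier.of()));
        }
    }
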
index 3bec1835745c576b0881325bfb38657038b41992..08daa004f2cec5ffdfeaaa1ec361905e050ae6bf 100644 (file)
@@ -7,12 +7,16 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
@@ -23,6 +27,7 @@ import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHist
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientSnapshot;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedTransactionChainTest {
     private ClientBackedTransactionChain chain;
 
@@ -35,47 +40,44 @@ public class ClientBackedTransactionChainTest {
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
         final FrontendIdentifier frontendId = FrontendIdentifier.create(
                 MemberName.forName("member"), FrontendType.forName("frontend"));
         final ClientIdentifier clientId = ClientIdentifier.create(frontendId, 0);
         final LocalHistoryIdentifier historyId = new LocalHistoryIdentifier(clientId, 0);
         final TransactionIdentifier transactionId = new TransactionIdentifier(historyId, 0);
 
-        Mockito.when(history.getIdentifier()).thenReturn(historyId);
-        Mockito.when(transaction.getIdentifier()).thenReturn(transactionId);
-        Mockito.when(snapshot.getIdentifier()).thenReturn(transactionId);
-        Mockito.when(history.takeSnapshot()).thenReturn(snapshot);
-        Mockito.when(history.createTransaction()).thenReturn(transaction);
+        doReturn(transactionId).when(transaction).getIdentifier();
+        doReturn(transactionId).when(snapshot).getIdentifier();
+        doReturn(snapshot).when(history).takeSnapshot();
+        doReturn(transaction).when(history).createTransaction();
 
         chain = new ClientBackedTransactionChain(history, false);
     }
 
     @Test
     public void testNewReadOnlyTransaction() {
-        Assert.assertNotNull(chain.newReadOnlyTransaction());
-        Mockito.verify(history).takeSnapshot();
+        assertNotNull(chain.newReadOnlyTransaction());
+        verify(history).takeSnapshot();
     }
 
     @Test
     public void testNewReadWriteTransaction() {
-        Assert.assertNotNull(chain.newReadWriteTransaction());
-        Mockito.verify(history).createTransaction();
+        assertNotNull(chain.newReadWriteTransaction());
+        verify(history).createTransaction();
     }
 
     @Test
     public void testNewWriteOnlyTransaction() {
-        Assert.assertNotNull(chain.newWriteOnlyTransaction());
-        Mockito.verify(history).createTransaction();
+        assertNotNull(chain.newWriteOnlyTransaction());
+        verify(history).createTransaction();
     }
 
     @Test
     public void testClose() {
         chain.newReadOnlyTransaction();
         chain.close();
-        Mockito.verify(snapshot).abort();
-        Mockito.verify(history).close();
+        verify(snapshot).abort();
+        verify(history).close();
     }
 
     @Test
@@ -83,7 +85,7 @@ public class ClientBackedTransactionChainTest {
         chain.snapshotClosed(snapshot);
         // snap is removed, so cannot be aborted
         chain.close();
-        Mockito.verify(snapshot, Mockito.never()).abort();
-        Mockito.verify(history).close();
+        verify(snapshot, never()).abort();
+        verify(history).close();
     }
 }
\ No newline at end of file
index 39e8d74dcee8f8d42f8576891d94dde019d40afe..9573a3ed3069c98a74722b0f3858c4b454b0c462 100644 (file)
@@ -7,8 +7,10 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
@@ -18,11 +20,11 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 import org.opendaylight.controller.cluster.databroker.actors.dds.AbstractClientHandle;
 
 public abstract class ClientBackedTransactionTest<T extends ClientBackedTransaction<?>> {
-    private static FrontendIdentifier FRONTEND_ID = FrontendIdentifier.create(
+    private static final FrontendIdentifier FRONTEND_ID = FrontendIdentifier.create(
             MemberName.forName("member"), FrontendType.forName("frontend"));
     protected static final ClientIdentifier CLIENT_ID = ClientIdentifier.create(FRONTEND_ID, 0);
 
-    private static LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_ID, 0);
+    private static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_ID, 0);
     protected static final TransactionIdentifier TRANSACTION_ID = new TransactionIdentifier(HISTORY_ID, 0);
 
     abstract T object();
@@ -31,6 +33,7 @@ public abstract class ClientBackedTransactionTest<T extends ClientBackedTransact
     public void testClose() {
         final AbstractClientHandle<?> delegate = object().delegate();
         object().close();
-        Mockito.verify(delegate).abort();
+        // Called twice because of immediate cleaning
+        verify(delegate, times(2)).abort();
     }
 }
\ No newline at end of file
index 4a6ee3f944b63b869d8e3d136129da799c4b5e43..45430083510318c77dd5aff5e20225a8d2e947ba 100644 (file)
@@ -7,35 +7,35 @@
  */
 package org.opendaylight.controller.cluster.databroker;
 
-import org.junit.Assert;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
+
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientBackedWriteTransactionTest extends ClientBackedTransactionTest<ClientBackedWriteTransaction> {
     private ClientBackedWriteTransaction object;
 
     @Mock
     private ClientTransaction delegate;
     @Mock
-    private NormalizedNode<?, ?> data;
-    @Mock
-    private YangInstanceIdentifier path;
+    private ContainerNode data;
     @Mock
     private DOMStoreThreePhaseCommitCohort readyCohort;
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        Mockito.doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
-        Mockito.doReturn(readyCohort).when(delegate).ready();
+        doReturn(TRANSACTION_ID).when(delegate).getIdentifier();
+        doReturn(readyCohort).when(delegate).ready();
 
         object = new ClientBackedWriteTransaction(delegate, null);
     }
@@ -47,26 +47,26 @@ public class ClientBackedWriteTransactionTest extends ClientBackedTransactionTes
 
     @Test
     public void testWrite() {
-        object().write(path, data);
-        Mockito.verify(delegate).write(path, data);
+        object().write(YangInstanceIdentifier.of(), data);
+        verify(delegate).write(YangInstanceIdentifier.of(), data);
     }
 
     @Test
     public void testMerge() {
-        object().merge(path, data);
-        Mockito.verify(delegate).merge(path, data);
+        object().merge(YangInstanceIdentifier.of(), data);
+        verify(delegate).merge(YangInstanceIdentifier.of(), data);
     }
 
     @Test
     public void testDelete() {
-        object().delete(path);
-        Mockito.verify(delegate).delete(path);
+        object().delete(YangInstanceIdentifier.of());
+        verify(delegate).delete(YangInstanceIdentifier.of());
     }
 
     @Test
     public void testReady() {
         final DOMStoreThreePhaseCommitCohort result = object().ready();
-        Assert.assertNotNull(result);
-        Mockito.verify(delegate).ready();
+        assertNotNull(result);
+        verify(delegate).ready();
     }
 }
@@ -5,8 +5,7 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl.jmx;
+package org.opendaylight.controller.cluster.databroker;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -20,13 +19,10 @@ import org.opendaylight.yangtools.util.DurationStatisticsTracker;
  * @author Thomas Pantelis
  */
 public class CommitStatsMXBeanImplTest {
-
     @Test
     public void test() {
-
         DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
-        CommitStatsMXBeanImpl bean =
-                new CommitStatsMXBeanImpl(commitStatsTracker, "Test");
+        CommitStatsMXBeanImpl bean = new CommitStatsMXBeanImpl(commitStatsTracker, "Test");
 
         commitStatsTracker.addDuration(100);
 
index 7f0e0f5577621ca26cff4ab371c964108f727c60..630d582f0c29851d7b5dacd488101b47a459b58f 100644 (file)
@@ -24,7 +24,6 @@ import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediate
 import static org.opendaylight.yangtools.util.concurrent.FluentFutures.immediateTrueFluentFuture;
 
 import com.google.common.base.Throwables;
-import com.google.common.collect.ClassToInstanceMap;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.FutureCallback;
@@ -34,8 +33,6 @@ import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -49,23 +46,18 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.stubbing.Answer;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.DataStoreUnavailableException;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.DataTreeChangeExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.mdsal.dom.broker.TransactionCommitFailedExceptionMapper;
+import org.opendaylight.mdsal.dom.spi.TransactionCommitFailedExceptionMapper;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
@@ -74,7 +66,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 /**
  * Unit tests for DOMConcurrentDataCommitCoordinator.
@@ -84,8 +76,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 public class ConcurrentDOMDataBrokerTest {
 
     private final DOMDataTreeWriteTransaction transaction = mock(DOMDataTreeWriteTransaction.class);
-    private final DOMStoreThreePhaseCommitCohort mockCohort1 = mock(DOMStoreThreePhaseCommitCohort.class);
-    private final DOMStoreThreePhaseCommitCohort mockCohort2 = mock(DOMStoreThreePhaseCommitCohort.class);
+    private final DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
     private final ThreadPoolExecutor futureExecutor =
             new ThreadPoolExecutor(0, 1, 5, TimeUnit.SECONDS, new SynchronousQueue<>());
     private ConcurrentDOMDataBroker coordinator;
@@ -121,8 +112,7 @@ public class ConcurrentDOMDataBrokerTest {
             final SettableFuture<Boolean> future = SettableFuture.create();
             if (doAsync) {
                 new Thread(() -> {
-                    Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue,
-                            10, TimeUnit.SECONDS);
+                    Uninterruptibles.awaitUninterruptibly(asyncCanCommitContinue, 10, TimeUnit.SECONDS);
                     future.set(Boolean.TRUE);
                 }).start();
             } else {
@@ -132,16 +122,11 @@ public class ConcurrentDOMDataBrokerTest {
             return future;
         };
 
-        doAnswer(asyncCanCommit).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).commit();
+        doAnswer(asyncCanCommit).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).preCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).commit();
 
-        doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).commit();
-
-        ListenableFuture<? extends CommitInfo> future =
-                coordinator.commit(transaction, Arrays.asList(mockCohort1, mockCohort2));
+        ListenableFuture<? extends CommitInfo> future = coordinator.commit(transaction, mockCohort);
 
         final CountDownLatch doneLatch = new CountDownLatch(1);
         final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
@@ -169,35 +154,22 @@ public class ConcurrentDOMDataBrokerTest {
 
         assertEquals("Task count", doAsync ? 1 : 0, futureExecutor.getTaskCount());
 
-        InOrder inOrder = inOrder(mockCohort1, mockCohort2);
-        inOrder.verify(mockCohort1).canCommit();
-        inOrder.verify(mockCohort2).canCommit();
-        inOrder.verify(mockCohort1).preCommit();
-        inOrder.verify(mockCohort2).preCommit();
-        inOrder.verify(mockCohort1).commit();
-        inOrder.verify(mockCohort2).commit();
+        InOrder inOrder = inOrder(mockCohort);
+        inOrder.verify(mockCohort, times(1)).canCommit();
+        inOrder.verify(mockCohort, times(1)).preCommit();
+        inOrder.verify(mockCohort, times(1)).commit();
     }
 
     @Test
     public void testSubmitWithNegativeCanCommitResponse() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
-        doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort3).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
-        ListenableFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+        doReturn(Futures.immediateFuture(Boolean.FALSE)).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
 
-        assertFailure(future, null, mockCohort1, mockCohort2, mockCohort3);
+        assertFailure(coordinator.commit(transaction, mockCohort), null, mockCohort);
     }
 
     private static void assertFailure(final ListenableFuture<?> future, final Exception expCause,
-            final DOMStoreThreePhaseCommitCohort... mockCohorts) throws Exception {
+            final DOMStoreThreePhaseCommitCohort mockCohort) throws Exception {
         try {
             future.get(5, TimeUnit.SECONDS);
             fail("Expected TransactionCommitFailedException");
@@ -206,11 +178,7 @@ public class ConcurrentDOMDataBrokerTest {
             if (expCause != null) {
                 assertSame("Expected cause", expCause.getClass(), tcf.getCause().getClass());
             }
-
-            InOrder inOrder = inOrder((Object[])mockCohorts);
-            for (DOMStoreThreePhaseCommitCohort c: mockCohorts) {
-                inOrder.verify(c).abort();
-            }
+            verify(mockCohort, times(1)).abort();
         } catch (TimeoutException e) {
             throw e;
         }
@@ -218,97 +186,42 @@ public class ConcurrentDOMDataBrokerTest {
 
     @Test
     public void testSubmitWithCanCommitException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        IllegalStateException cause = new IllegalStateException("mock");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2));
+        final Exception cause = new IllegalStateException("mock");
+        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
 
-        assertFailure(future, cause, mockCohort1, mockCohort2);
-    }
-
-    @Test
-    public void testSubmitWithCanCommitDataStoreUnavailableException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-        NoShardLeaderException rootCause = new NoShardLeaderException("mock");
-        DataStoreUnavailableException cause = new DataStoreUnavailableException(rootCause.getMessage(), rootCause);
-        doReturn(Futures.immediateFailedFuture(rootCause)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-            transaction, Arrays.asList(mockCohort1, mockCohort2));
-
-        assertFailure(future, cause, mockCohort1, mockCohort2);
+        assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
     }
 
     @Test
     public void testSubmitWithPreCommitException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
-        IllegalStateException cause = new IllegalStateException("mock");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
-        doReturn(immediateTrueFluentFuture()).when(mockCohort3).canCommit();
-        doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2")))
-                .when(mockCohort3).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
+        doReturn(immediateTrueFluentFuture()).when(mockCohort).canCommit();
+        final IllegalStateException cause = new IllegalStateException("mock");
+        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).preCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
 
-        assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+        assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
     }
 
     @Test
     public void testSubmitWithCommitException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).preCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).commit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort1).abort();
-
-        doReturn(immediateTrueFluentFuture()).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).preCommit();
-        IllegalStateException cause = new IllegalStateException("mock");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).commit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        DOMStoreThreePhaseCommitCohort mockCohort3 = mock(DOMStoreThreePhaseCommitCohort.class);
-        doReturn(immediateTrueFluentFuture()).when(mockCohort3).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).preCommit();
-        doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock2")))
-                .when(mockCohort3).commit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort3).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2, mockCohort3));
-
-        assertFailure(future, cause, mockCohort1, mockCohort2, mockCohort3);
+        doReturn(immediateTrueFluentFuture()).when(mockCohort).canCommit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).preCommit();
+        final IllegalStateException cause = new IllegalStateException("mock");
+        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort).commit();
+        doReturn(immediateNullFluentFuture()).when(mockCohort).abort();
+
+        assertFailure(coordinator.commit(transaction, mockCohort), cause, mockCohort);
     }
 
     @Test
     public void testSubmitWithAbortException() throws Exception {
-        doReturn(immediateTrueFluentFuture()).when(mockCohort1).canCommit();
-        doReturn(Futures.immediateFailedFuture(new IllegalStateException("mock abort error")))
-                .when(mockCohort1).abort();
+        final Exception canCommitCause = new IllegalStateException("canCommit error");
+        doReturn(Futures.immediateFailedFuture(canCommitCause)).when(mockCohort).canCommit();
+        final Exception abortCause = new IllegalStateException("abort error");
+        doReturn(Futures.immediateFailedFuture(abortCause)).when(mockCohort).abort();
 
-        IllegalStateException cause = new IllegalStateException("mock canCommit error");
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockCohort2).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohort2).abort();
-
-        FluentFuture<? extends CommitInfo> future = coordinator.commit(
-                transaction, Arrays.asList(mockCohort1, mockCohort2));
-
-        assertFailure(future, cause, mockCohort1, mockCohort2);
+        assertFailure(coordinator.commit(transaction, mockCohort), canCommitCause, mockCohort);
     }
 
     @Test
@@ -361,17 +274,12 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor)) {
             DOMDataTreeReadWriteTransaction dataTxn = dataBroker.newReadWriteTransaction();
 
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY, mock(NormalizedNode.class));
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY, mock(NormalizedNode.class));
-            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY);
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
 
             verify(configDomStore, never()).newReadWriteTransaction();
             verify(operationalDomStore, times(1)).newReadWriteTransaction();
-
-            dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY, mock(NormalizedNode.class));
-
-            verify(configDomStore, times(1)).newReadWriteTransaction();
-            verify(operationalDomStore, times(1)).newReadWriteTransaction();
         }
 
     }
@@ -390,16 +298,11 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor)) {
             DOMDataTreeWriteTransaction dataTxn = dataBroker.newWriteOnlyTransaction();
 
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY, mock(NormalizedNode.class));
-            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY, mock(NormalizedNode.class));
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
+            dataTxn.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(), mock(ContainerNode.class));
 
             verify(configDomStore, never()).newWriteOnlyTransaction();
             verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
-
-            dataTxn.put(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY, mock(NormalizedNode.class));
-
-            verify(configDomStore, times(1)).newWriteOnlyTransaction();
-            verify(operationalDomStore, times(1)).newWriteOnlyTransaction();
         }
     }
 
@@ -417,16 +320,11 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor)) {
             DOMDataTreeReadTransaction dataTxn = dataBroker.newReadOnlyTransaction();
 
-            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY);
-            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY);
+            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
+            dataTxn.read(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
 
             verify(configDomStore, never()).newReadOnlyTransaction();
             verify(operationalDomStore, times(1)).newReadOnlyTransaction();
-
-            dataTxn.read(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
-
-            verify(configDomStore, times(1)).newReadOnlyTransaction();
-            verify(operationalDomStore, times(1)).newReadOnlyTransaction();
         }
     }
 
@@ -435,7 +333,6 @@ public class ConcurrentDOMDataBrokerTest {
         DOMStore configDomStore = mock(DOMStore.class);
         DOMStore operationalDomStore = mock(DOMStore.class);
         DOMStoreReadWriteTransaction mockStoreReadWriteTransaction = mock(DOMStoreReadWriteTransaction.class);
-        DOMStoreThreePhaseCommitCohort mockCohort = mock(DOMStoreThreePhaseCommitCohort.class);
 
         doReturn(mockStoreReadWriteTransaction).when(operationalDomStore).newReadWriteTransaction();
         doReturn(mockCohort).when(mockStoreReadWriteTransaction).ready();
@@ -450,15 +347,15 @@ public class ConcurrentDOMDataBrokerTest {
                 configDomStore), futureExecutor) {
             @Override
             public FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction writeTx,
-                    Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-                commitCohorts.addAll(cohorts);
+                    DOMStoreThreePhaseCommitCohort cohort) {
+                commitCohorts.add(cohort);
                 latch.countDown();
-                return super.commit(writeTx, cohorts);
+                return super.commit(writeTx, cohort);
             }
         }) {
             DOMDataTreeReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
 
-            domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY);
+            domDataReadWriteTransaction.delete(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
 
             domDataReadWriteTransaction.commit();
 
@@ -468,56 +365,6 @@ public class ConcurrentDOMDataBrokerTest {
         }
     }
 
-    @Test
-    public void testSubmitWithOnlyTwoSubTransactions() throws InterruptedException {
-        DOMStore configDomStore = mock(DOMStore.class);
-        DOMStore operationalDomStore = mock(DOMStore.class);
-        DOMStoreReadWriteTransaction operationalTransaction = mock(DOMStoreReadWriteTransaction.class);
-        DOMStoreReadWriteTransaction configTransaction = mock(DOMStoreReadWriteTransaction.class);
-        DOMStoreThreePhaseCommitCohort mockCohortOperational = mock(DOMStoreThreePhaseCommitCohort.class);
-        DOMStoreThreePhaseCommitCohort mockCohortConfig = mock(DOMStoreThreePhaseCommitCohort.class);
-
-        doReturn(operationalTransaction).when(operationalDomStore).newReadWriteTransaction();
-        doReturn(configTransaction).when(configDomStore).newReadWriteTransaction();
-
-        doReturn(mockCohortOperational).when(operationalTransaction).ready();
-        doReturn(immediateFalseFluentFuture()).when(mockCohortOperational).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohortOperational).abort();
-
-        doReturn(mockCohortConfig).when(configTransaction).ready();
-        doReturn(immediateFalseFluentFuture()).when(mockCohortConfig).canCommit();
-        doReturn(immediateNullFluentFuture()).when(mockCohortConfig).abort();
-
-        final CountDownLatch latch = new CountDownLatch(1);
-        final List<DOMStoreThreePhaseCommitCohort> commitCohorts = new ArrayList<>();
-
-        try (ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
-                LogicalDatastoreType.OPERATIONAL, operationalDomStore, LogicalDatastoreType.CONFIGURATION,
-                configDomStore), futureExecutor) {
-            @Override
-            @SuppressWarnings("checkstyle:hiddenField")
-            public FluentFuture<? extends CommitInfo> commit(DOMDataTreeWriteTransaction writeTx,
-                    Collection<DOMStoreThreePhaseCommitCohort> cohorts) {
-                commitCohorts.addAll(cohorts);
-                latch.countDown();
-                return super.commit(writeTx, cohorts);
-            }
-        }) {
-            DOMDataTreeReadWriteTransaction domDataReadWriteTransaction = dataBroker.newReadWriteTransaction();
-
-            domDataReadWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY,
-                    mock(NormalizedNode.class));
-            domDataReadWriteTransaction.merge(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY,
-                    mock(NormalizedNode.class));
-
-            domDataReadWriteTransaction.commit();
-
-            assertTrue(latch.await(10, TimeUnit.SECONDS));
-
-            assertTrue(commitCohorts.size() == 2);
-        }
-    }
-
     @Test
     public void testCreateTransactionChain() {
         DOMStore domStore = mock(DOMStore.class);
@@ -525,7 +372,7 @@ public class ConcurrentDOMDataBrokerTest {
                 LogicalDatastoreType.OPERATIONAL, domStore, LogicalDatastoreType.CONFIGURATION, domStore),
                 futureExecutor)) {
 
-            dataBroker.createTransactionChain(mock(DOMTransactionChainListener.class));
+            dataBroker.createTransactionChain();
 
             verify(domStore, times(2)).createTransactionChain();
         }
@@ -545,15 +392,14 @@ public class ConcurrentDOMDataBrokerTest {
             doReturn(mockChain).when(domStore).createTransactionChain();
             doReturn(operationalTransaction).when(mockChain).newWriteOnlyTransaction();
 
-            DOMTransactionChain transactionChain = dataBroker.createTransactionChain(
-                    mock(DOMTransactionChainListener.class));
+            DOMTransactionChain transactionChain = dataBroker.createTransactionChain();
 
             DOMDataTreeWriteTransaction domDataWriteTransaction = transactionChain.newWriteOnlyTransaction();
 
             verify(mockChain, never()).newWriteOnlyTransaction();
 
-            domDataWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY,
-                    mock(NormalizedNode.class));
+            domDataWriteTransaction.put(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(),
+                    mock(ContainerNode.class));
         }
     }
 
@@ -580,26 +426,21 @@ public class ConcurrentDOMDataBrokerTest {
 
     @Test
     public void testExtensions() {
-        DistributedDataStore mockConfigStore = mock(DistributedDataStore.class);
-        DistributedDataStore mockOperStore = mock(DistributedDataStore.class);
-        try (ConcurrentDOMDataBroker dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
+        final var mockConfigStore = mock(ClientBackedDataStore.class);
+        final var mockOperStore = mock(ClientBackedDataStore.class);
+        try (var dataBroker = new ConcurrentDOMDataBroker(ImmutableMap.of(
                 LogicalDatastoreType.OPERATIONAL, mockOperStore,
                 LogicalDatastoreType.CONFIGURATION, mockConfigStore), futureExecutor)) {
+            assertNotNull(dataBroker.extension(DataTreeChangeExtension.class));
 
-            ClassToInstanceMap<DOMDataBrokerExtension> supportedExtensions = dataBroker.getExtensions();
-            assertNotNull(supportedExtensions.getInstance(DOMDataTreeChangeService.class));
-
-            DOMDataTreeCommitCohortRegistry cohortRegistry = supportedExtensions.getInstance(
-                DOMDataTreeCommitCohortRegistry.class);
+            final var cohortRegistry = dataBroker.extension(CommitCohortExtension.class);
             assertNotNull(cohortRegistry);
 
-            DOMDataTreeCommitCohort mockCohort = mock(DOMDataTreeCommitCohort.class);
-            DOMDataTreeIdentifier path = new DOMDataTreeIdentifier(
-                    org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION,
-                    YangInstanceIdentifier.EMPTY);
-            cohortRegistry.registerCommitCohort(path, mockCohort);
+            final var cohort = mock(DOMDataTreeCommitCohort.class);
+            final var path = DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.of());
+            cohortRegistry.registerCommitCohort(path, cohort);
 
-            verify(mockConfigStore).registerCommitCohort(path, mockCohort);
+            verify(mockConfigStore).registerCommitCohort(path, cohort);
         }
     }
 }
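
The hunk above swaps the ClassToInstanceMap-based getExtensions() lookup for the typed extension(Class) accessor, and uses the static DOMDataTreeIdentifier.of()/YangInstanceIdentifier.of() factories. A minimal sketch of the new idiom, reusing the test's existing imports; the dataBroker and cohort variables are assumed to be set up as in testExtensions():

    // Typed lookups; the assertNotNull checks above imply a supported extension is returned directly.
    final var changes = dataBroker.extension(DataTreeChangeExtension.class);
    final var cohorts = dataBroker.extension(CommitCohortExtension.class);

    // Register a commit cohort at the configuration root, as the test does.
    final var rootPath = DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.of());
    cohorts.registerCommitCohort(rootPath, cohort);
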
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/TestClientBackedDataStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/TestClientBackedDataStore.java
new file mode 100644 (file)
index 0000000..9b790ce
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.databroker;
+
+import akka.actor.ActorSystem;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
+import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
+import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
+import org.opendaylight.controller.cluster.datastore.LocalShardStore;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.shardmanager.AbstractShardManagerCreator;
+import org.opendaylight.controller.cluster.datastore.shardmanager.TestShardManager;
+import org.opendaylight.controller.cluster.datastore.shardmanager.TestShardManager.GetLocalShards;
+import org.opendaylight.controller.cluster.datastore.shardmanager.TestShardManager.GetLocalShardsReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+
+public class TestClientBackedDataStore extends ClientBackedDataStore implements LocalShardStore {
+
+    public TestClientBackedDataStore(final ActorSystem actorSystem, final ClusterWrapper cluster,
+                                     final Configuration configuration,
+                                     final DatastoreContextFactory datastoreContextFactory,
+                                     final DatastoreSnapshot restoreFromSnapshot) {
+        super(actorSystem, cluster, configuration, datastoreContextFactory, restoreFromSnapshot);
+    }
+
+    TestClientBackedDataStore(final ActorUtils actorUtils, final ClientIdentifier identifier,
+                              final DataStoreClient clientActor) {
+        super(actorUtils, identifier, clientActor);
+    }
+
+    @Override
+    protected AbstractShardManagerCreator<?> getShardManagerCreator() {
+        return new TestShardManager.TestShardManagerCreator();
+    }
+
+    @Override
+    public GetLocalShardsReply getLocalShards() {
+        final ActorUtils utils = getActorUtils();
+        return (GetLocalShardsReply) utils.executeOperation(utils.getShardManager(), GetLocalShards.INSTANCE);
+    }
+}
index a9fa06d655a4a00337e36db850b2ff1c63270472..37f38810ce5a8a4a344720a6a7dc2e867713fe8c 100644 (file)
@@ -7,10 +7,12 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.lenient;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
@@ -20,15 +22,14 @@ import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.function.Function;
+import java.util.List;
+import java.util.Map;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
@@ -44,17 +45,19 @@ import org.opendaylight.controller.cluster.access.concepts.RequestFailure;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.SuccessEnvelope;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Promise;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<AbstractProxyTransaction>> {
-
     private static final String PERSISTENCE_ID = "per-1";
-    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.EMPTY;
+    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
 
     @Mock
     private DataTree dataTree;
@@ -68,7 +71,6 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
 
     @Before
     public void setUp() throws Exception {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         final TestProbe contextProbe = new TestProbe(system, "context");
         final TestProbe clientContextProbe = new TestProbe(system, "client-context");
@@ -84,12 +86,11 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         client.getConnection(0L);
         contextProbe.expectMsgClass(ConnectClientRequest.class);
         final long sequence = 0L;
-        contextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(),
-                Collections.emptyList(), dataTree, 3));
+        contextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(), List.of(), dataTree, 3));
         final InternalCommand<ShardBackendInfo> command = clientContextProbe.expectMsgClass(InternalCommand.class);
         command.execute(client);
         //data tree mock
-        when(dataTree.takeSnapshot()).thenReturn(dataTreeSnapshot);
+        doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
 
         handle = createHandle(parent);
     }
@@ -113,7 +114,7 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
 
     @Test
     public void testGetIdentifier() {
-        Assert.assertEquals(TRANSACTION_ID, handle.getIdentifier());
+        assertEquals(TRANSACTION_ID, handle.getIdentifier());
     }
 
     @Test
@@ -122,7 +123,7 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         handle.abort();
         final Envelope<?> envelope = backendProbe.expectMsgClass(Envelope.class);
         final AbortLocalTransactionRequest request = (AbortLocalTransactionRequest) envelope.getMessage();
-        Assert.assertEquals(TRANSACTION_ID, request.getTarget());
+        assertEquals(TRANSACTION_ID, request.getTarget());
         checkClosed();
     }
 
@@ -132,31 +133,27 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         handle.localAbort(new RuntimeException("fail"));
         final Envelope<?> envelope = backendProbe.expectMsgClass(Envelope.class);
         final AbortLocalTransactionRequest request = (AbortLocalTransactionRequest) envelope.getMessage();
-        Assert.assertEquals(TRANSACTION_ID, request.getTarget());
+        assertEquals(TRANSACTION_ID, request.getTarget());
         checkClosed();
     }
 
     @Test
     public void testEnsureClosed() {
         doHandleOperation(handle);
-        final Collection<AbstractProxyTransaction> transactions = handle.ensureClosed();
-        Assert.assertNotNull(transactions);
-        Assert.assertEquals(1, transactions.size());
+        final Map<Long, AbstractProxyTransaction> transactions = handle.ensureClosed();
+        assertNotNull(transactions);
+        assertEquals(1, transactions.size());
     }
 
     @Test
     public void testEnsureProxy() {
-        final Function<Long, AbstractProxyTransaction> function = mock(Function.class);
-        final AbstractProxyTransaction expected = mock(AbstractProxyTransaction.class);
-        when(function.apply(0L)).thenReturn(expected);
-        final AbstractProxyTransaction proxy = handle.ensureProxy(PATH, function);
-        verify(function).apply(0L);
-        Assert.assertEquals(expected, proxy);
+        final var proxy = handle.ensureProxy(PATH);
+        assertEquals(0, proxy.getIdentifier().getTransactionId());
     }
 
     @Test
     public void testParent() {
-        Assert.assertEquals(parent, handle.parent());
+        assertEquals(parent, handle.parent());
     }
 
     protected void checkClosed() throws Exception {
@@ -175,7 +172,7 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
     protected <R extends Request<?, R>> R backendRespondToRequest(final Class<R> expectedRequestClass,
                                                             final Response<?, ?> response) {
         final RequestEnvelope envelope = backendProbe.expectMsgClass(RequestEnvelope.class);
-        Assert.assertEquals(expectedRequestClass, envelope.getMessage().getClass());
+        assertEquals(expectedRequestClass, envelope.getMessage().getClass());
         final AbstractClientConnection<ShardBackendInfo> connection = client.getConnection(0L);
         final long sessionId = envelope.getSessionId();
         final long txSequence = envelope.getTxSequence();
@@ -206,8 +203,13 @@ public abstract class AbstractClientHandleTest<T extends AbstractClientHandle<Ab
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
         promise.success(shardInfo);
-        when(mock.findPrimaryShardAsync(any())).thenReturn(promise.future());
+        doReturn(promise.future()).when(mock).findPrimaryShardAsync(any());
+
+        final EffectiveModelContext context = mock(EffectiveModelContext.class);
+        lenient().doCallRealMethod().when(context).getQName();
+        lenient().doReturn(context).when(mock).getSchemaContext();
+        lenient().doReturn(DatastoreContext.newBuilder().build()).when(mock).getDatastoreContext();
+
         return mock;
     }
-
 }
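
Above, MockitoAnnotations.initMocks() and when/thenReturn give way to the strict-stubs runner with doReturn/when, plus lenient() for stubbings that only some subclasses exercise. A self-contained sketch of that idiom, with an invented Backend interface standing in for the mocked collaborators:

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.lenient;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.mockito.Mock;
    import org.mockito.junit.MockitoJUnitRunner;

    @RunWith(MockitoJUnitRunner.StrictStubs.class)
    public class StrictStubsExampleTest {
        interface Backend {
            int size();
            String name();
        }

        @Mock
        private Backend backend;

        @Test
        public void sizeIsStubbed() {
            // Strict stubs fail the test when a stubbing goes unused, so stub only what the test needs ...
            doReturn(3).when(backend).size();
            // ... and mark optional stubbings as lenient().
            lenient().doReturn("example").when(backend).name();

            assertEquals(3, backend.size());
        }
    }
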
index 12f7478bd16cd981faf4bf846f3d7704119bcfb7..7f934622ab215ac82ffe762661b2a67828ca6f21 100644 (file)
@@ -7,41 +7,44 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import com.google.common.primitives.UnsignedLong;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mock;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
 import scala.concurrent.Promise;
 import scala.concurrent.impl.Promise.DefaultPromise;
 
 public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory> {
     protected static final String SHARD_NAME = "default";
     protected static final String PERSISTENCE_ID = "per-1";
-    protected static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(CLIENT_ID, 1L);
+    protected static final LocalHistoryIdentifier HISTORY_ID = new LocalHistoryIdentifier(TestUtils.CLIENT_ID, 1L);
 
     @Mock
     private DataTree tree;
+    @Mock
+    private DatastoreContext datastoreContext;
 
     protected abstract T object();
 
@@ -70,62 +73,63 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
 
     @Test
     public void testCreateSnapshotProxy() {
-        final AbstractProxyTransaction snapshotProxy = object().createSnapshotProxy(TRANSACTION_ID, 0L);
-        Assert.assertNotNull(snapshotProxy);
-        Assert.assertNotEquals(TRANSACTION_ID, snapshotProxy.getIdentifier());
+        final AbstractProxyTransaction snapshotProxy = object().createSnapshotProxy(TestUtils.TRANSACTION_ID, 0L);
+        assertNotNull(snapshotProxy);
+        assertNotEquals(TestUtils.TRANSACTION_ID, snapshotProxy.getIdentifier());
     }
 
     @Test
     public void testCreateTransactionProxy() {
-        AbstractProxyTransaction transactionProxy = object().createTransactionProxy(TRANSACTION_ID, 0L);
-        Assert.assertNotNull(transactionProxy);
-        Assert.assertNotEquals(TRANSACTION_ID, transactionProxy.getIdentifier());
+        AbstractProxyTransaction transactionProxy = object().createTransactionProxy(TestUtils.TRANSACTION_ID, 0L);
+        assertNotNull(transactionProxy);
+        assertNotEquals(TestUtils.TRANSACTION_ID, transactionProxy.getIdentifier());
     }
 
     @Test
     public void testState() {
-        Assert.assertEquals(AbstractClientHistory.State.IDLE, object().state());
+        assertEquals(AbstractClientHistory.State.IDLE, object().state());
     }
 
     @Test
     public void testUpdateState() {
         object().updateState(AbstractClientHistory.State.IDLE, AbstractClientHistory.State.CLOSED);
-        Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+        assertEquals(AbstractClientHistory.State.CLOSED, object().state());
     }
 
     @Test
     public void testDoClose() {
-        object().createTransactionProxy(TRANSACTION_ID, 0L);
+        object().createTransactionProxy(TestUtils.TRANSACTION_ID, 0L);
         object().doClose();
-        Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+        assertEquals(AbstractClientHistory.State.CLOSED, object().state());
     }
 
     @Test
     public void testGetIdentifier() {
-        Assert.assertEquals(HISTORY_ID, object().getIdentifier());
+        assertEquals(HISTORY_ID, object().getIdentifier());
     }
 
     @Test
     public void testNextTx() {
-        Assert.assertTrue(object().nextTx() + 1 == object().nextTx());
+        assertEquals(object().nextTx() + 1, object().nextTx());
     }
 
     @Test
     public void testResolveShardForPath() {
-        final Long shardForPath = object().resolveShardForPath(YangInstanceIdentifier.EMPTY);
-        Assert.assertEquals(0L, shardForPath.longValue());
+        final Long shardForPath = object().resolveShardForPath(YangInstanceIdentifier.of());
+        assertNotNull(shardForPath);
+        assertEquals(0L, (long) shardForPath);
     }
 
     @Test
     public void testLocalAbort() {
         object().localAbort(new Throwable());
-        Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+        assertEquals(AbstractClientHistory.State.CLOSED, object().state());
     }
 
     @Test
     public void testOnProxyDestroyed() {
-        final ProxyHistory proxyHistory = Mockito.mock(ProxyHistory.class);
-        when(proxyHistory.getIdentifier()).thenReturn(HISTORY_ID);
+        final ProxyHistory proxyHistory = mock(ProxyHistory.class);
+        doReturn(HISTORY_ID).when(proxyHistory).getIdentifier();
 
         object().onProxyDestroyed(proxyHistory);
         verify(proxyHistory).getIdentifier();
@@ -134,13 +138,13 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
     @Test
     public void testCreateTransaction() {
         final ClientTransaction transaction = object().createTransaction();
-        Assert.assertNotNull(transaction);
+        assertNotNull(transaction);
     }
 
     @Test
     public void testTakeSnapshot() {
         final ClientSnapshot clientSnapshot = object().takeSnapshot();
-        Assert.assertEquals(object().getIdentifier(), clientSnapshot.getIdentifier().getHistoryId());
+        assertEquals(object().getIdentifier(), clientSnapshot.getIdentifier().getHistoryId());
     }
 
     @Test
@@ -153,10 +157,10 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
                 SHARD_NAME, UnsignedLong.ZERO, Optional.of(tree), 10);
         final ConnectedClientConnection<ShardBackendInfo> newConn = AccessClientUtil.createConnectedConnection(
                 clientActorContext(), cookie, info);
-        object().createSnapshotProxy(TRANSACTION_ID, shard);
+        object().createSnapshotProxy(TestUtils.TRANSACTION_ID, shard);
 
         final HistoryReconnectCohort reconnectCohort = object().startReconnect(newConn);
-        Assert.assertNotNull(reconnectCohort);
+        assertNotNull(reconnectCohort);
     }
 
     @Test
@@ -169,19 +173,22 @@ public abstract class AbstractClientHistoryTest<T extends AbstractClientHistory>
                 SHARD_NAME, UnsignedLong.ZERO, Optional.of(tree), 10);
         final ConnectedClientConnection<ShardBackendInfo> newConn = AccessClientUtil.createConnectedConnection(
                 clientActorContext(), cookie, info);
-        object().createSnapshotProxy(TRANSACTION_ID, shard);
+        object().createSnapshotProxy(TestUtils.TRANSACTION_ID, shard);
 
         final HistoryReconnectCohort reconnectCohort = object().startReconnect(newConn);
-        Assert.assertNull(reconnectCohort);
+        assertNull(reconnectCohort);
     }
 
-    protected static ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
+    protected final ActorUtils createActorUtilsMock(final ActorSystem system, final ActorRef actor) {
         final ActorUtils mock = mock(ActorUtils.class);
         final Promise<PrimaryShardInfo> promise = new DefaultPromise<>();
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
         promise.success(shardInfo);
-        when(mock.findPrimaryShardAsync(any())).thenReturn(promise.future());
+        doReturn(promise.future()).when(mock).findPrimaryShardAsync(any());
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(mock).getDatastoreContext();
+
         return mock;
     }
 }
\ No newline at end of file
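
In the hunk above, createActorUtilsMock() completes a Scala Promise before handing its future to the stub, so findPrimaryShardAsync() resolves immediately; the sibling tests follow the same shape. A reduced sketch of that technique using only classes already referenced in the hunk (the helper name is illustrative):

    // Illustrative helper: an ActorUtils mock whose primary-shard lookup is already resolved.
    private static ActorUtils resolvedActorUtils(final ActorSystem system, final ActorRef actor) {
        final ActorUtils mock = mock(ActorUtils.class);
        final Promise<PrimaryShardInfo> promise = new DefaultPromise<>();
        // Complete the promise up front ...
        promise.success(new PrimaryShardInfo(system.actorSelection(actor.path()), (short) 0));
        // ... so every caller of findPrimaryShardAsync() receives an already-completed future.
        doReturn(promise.future()).when(mock).findPrimaryShardAsync(any());
        return mock;
    }
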
index bdba5d13e57379103aecf8e4c9c939ad805ed899..9254802810b38f3c4cdf2b86f1e6690377bd6eb0 100644 (file)
@@ -7,9 +7,12 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 
 import akka.actor.ActorRef;
@@ -18,10 +21,9 @@ import akka.actor.ActorSystem;
 import akka.actor.Status;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
-import java.util.Collections;
+import java.util.List;
 import java.util.Optional;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
@@ -30,12 +32,13 @@ import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.InternalCommand;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 import scala.concurrent.Promise;
 
 public abstract class AbstractDataStoreClientBehaviorTest {
@@ -48,16 +51,17 @@ public abstract class AbstractDataStoreClientBehaviorTest {
     private TestProbe clientActorProbe;
     private TestProbe actorContextProbe;
     private AbstractDataStoreClientBehavior behavior;
+    private ActorUtils util;
 
     @Before
     public void setUp() {
         system = ActorSystem.apply();
         clientActorProbe = new TestProbe(system, "client");
         actorContextProbe = new TestProbe(system, "actor-context");
-        final ActorUtils context = createActorContextMock(system, actorContextProbe.ref());
+        util = createActorContextMock(system, actorContextProbe.ref());
         clientContext =
                 AccessClientUtil.createClientActorContext(system, clientActorProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        behavior = createBehavior(clientContext, context);
+        behavior = createBehavior(clientContext, util);
     }
 
     @SuppressWarnings("checkstyle:hiddenField")
@@ -71,7 +75,7 @@ public abstract class AbstractDataStoreClientBehaviorTest {
 
     @Test
     public void testResolveShardForPath() {
-        Assert.assertEquals(0L, behavior.resolveShardForPath(YangInstanceIdentifier.EMPTY).longValue());
+        assertEquals(0L, behavior.resolveShardForPath(YangInstanceIdentifier.of()).longValue());
     }
 
     @Test
@@ -85,32 +89,32 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         final GetClientRequest request = new GetClientRequest(probe.ref());
         final AbstractDataStoreClientBehavior nextBehavior = behavior.onCommand(request);
         final Status.Success success = probe.expectMsgClass(Status.Success.class);
-        Assert.assertEquals(behavior, success.status());
-        Assert.assertSame(behavior, nextBehavior);
+        assertEquals(behavior, success.status());
+        assertSame(behavior, nextBehavior);
     }
 
     @Test
     public void testOnCommandUnhandled() {
         final AbstractDataStoreClientBehavior nextBehavior = behavior.onCommand("unhandled");
-        Assert.assertSame(behavior, nextBehavior);
+        assertSame(behavior, nextBehavior);
     }
 
     @Test
     public void testCreateLocalHistory() {
         final ClientLocalHistory history = behavior.createLocalHistory();
-        Assert.assertEquals(behavior.getIdentifier(), history.getIdentifier().getClientId());
+        assertEquals(behavior.getIdentifier(), history.getIdentifier().getClientId());
     }
 
     @Test
     public void testCreateTransaction() {
         final ClientTransaction transaction = behavior.createTransaction();
-        Assert.assertEquals(behavior.getIdentifier(), transaction.getIdentifier().getHistoryId().getClientId());
+        assertEquals(behavior.getIdentifier(), transaction.getIdentifier().getHistoryId().getClientId());
     }
 
     @Test
     public void testCreateSnapshot() {
         final ClientSnapshot snapshot = behavior.createSnapshot();
-        Assert.assertEquals(behavior.getIdentifier(), snapshot.getIdentifier().getHistoryId().getClientId());
+        assertEquals(behavior.getIdentifier(), snapshot.getIdentifier().getHistoryId().getClientId());
     }
 
     @Test
@@ -119,48 +123,49 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         final InternalCommand<ShardBackendInfo> internalCommand =
                 clientActorProbe.expectMsgClass(InternalCommand.class);
         internalCommand.execute(behavior);
-        try {
-            behavior.createLocalHistory();
-            Assert.fail("Behavior is closed and shouldn't allow to create new history.");
-        } catch (final IllegalStateException e) {
-            //ok
-        }
+
+        assertThrows(IllegalStateException.class, () -> behavior.createLocalHistory());
     }
 
     @Test
     public void testGetIdentifier() {
-        Assert.assertEquals(CLIENT_ID, behavior.getIdentifier());
+        assertEquals(CLIENT_ID, behavior.getIdentifier());
     }
 
     @Test
     public void testGetConnection() {
+        final var datastoreContext = mock(DatastoreContext.class);
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(util).getDatastoreContext();
+
         //set up data tree mock
         final CursorAwareDataTreeModification modification = mock(CursorAwareDataTreeModification.class);
-        when(modification.readNode(YangInstanceIdentifier.EMPTY)).thenReturn(Optional.empty());
+        doReturn(Optional.empty()).when(modification).readNode(YangInstanceIdentifier.of());
         final DataTreeSnapshot snapshot = mock(DataTreeSnapshot.class);
-        when(snapshot.newModification()).thenReturn(modification);
+        doReturn(modification).when(snapshot).newModification();
         final DataTree dataTree = mock(DataTree.class);
-        when(dataTree.takeSnapshot()).thenReturn(snapshot);
+        doReturn(snapshot).when(dataTree).takeSnapshot();
 
         final TestProbe backendProbe = new TestProbe(system, "backend");
         final long shard = 0L;
-        behavior.createTransaction().read(YangInstanceIdentifier.EMPTY);
+
+        behavior.createTransaction().read(YangInstanceIdentifier.of());
         final AbstractClientConnection<ShardBackendInfo> connection = behavior.getConnection(shard);
         //check cached connection for same shard
-        Assert.assertSame(connection, behavior.getConnection(shard));
+        assertSame(connection, behavior.getConnection(shard));
 
         final ConnectClientRequest connectClientRequest = actorContextProbe.expectMsgClass(ConnectClientRequest.class);
-        Assert.assertEquals(CLIENT_ID, connectClientRequest.getTarget());
+        assertEquals(CLIENT_ID, connectClientRequest.getTarget());
         final long sequence = 0L;
-        Assert.assertEquals(sequence, connectClientRequest.getSequence());
-        actorContextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(),
-                Collections.emptyList(), dataTree, 3));
-        Assert.assertEquals(clientActorProbe.ref(), connection.localActor());
+        assertEquals(sequence, connectClientRequest.getSequence());
+        actorContextProbe.reply(new ConnectClientSuccess(CLIENT_ID, sequence, backendProbe.ref(), List.of(), dataTree,
+                3));
+        assertEquals(clientActorProbe.ref(), connection.localActor());
         //capture and execute command passed to client context
         final InternalCommand<ShardBackendInfo> command = clientActorProbe.expectMsgClass(InternalCommand.class);
         command.execute(behavior);
         //check whether the command was replayed
-        verify(modification).readNode(YangInstanceIdentifier.EMPTY);
+        verify(modification).readNode(YangInstanceIdentifier.of());
     }
 
     private static ActorUtils createActorContextMock(final ActorSystem system, final ActorRef actor) {
@@ -169,8 +174,7 @@ public abstract class AbstractDataStoreClientBehaviorTest {
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
         promise.success(shardInfo);
-        when(mock.findPrimaryShardAsync(SHARD)).thenReturn(promise.future());
+        doReturn(promise.future()).when(mock).findPrimaryShardAsync(SHARD);
         return mock;
     }
-
 }
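
The closed-behavior test above folds the manual try/fail/catch pattern into a single assertThrows call. The shape of that migration as a sketch, where behavior is the field from the test:

    // Before: manual catch-and-fail
    try {
        behavior.createLocalHistory();
        fail("Behavior is closed and should not allow creating a new history.");
    } catch (IllegalStateException e) {
        // expected
    }

    // After: one assertion, which also fails if nothing is thrown
    assertThrows(IllegalStateException.class, () -> behavior.createLocalHistory());
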
index a4c653f01e34b5dace3e4a179ea62d4eec032c71..b28c84b09e64c75a710f961053fbcbcf693c9dfe 100644 (file)
@@ -7,9 +7,14 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import static org.hamcrest.CoreMatchers.both;
+import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.CoreMatchers.hasItem;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.core.Is.isA;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -27,11 +32,11 @@ import java.util.function.Consumer;
 import org.hamcrest.BaseMatcher;
 import org.hamcrest.Description;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
@@ -56,13 +61,18 @@ import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifie
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransaction> {
     protected static final TransactionIdentifier TRANSACTION_ID = TestUtils.TRANSACTION_ID;
     private static final ClientIdentifier CLIENT_ID = TestUtils.CLIENT_ID;
@@ -77,11 +87,11 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
     protected static final YangInstanceIdentifier PATH_3 = YangInstanceIdentifier.builder()
             .node(QName.create("ns-1", "node-3"))
             .build();
-    protected static final ContainerNode DATA_1 = Builders.containerBuilder()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(PATH_1.getLastPathArgument().getNodeType()))
+    protected static final ContainerNode DATA_1 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(PATH_1.getLastPathArgument().getNodeType()))
             .build();
-    protected static final ContainerNode DATA_2 = Builders.containerBuilder()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(PATH_2.getLastPathArgument().getNodeType()))
+    protected static final ContainerNode DATA_2 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(PATH_2.getLastPathArgument().getNodeType()))
             .build();
     protected static final String PERSISTENCE_ID = "per-1";
 
@@ -89,6 +99,11 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
     private DataTreeSnapshot snapshot;
     @Mock
     private AbstractClientHistory history;
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private ActorUtils actorUtils;
+
     private ActorSystem system;
     private TestProbe backendProbe;
     private TestProbe clientContextProbe;
@@ -98,21 +113,27 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         clientContextProbe = new TestProbe(system, "clientContext");
         backendProbe = new TestProbe(system, "backend");
         context = AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID,
                 PERSISTENCE_ID);
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
+
         final ProxyHistory parent = ProxyHistory.createClient(history, connection, HISTORY_ID);
         transaction = createTransaction(parent, TestUtils.TRANSACTION_ID, snapshot);
         tester = new TransactionTester<>(transaction, connection, backendProbe);
     }
 
+    protected final void mockForRemote() {
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+    }
+
     @SuppressWarnings("checkstyle:hiddenField")
     protected abstract T createTransaction(ProxyHistory parent, TransactionIdentifier id, DataTreeSnapshot snapshot);
 
@@ -192,47 +213,46 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         transaction.replayMessages(mockSuccessor, entries);
 
         final ModifyTransactionRequest transformed = successor.expectTransactionRequest(ModifyTransactionRequest.class);
-        Assert.assertNotNull(transformed);
-        Assert.assertEquals(successful1.getSequence(), transformed.getSequence());
-        Assert.assertTrue(transformed.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.ABORT, transformed.getPersistenceProtocol().get());
+        assertNotNull(transformed);
+        assertEquals(successful1.getSequence(), transformed.getSequence());
+        assertEquals(Optional.of(PersistenceProtocol.ABORT), transformed.getPersistenceProtocol());
 
         ReadTransactionRequest tmpRead = successor.expectTransactionRequest(ReadTransactionRequest.class);
-        Assert.assertNotNull(tmpRead);
-        Assert.assertEquals(successful2.getTarget(), tmpRead.getTarget());
-        Assert.assertEquals(successful2.getSequence(), tmpRead.getSequence());
-        Assert.assertEquals(successful2.getPath(), tmpRead.getPath());
-        Assert.assertEquals(successor.localActor(), tmpRead.getReplyTo());
+        assertNotNull(tmpRead);
+        assertEquals(successful2.getTarget(), tmpRead.getTarget());
+        assertEquals(successful2.getSequence(), tmpRead.getSequence());
+        assertEquals(successful2.getPath(), tmpRead.getPath());
+        assertEquals(successor.localActor(), tmpRead.getReplyTo());
 
         tmpRead = successor.expectTransactionRequest(ReadTransactionRequest.class);
-        Assert.assertNotNull(tmpRead);
-        Assert.assertEquals(request1.getTarget(), tmpRead.getTarget());
-        Assert.assertEquals(request1.getSequence(), tmpRead.getSequence());
-        Assert.assertEquals(request1.getPath(), tmpRead.getPath());
-        Assert.assertEquals(successor.localActor(), tmpRead.getReplyTo());
+        assertNotNull(tmpRead);
+        assertEquals(request1.getTarget(), tmpRead.getTarget());
+        assertEquals(request1.getSequence(), tmpRead.getSequence());
+        assertEquals(request1.getPath(), tmpRead.getPath());
+        assertEquals(successor.localActor(), tmpRead.getReplyTo());
 
         final ExistsTransactionRequest tmpExist = successor.expectTransactionRequest(ExistsTransactionRequest.class);
-        Assert.assertNotNull(tmpExist);
-        Assert.assertEquals(request2.getTarget(), tmpExist.getTarget());
-        Assert.assertEquals(request2.getSequence(), tmpExist.getSequence());
-        Assert.assertEquals(request2.getPath(), tmpExist.getPath());
-        Assert.assertEquals(successor.localActor(), tmpExist.getReplyTo());
+        assertNotNull(tmpExist);
+        assertEquals(request2.getTarget(), tmpExist.getTarget());
+        assertEquals(request2.getSequence(), tmpExist.getSequence());
+        assertEquals(request2.getPath(), tmpExist.getPath());
+        assertEquals(successor.localActor(), tmpExist.getReplyTo());
     }
 
     protected void checkModifications(final ModifyTransactionRequest modifyRequest) {
         final List<TransactionModification> modifications = modifyRequest.getModifications();
-        Assert.assertEquals(3, modifications.size());
-        Assert.assertThat(modifications, hasItem(both(isA(TransactionWrite.class)).and(hasPath(PATH_1))));
-        Assert.assertThat(modifications, hasItem(both(isA(TransactionMerge.class)).and(hasPath(PATH_2))));
-        Assert.assertThat(modifications, hasItem(both(isA(TransactionDelete.class)).and(hasPath(PATH_3))));
+        assertEquals(3, modifications.size());
+        assertThat(modifications, hasItem(allOf(isA(TransactionWrite.class), hasPath(PATH_1))));
+        assertThat(modifications, hasItem(allOf(isA(TransactionMerge.class), hasPath(PATH_2))));
+        assertThat(modifications, hasItem(allOf(isA(TransactionDelete.class), hasPath(PATH_3))));
     }
 
     @SuppressWarnings("checkstyle:hiddenField")
-    protected <R extends TransactionRequest<R>> void testRequestResponse(final Consumer<VotingFuture<Void>> consumer,
+    protected <R extends TransactionRequest<R>> void testRequestResponse(final Consumer<VotingFuture<Empty>> consumer,
             final Class<R> expectedRequest,
             final BiFunction<TransactionIdentifier, Long, TransactionSuccess<?>> replySupplier) {
         final TransactionTester<T> tester = getTester();
-        final VotingFuture<Void> future = mock(VotingFuture.class);
+        final VotingFuture<Empty> future = mock(VotingFuture.class);
         transaction.seal();
         consumer.accept(future);
         final TransactionRequest<?> req = tester.expectTransactionRequest(expectedRequest);
@@ -244,9 +264,9 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         transaction.handleReplayedRemoteRequest(request, createCallbackMock(), Ticker.systemTicker().read());
         final RequestEnvelope envelope = backendProbe.expectMsgClass(RequestEnvelope.class);
         final R received = (R) envelope.getMessage();
-        Assert.assertTrue(received.getClass().equals(request.getClass()));
-        Assert.assertEquals(TRANSACTION_ID, received.getTarget());
-        Assert.assertEquals(clientContextProbe.ref(), received.getReplyTo());
+        assertTrue(received.getClass().equals(request.getClass()));
+        assertEquals(TRANSACTION_ID, received.getTarget());
+        assertEquals(clientContextProbe.ref(), received.getReplyTo());
         return received;
     }
 
@@ -269,7 +289,7 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
     }
 
     protected static BaseMatcher<TransactionModification> hasPath(final YangInstanceIdentifier path) {
-        return new BaseMatcher<TransactionModification>() {
+        return new BaseMatcher<>() {
 
             @Override
             public boolean matches(final Object item) {
@@ -299,7 +319,7 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         final TestProbe clientContextProbe = new TestProbe(system, "clientContext2");
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
@@ -317,13 +337,18 @@ public abstract class AbstractProxyTransactionTest<T extends AbstractProxyTransa
         final TestProbe clientContextProbe = new TestProbe(system, "remoteClientContext");
         final TestProbe backendProbe = new TestProbe(system, "remoteBackend");
         final AbstractClientHistory history = mock(AbstractClientHistory.class);
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 5);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
         final ProxyHistory proxyHistory = ProxyHistory.createClient(history, connection, HISTORY_ID);
+
         final RemoteProxyTransaction transaction =
                 new RemoteProxyTransaction(proxyHistory, TRANSACTION_ID, false, false, false);
         return new TransactionTester<>(transaction, connection, backendProbe);
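
In the hunk above, DATA_1 and DATA_2 now come from ImmutableNodes.newContainerBuilder() in yang-data-spi instead of the removed Builders.containerBuilder(). A minimal sketch of building an empty container node the new way, assuming the hunk's imports; the QName is illustrative:

    // Empty container node addressed by a NodeIdentifier, in the style of DATA_1/DATA_2.
    final ContainerNode data = ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(QName.create("ns-1", "node-1")))
            .build();
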
index 2b234304003cace8490c201247319768a814c1c3..298470021ee20c04c35f4fdb963c5f10a04cbca9 100644 (file)
@@ -7,23 +7,29 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 
 import akka.actor.ActorSystem;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLocalHistory> {
     private ActorSystem system;
     private AbstractDataStoreClientBehavior behavior;
@@ -37,14 +43,12 @@ public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLoca
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
         system = ActorSystem.apply();
 
         final TestProbe clientContextProbe = new TestProbe(system, "client");
         final TestProbe actorContextProbe = new TestProbe(system, "actor-context");
         clientActorContext = AccessClientUtil.createClientActorContext(
-                system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
+                system, clientContextProbe.ref(), TestUtils.CLIENT_ID, PERSISTENCE_ID);
         final ActorUtils actorUtilsMock = createActorUtilsMock(system, actorContextProbe.ref());
         behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorUtilsMock, SHARD_NAME);
 
@@ -69,21 +73,21 @@ public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLoca
     @Test
     public void testClose() {
         object().close();
-        Assert.assertEquals(AbstractClientHistory.State.CLOSED, object().state());
+        assertEquals(AbstractClientHistory.State.CLOSED, object().state());
     }
 
     @Override
     @Test
     public void testDoCreateTransaction() {
         final ClientTransaction clientTransaction = object().doCreateTransaction();
-        Assert.assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
+        assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
     }
 
     @Override
     @Test
     public void testOnTransactionAbort() {
         final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
-        Assert.assertTrue(clientSnapshot.abort());
+        assertTrue(clientSnapshot.abort());
     }
 
     @Override
@@ -91,14 +95,14 @@ public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLoca
     public void testCreateHistoryProxy() {
         final AbstractClientConnection<ShardBackendInfo> clientConnection = behavior.getConnection(0L);
         final ProxyHistory historyProxy = object().createHistoryProxy(HISTORY_ID, clientConnection);
-        Assert.assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
+        assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
     }
 
     @Override
     @Test
     public void testDoCreateSnapshot() {
         final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
-        Assert.assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
+        assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
                 clientSnapshot.getIdentifier().getHistoryId());
     }
 
@@ -110,28 +114,30 @@ public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLoca
         // make transaction ready
         object().onTransactionReady(tx, cohort);
         // state should be set to IDLE
-        Assert.assertEquals(AbstractClientHistory.State.IDLE, object.state());
+        assertEquals(AbstractClientHistory.State.IDLE, object.state());
 
         // complete transaction
         object().onTransactionComplete(tx.getIdentifier());
         // state is still IDLE
-        Assert.assertEquals(AbstractClientHistory.State.IDLE, object.state());
+        assertEquals(AbstractClientHistory.State.IDLE, object.state());
     }
 
     @Override
     @Test
     public void testOnTransactionReady() {
-        final AbstractTransactionCommitCohort result = object().onTransactionReady(
-                object().createTransaction(), cohort);
-        Assert.assertEquals(result, cohort);
+        final AbstractTransactionCommitCohort result = object().onTransactionReady(object().createTransaction(),
+            cohort);
+        assertEquals(result, cohort);
     }
 
     @Override
-    @Test(expected = IllegalStateException.class)
+    @Test
     public void testOnTransactionReadyDuplicate() {
         final ClientTransaction tx = object().createTransaction();
         object().onTransactionReady(tx, cohort);
-        object().onTransactionReady(tx, cohort);
+        final IllegalStateException ise = assertThrows(IllegalStateException.class,
+            () -> object().onTransactionReady(tx, cohort));
+        assertThat(ise.getMessage(), containsString(" is idle when readying transaction "));
     }
 
     @Test
@@ -139,7 +145,7 @@ public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLoca
         object().updateState(AbstractClientHistory.State.IDLE, AbstractClientHistory.State.TX_OPEN);
         final AbstractTransactionCommitCohort transactionCommitCohort =
                 object().onTransactionReady(transaction, cohort);
-        Assert.assertEquals(cohort, transactionCommitCohort);
+        assertEquals(cohort, transactionCommitCohort);
     }
 
     @Test
@@ -147,11 +153,13 @@ public class ClientLocalHistoryTest extends AbstractClientHistoryTest<ClientLoca
         object().updateState(AbstractClientHistory.State.IDLE, AbstractClientHistory.State.CLOSED);
         final AbstractTransactionCommitCohort transactionCommitCohort =
                 object().onTransactionReady(transaction, cohort);
-        Assert.assertEquals(cohort, transactionCommitCohort);
+        assertEquals(cohort, transactionCommitCohort);
     }
 
-    @Test(expected = IllegalStateException.class)
+    @Test
     public void testOnTransactionReadyAndCompleteIdleFail() {
-        object().onTransactionReady(transaction, cohort);
+        final IllegalStateException ise = assertThrows(IllegalStateException.class,
+            () -> object().onTransactionReady(transaction, cohort));
+        assertThat(ise.getMessage(), endsWith(" is idle when readying transaction null"));
     }
 }
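
The duplicate-ready tests above replace @Test(expected = ...) with assertThrows plus Hamcrest matchers on the exception message. A self-contained sketch of that pairing; the Gate class and its message are invented:

    import static org.hamcrest.CoreMatchers.containsString;
    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.junit.Assert.assertThrows;

    import org.junit.Test;

    public class ExceptionMessageExampleTest {
        static final class Gate {
            private boolean open;

            void open() {
                if (open) {
                    throw new IllegalStateException("Gate is already open when opening again");
                }
                open = true;
            }
        }

        @Test
        public void secondOpenFails() {
            final Gate gate = new Gate();
            gate.open();
            // Unlike @Test(expected = ...), the thrown exception is captured so its message can be checked.
            final IllegalStateException ise = assertThrows(IllegalStateException.class, gate::open);
            assertThat(ise.getMessage(), containsString("already open"));
        }
    }
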
index 8e5d5650f8780f28ebd5d10b140b4555c876abc1..cc50a9ba580002eb72faf6fdeb950d2ae161c124 100644 (file)
@@ -9,26 +9,23 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
 
-import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 public class ClientSnapshotTest extends AbstractClientHandleTest<ClientSnapshot> {
-
-    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.EMPTY;
+    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of();
 
     @Before
     @Override
     public void setUp() throws Exception {
         super.setUp();
-        when(getDataTreeSnapshot().readNode(PATH)).thenReturn(Optional.empty());
+        doReturn(Optional.empty()).when(getDataTreeSnapshot()).readNode(PATH);
     }
 
     @Override
@@ -43,16 +40,15 @@ public class ClientSnapshotTest extends AbstractClientHandleTest<ClientSnapshot>
 
     @Test
     public void testExists() throws Exception {
-        final ListenableFuture<Boolean> exists = getHandle().exists(PATH);
+        final var exists = getHandle().exists(PATH);
         verify(getDataTreeSnapshot()).readNode(PATH);
         assertEquals(Boolean.FALSE, getWithTimeout(exists));
     }
 
     @Test
     public void testRead() throws Exception {
-        final ListenableFuture<Optional<NormalizedNode<?, ?>>> exists = getHandle().read(PATH);
+        final var read = getHandle().read(PATH);
         verify(getDataTreeSnapshot()).readNode(PATH);
-        assertFalse(getWithTimeout(exists).isPresent());
+        assertFalse(getWithTimeout(read).isPresent());
     }
-
 }
index 481c7d753189961de4d86575ca70fd5aa92355d7..e54b275c9516fd858aaeb6a0461f0ac1ed2e1c70 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.doReturn;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
@@ -27,11 +29,11 @@ import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
@@ -47,26 +49,38 @@ import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitR
 import org.opendaylight.controller.cluster.access.commands.TransactionPreCommitSuccess;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
 import org.opendaylight.controller.cluster.access.concepts.RuntimeRequestException;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.yangtools.yang.common.Empty;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ClientTransactionCommitCohortTest {
-
     private static final String PERSISTENCE_ID = "per-1";
     private static final int TRANSACTIONS = 3;
 
+    private final List<TransactionTester<RemoteProxyTransaction>> transactions = new ArrayList<>();
+
     @Mock
     private AbstractClientHistory history;
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private ActorUtils actorUtils;
+
     private ActorSystem system;
-    private List<TransactionTester<RemoteProxyTransaction>> transactions;
     private ClientTransactionCommitCohort cohort;
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         final TestProbe clientContextProbe = new TestProbe(system, "clientContext");
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
-        transactions = new ArrayList<>();
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+
         for (int i = 0; i < TRANSACTIONS; i++) {
             transactions.add(createTransactionTester(new TestProbe(system, "backend" + i), context, history));
         }
@@ -96,7 +110,7 @@ public class ClientTransactionCommitCohortTest {
     @Test
     public void testPreCommit() throws Exception {
         testOpSuccess(ClientTransactionCommitCohort::preCommit, this::expectPreCommit, this::replyPreCommitSuccess,
-                null);
+            Empty.value());
     }
 
     @Test
@@ -106,7 +120,8 @@ public class ClientTransactionCommitCohortTest {
 
     @Test
     public void testCommit() throws Exception {
-        testOpSuccess(ClientTransactionCommitCohort::commit, this::expectCommit, this::replyCommitSuccess, null);
+        testOpSuccess(ClientTransactionCommitCohort::commit, this::expectCommit, this::replyCommitSuccess,
+            CommitInfo.empty());
     }
 
     @Test
@@ -116,7 +131,7 @@ public class ClientTransactionCommitCohortTest {
 
     @Test
     public void testAbort() throws Exception {
-        testOpSuccess(ClientTransactionCommitCohort::abort, this::expectAbort, this::replyAbortSuccess, null);
+        testOpSuccess(ClientTransactionCommitCohort::abort, this::expectAbort, this::replyAbortSuccess, Empty.value());
     }
 
     @Test
@@ -126,8 +141,7 @@ public class ClientTransactionCommitCohortTest {
 
     private void expectCanCommit(final TransactionTester<RemoteProxyTransaction> tester) {
         final ModifyTransactionRequest request = tester.expectTransactionRequest(ModifyTransactionRequest.class);
-        Assert.assertTrue(request.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.THREE_PHASE, request.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.THREE_PHASE), request.getPersistenceProtocol());
     }
 
     void expectPreCommit(final TransactionTester<?> tester) {
@@ -169,7 +183,7 @@ public class ClientTransactionCommitCohortTest {
     private static TransactionTester<RemoteProxyTransaction> createTransactionTester(final TestProbe backendProbe,
                                                              final ClientActorContext context,
                                                              final AbstractClientHistory history) {
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
@@ -205,7 +219,7 @@ public class ClientTransactionCommitCohortTest {
                                    final T expectedResult) throws Exception {
         final ListenableFuture<T> result = operation.apply(cohort);
         replySuccess(transactions, expectFunction, replyFunction);
-        Assert.assertEquals(expectedResult, getWithTimeout(result));
+        assertEquals(expectedResult, getWithTimeout(result));
     }
 
     /**
@@ -234,7 +248,7 @@ public class ClientTransactionCommitCohortTest {
         //check future fail
         final ExecutionException exception =
                 assertOperationThrowsException(() -> getWithTimeout(canCommit), ExecutionException.class);
-        Assert.assertEquals(e, exception.getCause());
+        assertEquals(e, exception.getCause());
     }
 
 }
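Several hunks in this file collapse assertTrue(x.isPresent()) followed by assertEquals(..., x.get()) into a single assertEquals against Optional.of(...). A stand-alone sketch of that assertion style, with invented values:

import static org.junit.Assert.assertEquals;

import java.util.Optional;
import org.junit.Test;

public class OptionalAssertionStyleTest {
    @Test
    public void compareOptionalsDirectly() {
        final Optional<String> actual = Optional.of("data");
        // One assertion covers presence and value; a failure prints both Optionals.
        assertEquals(Optional.of("data"), actual);
    }
}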
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCursorTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/databroker/actors/dds/ClientTransactionCursorTest.java
deleted file mode 100644
index 2e7f8f3..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.databroker.actors.dds;
-
-import static org.mockito.Mockito.verify;
-
-import java.util.Arrays;
-import java.util.stream.Collectors;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-
-public class ClientTransactionCursorTest {
-
-    private static final QName NODE_1 = QName.create("ns-1", "node-1");
-    private static final QName NODE_2 = QName.create(NODE_1, "node-2");
-    private static final QName NODE_3 = QName.create(NODE_1, "node-3");
-
-    @Mock
-    private ClientTransaction transaction;
-    private ClientTransactionCursor cursor;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        cursor = new ClientTransactionCursor(transaction);
-    }
-
-    @Test
-    public void testEnterOneNode() {
-        cursor.enter(YangInstanceIdentifier.NodeIdentifier.create(NODE_1));
-        cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
-        final YangInstanceIdentifier expected = createId(NODE_1, NODE_2);
-        verify(transaction).delete(expected);
-    }
-
-    @Test
-    public void testEnterNodeIterables() {
-        final Iterable<YangInstanceIdentifier.PathArgument> collect = toPathArg(NODE_1, NODE_2);
-        cursor.enter(collect);
-        cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_3));
-        final YangInstanceIdentifier expected = createId(NODE_1, NODE_2, NODE_3);
-        verify(transaction).delete(expected);
-    }
-
-    @Test
-    public void testEnterNodeVarargs() {
-        cursor.enter(YangInstanceIdentifier.NodeIdentifier.create(NODE_1),
-                YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
-        cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_3));
-        final YangInstanceIdentifier expected = createId(NODE_1, NODE_2, NODE_3);
-        verify(transaction).delete(expected);
-    }
-
-    @Test
-    public void testExitOneLevel() {
-        cursor.enter(toPathArg(NODE_1, NODE_2));
-        cursor.exit();
-        cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
-        final YangInstanceIdentifier expected = createId(NODE_1, NODE_2);
-        verify(transaction).delete(expected);
-    }
-
-    @Test
-    public void testExitTwoLevels() {
-        cursor.enter(toPathArg(NODE_1, NODE_2, NODE_3));
-        cursor.exit(2);
-        cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_2));
-        final YangInstanceIdentifier expected = createId(NODE_1, NODE_2);
-        verify(transaction).delete(expected);
-    }
-
-    @Test
-    public void testClose() {
-        cursor.close();
-        verify(transaction).closeCursor(cursor);
-    }
-
-    @Test
-    public void testDelete() {
-        cursor.delete(YangInstanceIdentifier.NodeIdentifier.create(NODE_1));
-        final YangInstanceIdentifier expected = createId(NODE_1);
-        verify(transaction).delete(expected);
-    }
-
-    @Test
-    public void testMerge() {
-        final YangInstanceIdentifier.NodeIdentifier path = YangInstanceIdentifier.NodeIdentifier.create(NODE_1);
-        final ContainerNode data = createData(path.getNodeType());
-        cursor.merge(path, data);
-        final YangInstanceIdentifier expected = createId(NODE_1);
-        verify(transaction).merge(expected, data);
-    }
-
-    @Test
-    public void testWrite() {
-        final YangInstanceIdentifier.NodeIdentifier path = YangInstanceIdentifier.NodeIdentifier.create(NODE_1);
-        final ContainerNode data = createData(path.getNodeType());
-        cursor.write(path, data);
-        final YangInstanceIdentifier expected = createId(NODE_1);
-        verify(transaction).write(expected, data);
-    }
-
-    private static Iterable<YangInstanceIdentifier.PathArgument> toPathArg(final QName... pathArguments) {
-        return Arrays.stream(pathArguments)
-                .map(YangInstanceIdentifier.NodeIdentifier::create)
-                .collect(Collectors.toList());
-    }
-
-    private static YangInstanceIdentifier createId(final QName... pathArguments) {
-        return YangInstanceIdentifier.create(toPathArg(pathArguments));
-    }
-
-    private static ContainerNode createData(final QName id) {
-        return Builders.containerBuilder()
-                .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(id))
-                .build();
-    }
-
-}
\ No newline at end of file
index bb8fbf15e63ff587dabb36244a61c2b4f5f92acc..74b3b6252d356ef371d4bdbff750daf471634475 100644
@@ -8,12 +8,10 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertFutureEquals;
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertOperationThrowsException;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
 
 import com.google.common.util.concurrent.FluentFuture;
@@ -24,22 +22,22 @@ import org.junit.Test;
 import org.mockito.Mock;
 import org.opendaylight.controller.cluster.access.commands.CommitLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionCommitSuccess;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
+import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
 
 public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransaction> {
-
-    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.builder()
-            .node(QName.create("ns-1", "node-1"))
-            .build();
-    private static final NormalizedNode<?, ?> DATA = Builders.containerBuilder()
-            .withNodeIdentifier(YangInstanceIdentifier.NodeIdentifier.create(PATH.getLastPathArgument().getNodeType()))
-            .build();
+    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier.of(QName.create("ns-1", "node-1"));
+    private static final ContainerNode DATA = ImmutableNodes.newContainerBuilder()
+        .withNodeIdentifier(NodeIdentifier.create(PATH.getLastPathArgument().getNodeType()))
+        .build();
 
     @Mock
     private CursorAwareDataTreeModification modification;
@@ -62,20 +60,6 @@ public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransa
         transaction.read(PATH);
     }
 
-    @Test
-    public void testOpenCloseCursor() {
-        final DOMDataTreeWriteCursor cursor = getHandle().openCursor();
-        getHandle().closeCursor(cursor);
-        getHandle().openCursor().delete(PATH.getLastPathArgument());
-        verify(modification).delete(PATH);
-    }
-
-    @Test
-    public void testOpenSecondCursor() throws Exception {
-        getHandle().openCursor();
-        assertOperationThrowsException(getHandle()::openCursor, IllegalStateException.class);
-    }
-
     @Test
     public void testExists() throws Exception {
         final FluentFuture<Boolean> exists = getHandle().exists(PATH);
@@ -85,11 +69,9 @@ public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransa
 
     @Test
     public void testRead() throws Exception {
-        final FluentFuture<Optional<NormalizedNode<?, ?>>> resultFuture = getHandle().read(PATH);
+        final FluentFuture<Optional<NormalizedNode>> resultFuture = getHandle().read(PATH);
         verify(modification).readNode(PATH);
-        final Optional<NormalizedNode<?, ?>> result = getWithTimeout(resultFuture);
-        assertTrue(result.isPresent());
-        assertEquals(DATA, result.get());
+        assertEquals(Optional.of(DATA), getWithTimeout(resultFuture));
     }
 
     @Test
@@ -114,8 +96,8 @@ public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransa
     public void testReadyEmpty() throws Exception {
         final DOMStoreThreePhaseCommitCohort cohort = getHandle().ready();
         assertFutureEquals(Boolean.TRUE, cohort.canCommit());
-        assertFutureEquals(null, cohort.preCommit());
-        assertFutureEquals(null, cohort.commit());
+        assertFutureEquals(Empty.value(), cohort.preCommit());
+        assertFutureEquals(CommitInfo.empty(), cohort.commit());
     }
 
     @Test
@@ -128,8 +110,8 @@ public class ClientTransactionTest extends AbstractClientHandleTest<ClientTransa
                 backendRespondToRequest(CommitLocalTransactionRequest.class, response);
         assertEquals(modification, request.getModification());
         assertFutureEquals(Boolean.TRUE, actual);
-        assertFutureEquals(null, cohort.preCommit());
-        assertFutureEquals(null, cohort.commit());
+        assertFutureEquals(Empty.value(), cohort.preCommit());
+        assertFutureEquals(CommitInfo.empty(), cohort.commit());
     }
 
     @Test
index b543d77221e7d7287b903ccfb164c073321c68cc..32f062ec6ba623fc6f9dbf9ec490e38cf20790fe 100644
@@ -8,8 +8,8 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.verify;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.HISTORY_ID;
@@ -25,8 +25,9 @@ import java.util.Optional;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
@@ -35,24 +36,34 @@ import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequ
 import org.opendaylight.controller.cluster.access.commands.PersistenceProtocol;
 import org.opendaylight.controller.cluster.access.commands.TransactionCommitSuccess;
 import org.opendaylight.controller.cluster.access.concepts.RequestSuccess;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class DirectTransactionCommitCohortTest {
-
     private static final String PERSISTENCE_ID = "per-1";
 
     @Mock
     private AbstractClientHistory history;
+    @Mock
+    private DatastoreContext datastoreContext;
+    @Mock
+    private ActorUtils actorUtils;
+
     private ActorSystem system;
     private TransactionTester<?> transaction;
     private DirectTransactionCommitCohort cohort;
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         final TestProbe clientContextProbe = new TestProbe(system, "clientContext");
         final ClientActorContext context =
                 AccessClientUtil.createClientActorContext(system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
+        doReturn(1000).when(datastoreContext).getShardBatchedModificationCount();
+        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
+        doReturn(actorUtils).when(history).actorUtils();
+
         transaction = createTransactionTester(new TestProbe(system, "backend"), context, history);
         final AbstractProxyTransaction proxy = transaction.getTransaction();
         proxy.seal();
@@ -68,8 +79,7 @@ public class DirectTransactionCommitCohortTest {
     public void testCanCommit() throws Exception {
         final ListenableFuture<Boolean> canCommit = cohort.canCommit();
         final ModifyTransactionRequest request = transaction.expectTransactionRequest(ModifyTransactionRequest.class);
-        assertTrue(request.getPersistenceProtocol().isPresent());
-        assertEquals(PersistenceProtocol.SIMPLE, request.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.SIMPLE), request.getPersistenceProtocol());
         final RequestSuccess<?, ?> success = new TransactionCommitSuccess(transaction.getTransaction().getIdentifier(),
                 transaction.getLastReceivedMessage().getSequence());
         transaction.replySuccess(success);
@@ -78,28 +88,28 @@ public class DirectTransactionCommitCohortTest {
 
     @Test
     public void testPreCommit() throws Exception {
-        final ListenableFuture<Void> preCommit = cohort.preCommit();
-        assertNull(getWithTimeout(preCommit));
+        final ListenableFuture<?> preCommit = cohort.preCommit();
+        assertNotNull(getWithTimeout(preCommit));
     }
 
     @Test
     public void testAbort() throws Exception {
-        final ListenableFuture<Void> abort = cohort.abort();
+        final ListenableFuture<?> abort = cohort.abort();
         verify(history).onTransactionComplete(transaction.getTransaction().getIdentifier());
-        assertNull(getWithTimeout(abort));
+        assertNotNull(getWithTimeout(abort));
     }
 
     @Test
     public void testCommit() throws Exception {
-        final ListenableFuture<Void> commit = cohort.commit();
+        final ListenableFuture<?> commit = cohort.commit();
         verify(history).onTransactionComplete(transaction.getTransaction().getIdentifier());
-        assertNull(getWithTimeout(commit));
+        assertNotNull(getWithTimeout(commit));
     }
 
     private static TransactionTester<?> createTransactionTester(final TestProbe backendProbe,
                                                                 final ClientActorContext context,
                                                                 final AbstractClientHistory history) {
-        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.BORON,
+        final ShardBackendInfo backend = new ShardBackendInfo(backendProbe.ref(), 0L, ABIVersion.current(),
                 "default", UnsignedLong.ZERO, Optional.empty(), 3);
         final AbstractClientConnection<ShardBackendInfo> connection =
                 AccessClientUtil.createConnectedConnection(context, 0L, backend);
index a546955ab82ae306abfa9126e9a49edfa6ce2597..312edb335c9a01ad195af5fdb3ac984a6e252d50 100644
@@ -8,10 +8,12 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
+import java.util.Set;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
@@ -20,11 +22,16 @@ public class DistributedDataStoreClientBehaviorTest extends AbstractDataStoreCli
     @Override
     protected AbstractDataStoreClientBehavior createBehavior(final ClientActorContext clientContext,
                                                              final ActorUtils context) {
-        final ShardStrategyFactory factory = mock(ShardStrategyFactory.class);
         final ShardStrategy strategy = mock(ShardStrategy.class);
-        when(strategy.findShard(any())).thenReturn(SHARD);
-        when(factory.getStrategy(any())).thenReturn(strategy);
-        when(context.getShardStrategyFactory()).thenReturn(factory);
+        doReturn(SHARD).when(strategy).findShard(any());
+        final ShardStrategyFactory factory = mock(ShardStrategyFactory.class);
+        doReturn(strategy).when(factory).getStrategy(any());
+        doReturn(factory).when(context).getShardStrategyFactory();
+
+        final Configuration config = mock(Configuration.class);
+        doReturn(Set.of(SHARD)).when(config).getAllShardNames();
+        doReturn(config).when(context).getConfiguration();
+
         return new DistributedDataStoreClientBehavior(clientContext, context);
     }
 }
index f68fce4e231acb038013beed34735350e8a8c56a..fbfdc0a924cdec64f3d7dc89c2b711d5b2a7a7b5 100644
@@ -8,20 +8,20 @@
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
 import static org.mockito.Mockito.verify;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.TRANSACTION_ID;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.getWithTimeout;
 
 import com.google.common.util.concurrent.ListenableFuture;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class EmptyTransactionCommitCohortTest {
-
     @Mock
     private AbstractClientHistory history;
 
@@ -29,7 +29,6 @@ public class EmptyTransactionCommitCohortTest {
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         cohort = new EmptyTransactionCommitCohort(history, TRANSACTION_ID);
     }
 
@@ -41,22 +40,21 @@ public class EmptyTransactionCommitCohortTest {
 
     @Test
     public void testPreCommit() throws Exception {
-        final ListenableFuture<Void> preCommit = cohort.preCommit();
-        assertNull(getWithTimeout(preCommit));
+        assertNotNull(getWithTimeout(cohort.preCommit()));
     }
 
     @Test
     public void testAbort() throws Exception {
-        final ListenableFuture<Void> abort = cohort.abort();
+        final ListenableFuture<?> abort = cohort.abort();
         verify(history).onTransactionComplete(TRANSACTION_ID);
-        assertNull(getWithTimeout(abort));
+        assertNotNull(getWithTimeout(abort));
     }
 
     @Test
     public void testCommit() throws Exception {
-        final ListenableFuture<Void> commit = cohort.commit();
+        final ListenableFuture<?> commit = cohort.commit();
         verify(history).onTransactionComplete(TRANSACTION_ID);
-        Assert.assertNull(getWithTimeout(commit));
+        assertNotNull(getWithTimeout(commit));
     }
 
 }
\ No newline at end of file
index 2543ca8247fc8886c6fd2a5b61853dc831a5439f..104981c1fd6839899e01f6aea578e29f2d6635a1 100644
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -17,7 +19,6 @@ import akka.testkit.TestProbe;
 import com.google.common.base.Ticker;
 import java.util.Optional;
 import java.util.function.Consumer;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.invocation.InvocationOnMock;
@@ -35,8 +36,8 @@ import org.opendaylight.controller.cluster.access.commands.ReadTransactionSucces
 import org.opendaylight.controller.cluster.access.commands.TransactionPurgeRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.concepts.Response;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
 
 public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         extends AbstractProxyTransactionTest<T> {
@@ -64,7 +65,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
     @SuppressWarnings("unchecked")
     private void setupExecuteInActor() {
         doAnswer(inv -> {
-            inv.<InternalCommand<?>>getArgument(0).execute(mock(ClientActorBehavior.class));
+            inv.getArgument(0, InternalCommand.class).execute(mock(ClientActorBehavior.class));
             return null;
         }).when(context).executeInActor(any(InternalCommand.class));
     }
@@ -81,10 +82,9 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         final ArgumentCaptor<Response<?, ?>> captor = ArgumentCaptor.forClass(Response.class);
         verify(callback).accept(captor.capture());
         final Response<?, ?> value = captor.getValue();
-        Assert.assertTrue(value instanceof ReadTransactionSuccess);
+        assertTrue(value instanceof ReadTransactionSuccess);
         final ReadTransactionSuccess success = (ReadTransactionSuccess) value;
-        Assert.assertTrue(success.getData().isPresent());
-        Assert.assertEquals(DATA_1, success.getData().get());
+        assertEquals(Optional.of(DATA_1), success.getData());
     }
 
     @Test
@@ -99,16 +99,15 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         final ArgumentCaptor<Response<?, ?>> captor = ArgumentCaptor.forClass(Response.class);
         verify(callback).accept(captor.capture());
         final Response<?, ?> value = captor.getValue();
-        Assert.assertTrue(value instanceof ExistsTransactionSuccess);
+        assertTrue(value instanceof ExistsTransactionSuccess);
         final ExistsTransactionSuccess success = (ExistsTransactionSuccess) value;
-        Assert.assertTrue(success.getExists());
+        assertTrue(success.getExists());
     }
 
     @Test
     public void testHandleForwardedRemotePurgeRequest() {
         final TestProbe probe = createProbe();
-        final TransactionPurgeRequest request =
-                new TransactionPurgeRequest(TRANSACTION_ID, 0L, probe.ref());
+        final TransactionPurgeRequest request = new TransactionPurgeRequest(TRANSACTION_ID, 0L, probe.ref());
         testHandleForwardedRemoteRequest(request);
     }
 
@@ -118,8 +117,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         final TestProbe probe = createProbe();
         final AbortLocalTransactionRequest request = new AbortLocalTransactionRequest(TRANSACTION_ID, probe.ref());
         final ModifyTransactionRequest modifyRequest = testForwardToRemote(request, ModifyTransactionRequest.class);
-        Assert.assertTrue(modifyRequest.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.ABORT, modifyRequest.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.ABORT), modifyRequest.getPersistenceProtocol());
     }
 
     @Override
@@ -132,8 +130,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
         doAnswer(LocalProxyTransactionTest::applyToCursorAnswer).when(modification).applyToCursor(any());
         final ModifyTransactionRequest modifyRequest = testForwardToRemote(request, ModifyTransactionRequest.class);
         verify(modification).applyToCursor(any());
-        Assert.assertTrue(modifyRequest.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.THREE_PHASE, modifyRequest.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.THREE_PHASE), modifyRequest.getPersistenceProtocol());
         checkModifications(modifyRequest);
     }
 
@@ -152,7 +149,7 @@ public abstract class LocalProxyTransactionTest<T extends LocalProxyTransaction>
     }
 
     protected <R extends TransactionRequest<R>> R testForwardToLocal(final TransactionRequest<?> toForward,
-                                                                  final Class<R> expectedMessageClass) {
+                                                                     final Class<R> expectedMessageClass) {
         final Consumer<Response<?, ?>> callback = createCallbackMock();
         final TransactionTester<LocalReadWriteProxyTransaction> transactionTester = createLocalProxy();
         final LocalReadWriteProxyTransaction successor = transactionTester.getTransaction();
index 7a4fb742038a5ed6d15305b5437f1fe6565d1a20..651c7d2028591693e76dc7a4c508d779d1803068 100644
@@ -7,6 +7,9 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.when;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertOperationThrowsException;
 
@@ -14,16 +17,14 @@ import akka.testkit.TestProbe;
 import com.google.common.base.Ticker;
 import com.google.common.base.VerifyException;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ModifyTransactionRequestBuilder;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 public class LocalReadOnlyProxyTransactionTest extends LocalProxyTransactionTest<LocalReadOnlyProxyTransaction> {
-
     private DataTreeSnapshot snapshot;
 
     @Override
@@ -39,74 +40,76 @@ public class LocalReadOnlyProxyTransactionTest extends LocalProxyTransactionTest
 
     @Test
     public void testIsSnapshotOnly() {
-        Assert.assertTrue(transaction.isSnapshotOnly());
+        assertTrue(transaction.isSnapshotOnly());
     }
 
     @Test
     public void testReadOnlyView() {
-        Assert.assertEquals(snapshot, transaction.readOnlyView());
+        assertEquals(snapshot, transaction.readOnlyView());
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testDirectCommit() {
-        transaction.directCommit();
+        assertThrows(UnsupportedOperationException.class, () -> transaction.directCommit());
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testCanCommit() {
-        transaction.canCommit(new VotingFuture<>(new Object(), 1));
+        assertThrows(UnsupportedOperationException.class,
+            () -> transaction.canCommit(new VotingFuture<>(new Object(), 1)));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testPreCommit() {
-        transaction.preCommit(new VotingFuture<>(new Object(), 1));
+        assertThrows(UnsupportedOperationException.class,
+            () -> transaction.preCommit(new VotingFuture<>(new Object(), 1)));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testDoCommit() {
-        transaction.doCommit(new VotingFuture<>(new Object(), 1));
+        assertThrows(UnsupportedOperationException.class,
+            () -> transaction.doCommit(new VotingFuture<>(new Object(), 1)));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testDelete() {
-        transaction.delete(PATH_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.delete(PATH_1));
     }
 
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testMerge() {
-        transaction.merge(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.merge(PATH_1, DATA_1));
     }
 
+    @Test
     @Override
-    @Test(expected = UnsupportedOperationException.class)
     public void testWrite() {
-        transaction.write(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.write(PATH_1, DATA_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testDoDelete() {
-        transaction.doDelete(PATH_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.doDelete(PATH_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testDoMerge() {
-        transaction.doMerge(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.doMerge(PATH_1, DATA_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testDoWrite() {
-        transaction.doWrite(PATH_1, DATA_1);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.doWrite(PATH_1, DATA_1));
     }
 
-    @Test(expected = UnsupportedOperationException.class)
+    @Test
     public void testCommitRequest() {
-        transaction.commitRequest(true);
+        assertThrows(UnsupportedOperationException.class, () -> transaction.commitRequest(true));
     }
 
     @Test
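The file above replaces @Test(expected = UnsupportedOperationException.class) with JUnit 4.13's assertThrows. A stand-alone sketch of the pattern, with an invented exception message: only the lambda is expected to throw, and the returned exception can be inspected further.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThrows;

import org.junit.Test;

public class AssertThrowsStyleTest {
    @Test
    public void onlyTheLambdaMayThrow() {
        // Unlike @Test(expected = ...), the expectation is scoped to a single call,
        // and the caught exception is returned for further assertions.
        final UnsupportedOperationException ex = assertThrows(UnsupportedOperationException.class,
            () -> { throw new UnsupportedOperationException("read-only transaction"); });
        assertEquals("read-only transaction", ex.getMessage());
    }
}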
index 2a38f183ef05443d7b7ccaf298e0c9290147f532..ad772d0703e25ceac187fd062362b2c06c6ef9cd 100644
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -20,7 +22,6 @@ import com.google.common.base.Ticker;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.Optional;
 import java.util.function.Consumer;
-import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mock;
 import org.opendaylight.controller.cluster.access.commands.AbortLocalTransactionRequest;
@@ -40,9 +41,9 @@ import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
 import org.opendaylight.controller.cluster.access.concepts.Response;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.CursorAwareDataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.CursorAwareDataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTest<LocalReadWriteProxyTransaction> {
     @Mock
@@ -60,12 +61,12 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
 
     @Test
     public void testIsSnapshotOnly() {
-        Assert.assertFalse(transaction.isSnapshotOnly());
+        assertFalse(transaction.isSnapshotOnly());
     }
 
     @Test
     public void testReadOnlyView() {
-        Assert.assertEquals(modification, transaction.readOnlyView());
+        assertEquals(modification, transaction.readOnlyView());
     }
 
     @Test
@@ -125,8 +126,8 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
         transaction.doWrite(PATH_1, DATA_1);
         final boolean coordinated = true;
         final CommitLocalTransactionRequest request = transaction.commitRequest(coordinated);
-        Assert.assertEquals(coordinated, request.isCoordinated());
-        Assert.assertEquals(modification, request.getModification());
+        assertEquals(coordinated, request.isCoordinated());
+        assertEquals(modification, request.getModification());
     }
 
     @Test
@@ -141,7 +142,7 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
     public void testSealOnly() throws Exception {
         assertOperationThrowsException(() -> transaction.getSnapshot(), IllegalStateException.class);
         transaction.sealOnly();
-        Assert.assertEquals(modification, transaction.getSnapshot());
+        assertEquals(modification, transaction.getSnapshot());
     }
 
     @Test
@@ -150,7 +151,7 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
         final RemoteProxyTransaction successor = transactionTester.getTransaction();
         doAnswer(LocalProxyTransactionTest::applyToCursorAnswer).when(modification).applyToCursor(any());
         transaction.sealOnly();
-        final TransactionRequest<?> request = transaction.flushState().get();
+        final TransactionRequest<?> request = transaction.flushState().orElseThrow();
         transaction.forwardToSuccessor(successor, request, null);
         verify(modification).applyToCursor(any());
         transactionTester.getTransaction().seal();
@@ -244,8 +245,8 @@ public class LocalReadWriteProxyTransactionTest extends LocalProxyTransactionTes
         verify(modification).delete(PATH_3);
         final CommitLocalTransactionRequest commitRequest =
                 getTester().expectTransactionRequest(CommitLocalTransactionRequest.class);
-        Assert.assertEquals(modification, commitRequest.getModification());
-        Assert.assertEquals(coordinated, commitRequest.isCoordinated());
+        assertEquals(modification, commitRequest.getModification());
+        assertEquals(coordinated, commitRequest.isCoordinated());
     }
 
 }
index f44cdf327d2d2f1a4088375545e02f00cf521e84..6ef398c78bb3a390520d03e930537eecb64868f1 100644
@@ -7,12 +7,13 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
@@ -21,17 +22,17 @@ import akka.actor.Status;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Collections;
+import java.util.List;
 import java.util.concurrent.CompletionStage;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientFailure;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientRequest;
 import org.opendaylight.controller.cluster.access.commands.ConnectClientSuccess;
@@ -49,9 +50,10 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutureCache;
 import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
 import scala.concurrent.Promise;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ModuleShardBackendResolverTest {
 
     private static final MemberName MEMBER_NAME = MemberName.forName("member-1");
@@ -73,20 +75,19 @@ public class ModuleShardBackendResolverTest {
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         system = ActorSystem.apply();
         contextProbe = new TestProbe(system, "context");
 
         shardManagerProbe = new TestProbe(system, "ShardManager");
 
         final ActorUtils actorUtils = createActorUtilsMock(system, contextProbe.ref());
-        when(actorUtils.getShardManager()).thenReturn(shardManagerProbe.ref());
+        doReturn(shardManagerProbe.ref()).when(actorUtils).getShardManager();
 
         moduleShardBackendResolver = new ModuleShardBackendResolver(CLIENT_ID, actorUtils);
-        when(actorUtils.getShardStrategyFactory()).thenReturn(shardStrategyFactory);
-        when(shardStrategyFactory.getStrategy(YangInstanceIdentifier.EMPTY)).thenReturn(shardStrategy);
+        doReturn(shardStrategyFactory).when(actorUtils).getShardStrategyFactory();
+        doReturn(shardStrategy).when(shardStrategyFactory).getStrategy(YangInstanceIdentifier.of());
         final PrimaryShardInfoFutureCache cache = new PrimaryShardInfoFutureCache();
-        when(actorUtils.getPrimaryShardInfoCache()).thenReturn(cache);
+        doReturn(cache).when(actorUtils).getPrimaryShardInfoCache();
     }
 
     @After
@@ -96,16 +97,16 @@ public class ModuleShardBackendResolverTest {
 
     @Test
     public void testResolveShardForPathNonNullCookie() {
-        when(shardStrategy.findShard(YangInstanceIdentifier.EMPTY)).thenReturn(DefaultShardStrategy.DEFAULT_SHARD);
-        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.EMPTY);
-        Assert.assertEquals(0L, cookie.longValue());
+        doReturn(DefaultShardStrategy.DEFAULT_SHARD).when(shardStrategy).findShard(YangInstanceIdentifier.of());
+        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.of());
+        assertEquals(0L, (long) cookie);
     }
 
     @Test
     public void testResolveShardForPathNullCookie() {
-        when(shardStrategy.findShard(YangInstanceIdentifier.EMPTY)).thenReturn("foo");
-        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.EMPTY);
-        Assert.assertEquals(1L, cookie.longValue());
+        doReturn("foo").when(shardStrategy).findShard(YangInstanceIdentifier.of());
+        final Long cookie = moduleShardBackendResolver.resolveShardForPath(YangInstanceIdentifier.of());
+        assertEquals(1L, (long) cookie);
     }
 
     @Test
@@ -114,13 +115,13 @@ public class ModuleShardBackendResolverTest {
         contextProbe.expectMsgClass(ConnectClientRequest.class);
         final TestProbe backendProbe = new TestProbe(system, "backend");
         final ConnectClientSuccess msg = new ConnectClientSuccess(CLIENT_ID, 0L, backendProbe.ref(),
-                Collections.emptyList(), dataTree, 3);
+                List.of(), dataTree, 3);
         contextProbe.reply(msg);
         final CompletionStage<ShardBackendInfo> stage = moduleShardBackendResolver.getBackendInfo(0L);
         final ShardBackendInfo shardBackendInfo = TestUtils.getWithTimeout(stage.toCompletableFuture());
-        Assert.assertEquals(0L, shardBackendInfo.getCookie().longValue());
-        Assert.assertEquals(dataTree, shardBackendInfo.getDataTree().get());
-        Assert.assertEquals(DefaultShardStrategy.DEFAULT_SHARD, shardBackendInfo.getName());
+        assertEquals(0L, shardBackendInfo.getCookie().longValue());
+        assertEquals(dataTree, shardBackendInfo.getDataTree().orElseThrow());
+        assertEquals(DefaultShardStrategy.DEFAULT_SHARD, shardBackendInfo.getName());
     }
 
     @Test
@@ -134,7 +135,7 @@ public class ModuleShardBackendResolverTest {
         final ExecutionException caught =
                 TestUtils.assertOperationThrowsException(() -> TestUtils.getWithTimeout(stage.toCompletableFuture()),
                         ExecutionException.class);
-        Assert.assertEquals(cause, caught.getCause());
+        assertEquals(cause, caught.getCause());
     }
 
     @Test
@@ -144,7 +145,7 @@ public class ModuleShardBackendResolverTest {
         contextProbe.expectMsgClass(ConnectClientRequest.class);
         final TestProbe staleBackendProbe = new TestProbe(system, "staleBackend");
         final ConnectClientSuccess msg = new ConnectClientSuccess(CLIENT_ID, 0L, staleBackendProbe.ref(),
-                Collections.emptyList(), dataTree, 3);
+                List.of(), dataTree, 3);
         contextProbe.reply(msg);
         //get backend info
         final ShardBackendInfo staleBackendInfo = TestUtils.getWithTimeout(backendInfo.toCompletableFuture());
@@ -155,11 +156,11 @@ public class ModuleShardBackendResolverTest {
         contextProbe.expectMsgClass(ConnectClientRequest.class);
         final TestProbe refreshedBackendProbe = new TestProbe(system, "refreshedBackend");
         final ConnectClientSuccess msg2 = new ConnectClientSuccess(CLIENT_ID, 1L, refreshedBackendProbe.ref(),
-                Collections.emptyList(), dataTree, 3);
+                List.of(), dataTree, 3);
         contextProbe.reply(msg2);
         final ShardBackendInfo refreshedBackendInfo = TestUtils.getWithTimeout(refreshed.toCompletableFuture());
-        Assert.assertEquals(staleBackendInfo.getCookie(), refreshedBackendInfo.getCookie());
-        Assert.assertEquals(refreshedBackendProbe.ref(), refreshedBackendInfo.getActor());
+        assertEquals(staleBackendInfo.getCookie(), refreshedBackendInfo.getCookie());
+        assertEquals(refreshedBackendProbe.ref(), refreshedBackendInfo.getActor());
     }
 
     @SuppressWarnings("unchecked")
@@ -174,7 +175,7 @@ public class ModuleShardBackendResolverTest {
         final Registration callbackReg = moduleShardBackendResolver.notifyWhenBackendInfoIsStale(mockCallback);
 
         regMessage.getCallback().accept(DefaultShardStrategy.DEFAULT_SHARD);
-        verify(mockCallback, timeout(5000)).accept(Long.valueOf(0));
+        verify(mockCallback, timeout(5000)).accept((long) 0);
 
         reset(mockCallback);
         callbackReg.close();
@@ -190,8 +191,7 @@ public class ModuleShardBackendResolverTest {
         final ActorSelection selection = system.actorSelection(actor.path());
         final PrimaryShardInfo shardInfo = new PrimaryShardInfo(selection, (short) 0);
         promise.success(shardInfo);
-        when(mock.findPrimaryShardAsync(DefaultShardStrategy.DEFAULT_SHARD)).thenReturn(promise.future());
-        when(mock.getClientDispatcher()).thenReturn(system.dispatchers().defaultGlobalDispatcher());
+        doReturn(promise.future()).when(mock).findPrimaryShardAsync(DefaultShardStrategy.DEFAULT_SHARD);
         return mock;
     }
 }
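These tests also move from the removed YangInstanceIdentifier.EMPTY constant to the YangInstanceIdentifier.of() factory methods used throughout the hunks above. A minimal sketch, assuming the yangtools yang-data-api artifact is on the classpath; the QName values are invented for illustration.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import org.junit.Test;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

public class YangInstanceIdentifierFactoryTest {
    @Test
    public void factoryMethodsReplaceEmptyConstant() {
        // of() with no arguments yields the empty identifier formerly exposed as EMPTY.
        assertTrue(YangInstanceIdentifier.of().isEmpty());

        // of(QName) builds a single-step path, as in the patched tests.
        final YangInstanceIdentifier path = YangInstanceIdentifier.of(QName.create("ns-1", "node-1"));
        assertEquals(QName.create("ns-1", "node-1"), path.getLastPathArgument().getNodeType());
    }
}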
index 2731b7d315700a6af8d032161325ce77e56ea755..41847973fb04f6532b6b9d25dbbc3cf38eee86e9 100644
@@ -10,6 +10,8 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 import static org.hamcrest.CoreMatchers.both;
 import static org.hamcrest.CoreMatchers.hasItem;
 import static org.hamcrest.CoreMatchers.isA;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
 import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.assertFutureEquals;
 
 import akka.testkit.TestProbe;
@@ -17,7 +19,6 @@ import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.List;
 import java.util.Optional;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.commands.ExistsTransactionRequest;
 import org.opendaylight.controller.cluster.access.commands.ExistsTransactionSuccess;
@@ -39,13 +40,13 @@ import org.opendaylight.controller.cluster.access.commands.TransactionWrite;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
 
 public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<RemoteProxyTransaction> {
-
     @Override
     protected RemoteProxyTransaction createTransaction(final ProxyHistory parent, final TransactionIdentifier id,
                                                        final DataTreeSnapshot snapshot) {
+        mockForRemote();
         return new RemoteProxyTransaction(parent, TRANSACTION_ID, false, false, false);
     }
 
@@ -64,9 +65,9 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
     @Test
     public void testRead() throws Exception {
         final TransactionTester<RemoteProxyTransaction> tester = getTester();
-        final FluentFuture<Optional<NormalizedNode<?, ?>>> read = transaction.read(PATH_2);
+        final FluentFuture<Optional<NormalizedNode>> read = transaction.read(PATH_2);
         final ReadTransactionRequest req = tester.expectTransactionRequest(ReadTransactionRequest.class);
-        final Optional<NormalizedNode<?, ?>> result = Optional.of(DATA_1);
+        final Optional<NormalizedNode> result = Optional.of(DATA_1);
         tester.replySuccess(new ReadTransactionSuccess(TRANSACTION_ID, req.getSequence(), result));
         assertFutureEquals(result, read);
     }
@@ -99,8 +100,7 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         final ListenableFuture<Boolean> result = transaction.directCommit();
         final TransactionTester<RemoteProxyTransaction> tester = getTester();
         final ModifyTransactionRequest req = tester.expectTransactionRequest(ModifyTransactionRequest.class);
-        Assert.assertTrue(req.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.SIMPLE, req.getPersistenceProtocol().get());
+        assertEquals(Optional.of(PersistenceProtocol.SIMPLE), req.getPersistenceProtocol());
         tester.replySuccess(new TransactionCommitSuccess(TRANSACTION_ID, req.getSequence()));
         assertFutureEquals(true, result);
     }
@@ -150,9 +150,9 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         builder.setCommit(false);
         final ModifyTransactionRequest request = builder.build();
         final ModifyTransactionRequest received = testForwardToRemote(request, ModifyTransactionRequest.class);
-        Assert.assertEquals(request.getPersistenceProtocol(), received.getPersistenceProtocol());
-        Assert.assertEquals(request.getModifications(), received.getModifications());
-        Assert.assertEquals(request.getTarget(), received.getTarget());
+        assertEquals(request.getPersistenceProtocol(), received.getPersistenceProtocol());
+        assertEquals(request.getModifications(), received.getModifications());
+        assertEquals(request.getTarget(), received.getTarget());
     }
 
     @Test
@@ -164,9 +164,9 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         builder.setCommit(true);
         final ModifyTransactionRequest request = builder.build();
         final ModifyTransactionRequest received = testForwardToRemote(request, ModifyTransactionRequest.class);
-        Assert.assertEquals(request.getPersistenceProtocol(), received.getPersistenceProtocol());
-        Assert.assertEquals(request.getModifications(), received.getModifications());
-        Assert.assertEquals(request.getTarget(), received.getTarget());
+        assertEquals(request.getPersistenceProtocol(), received.getPersistenceProtocol());
+        assertEquals(request.getModifications(), received.getModifications());
+        assertEquals(request.getTarget(), received.getTarget());
     }
 
     @Test
@@ -178,9 +178,8 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         builder.setAbort();
         final ModifyTransactionRequest request = builder.build();
         final ModifyTransactionRequest received = testForwardToRemote(request, ModifyTransactionRequest.class);
-        Assert.assertEquals(request.getTarget(), received.getTarget());
-        Assert.assertTrue(received.getPersistenceProtocol().isPresent());
-        Assert.assertEquals(PersistenceProtocol.ABORT, received.getPersistenceProtocol().get());
+        assertEquals(request.getTarget(), received.getTarget());
+        assertEquals(Optional.of(PersistenceProtocol.ABORT), received.getPersistenceProtocol());
     }
 
     @Test
@@ -189,8 +188,8 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         final ReadTransactionRequest request =
                 new ReadTransactionRequest(TRANSACTION_ID, 0L, probe.ref(), PATH_1, false);
         final ReadTransactionRequest received = testForwardToRemote(request, ReadTransactionRequest.class);
-        Assert.assertEquals(request.getTarget(), received.getTarget());
-        Assert.assertEquals(request.getPath(), received.getPath());
+        assertEquals(request.getTarget(), received.getTarget());
+        assertEquals(request.getPath(), received.getPath());
     }
 
     @Test
@@ -199,8 +198,8 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         final ExistsTransactionRequest request =
                 new ExistsTransactionRequest(TRANSACTION_ID, 0L, probe.ref(), PATH_1, false);
         final ExistsTransactionRequest received = testForwardToRemote(request, ExistsTransactionRequest.class);
-        Assert.assertEquals(request.getTarget(), received.getTarget());
-        Assert.assertEquals(request.getPath(), received.getPath());
+        assertEquals(request.getTarget(), received.getTarget());
+        assertEquals(request.getPath(), received.getPath());
     }
 
     @Test
@@ -209,7 +208,7 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         final TransactionPreCommitRequest request =
                 new TransactionPreCommitRequest(TRANSACTION_ID, 0L, probe.ref());
         final TransactionPreCommitRequest received = testForwardToRemote(request, TransactionPreCommitRequest.class);
-        Assert.assertEquals(request.getTarget(), received.getTarget());
+        assertEquals(request.getTarget(), received.getTarget());
     }
 
     @Test
@@ -218,18 +217,16 @@ public class RemoteProxyTransactionTest extends AbstractProxyTransactionTest<Rem
         final TransactionDoCommitRequest request =
                 new TransactionDoCommitRequest(TRANSACTION_ID, 0L, probe.ref());
         final TransactionDoCommitRequest received = testForwardToRemote(request, TransactionDoCommitRequest.class);
-        Assert.assertEquals(request.getTarget(), received.getTarget());
+        assertEquals(request.getTarget(), received.getTarget());
     }
 
 
-    private <T extends TransactionModification> void testModification(final Runnable modification,
-                                                                      final Class<T> cls,
-                                                                      final YangInstanceIdentifier expectedPath) {
+    private <T extends TransactionModification> void testModification(final Runnable modification, final Class<T> cls,
+            final YangInstanceIdentifier expectedPath) {
         modification.run();
         final ModifyTransactionRequest request = transaction.commitRequest(false);
         final List<TransactionModification> modifications = request.getModifications();
-        Assert.assertEquals(1, modifications.size());
-        Assert.assertThat(modifications, hasItem(both(isA(cls)).and(hasPath(expectedPath))));
+        assertEquals(1, modifications.size());
+        assertThat(modifications, hasItem(both(isA(cls)).and(hasPath(expectedPath))));
     }
-
 }
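
The replacements above fold the earlier assertTrue(optional.isPresent()) / assertEquals(..., optional.get()) pair into a single equality check against Optional.of(...). A minimal, self-contained sketch of that assertion style, using a hypothetical value rather than anything from this patch:

    import static org.junit.Assert.assertEquals;

    import java.util.Optional;
    import org.junit.Test;

    public class OptionalAssertionSketchTest {
        @Test
        public void equalityCoversPresenceAndValue() {
            final Optional<String> actual = Optional.of("optima");

            // One assertion replaces assertTrue(actual.isPresent()) followed by
            // assertEquals("optima", actual.get()); Optional.empty() fails the same check.
            assertEquals(Optional.of("optima"), actual);
        }
    }

A mismatch reports both Optionals in the failure message, so the two-step version's diagnostic value is retained.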
index 361c9267e08419082a48595ee9542e881b197596..1d68e979dbd5b25bc681de38555e34c1e194e0ef 100644 (file)
@@ -7,23 +7,28 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
-import static org.opendaylight.controller.cluster.databroker.actors.dds.TestUtils.CLIENT_ID;
+import static org.hamcrest.CoreMatchers.startsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
 
 import akka.actor.ActorSystem;
 import akka.testkit.TestProbe;
 import akka.testkit.javadsl.TestKit;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleClientHistory> {
     private ActorSystem system;
     private AbstractDataStoreClientBehavior behavior;
@@ -35,14 +40,12 @@ public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleCli
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
         system = ActorSystem.apply();
 
         final TestProbe clientContextProbe = new TestProbe(system, "client");
         final TestProbe actorContextProbe = new TestProbe(system, "actor-context");
         clientActorContext = AccessClientUtil.createClientActorContext(
-                system, clientContextProbe.ref(), CLIENT_ID, PERSISTENCE_ID);
+                system, clientContextProbe.ref(), TestUtils.CLIENT_ID, PERSISTENCE_ID);
         final ActorUtils actorUtilsMock = createActorUtilsMock(system, actorContextProbe.ref());
         behavior = new SimpleDataStoreClientBehavior(clientActorContext, actorUtilsMock, SHARD_NAME);
 
@@ -68,7 +71,7 @@ public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleCli
     @Test
     public void testDoCreateTransaction() {
         final ClientTransaction clientTransaction = object().doCreateTransaction();
-        Assert.assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
+        assertEquals(object().getIdentifier(), clientTransaction.getIdentifier().getHistoryId());
     }
 
     @Override
@@ -76,14 +79,14 @@ public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleCli
     public void testCreateHistoryProxy() {
         final AbstractClientConnection<ShardBackendInfo> clientConnection = behavior.getConnection(0L);
         final ProxyHistory historyProxy = object().createHistoryProxy(HISTORY_ID, clientConnection);
-        Assert.assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
+        assertEquals(object().getIdentifier(), historyProxy.getIdentifier());
     }
 
     @Override
     @Test
     public void testDoCreateSnapshot() {
         final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
-        Assert.assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
+        assertEquals(new TransactionIdentifier(object().getIdentifier(), object().nextTx()).getHistoryId(),
                 clientSnapshot.getIdentifier().getHistoryId());
     }
 
@@ -97,14 +100,14 @@ public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleCli
         object().onTransactionComplete(transaction.getIdentifier());
         // it is possible to make transaction ready again
         final AbstractTransactionCommitCohort result = object().onTransactionReady(transaction, cohort);
-        Assert.assertEquals(result, cohort);
+        assertEquals(cohort, result);
     }
 
     @Override
     @Test
     public void testOnTransactionAbort() {
         final ClientSnapshot clientSnapshot = object().doCreateSnapshot();
-        Assert.assertTrue(clientSnapshot.abort());
+        assertTrue(clientSnapshot.abort());
     }
 
     @Override
@@ -112,14 +115,16 @@ public class SingleClientHistoryTest extends AbstractClientHistoryTest<SingleCli
     public void testOnTransactionReady() {
         final AbstractTransactionCommitCohort result = object().onTransactionReady(
                 object().createTransaction(), cohort);
-        Assert.assertEquals(result, cohort);
+        assertEquals(cohort, result);
     }
 
     @Override
-    @Test(expected = IllegalStateException.class)
+    @Test
     public void testOnTransactionReadyDuplicate() {
         final ClientTransaction transaction = object().createTransaction();
         object().onTransactionReady(transaction, cohort);
-        object().onTransactionReady(transaction, cohort);
+        final IllegalStateException ise = assertThrows(IllegalStateException.class,
+            () -> object().onTransactionReady(transaction, cohort));
+        assertThat(ise.getMessage(), startsWith("Duplicate cohort "));
     }
 }
\ No newline at end of file
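
The hunk above moves the test to Mockito's strict-stubs runner, which initializes @Mock fields and rejects unused stubbings, so the manual MockitoAnnotations.initMocks(this) call in setUp() can be dropped. A hedged sketch of that setup with an illustrative collaborator interface (not part of this patch):

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.when;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.mockito.Mock;
    import org.mockito.junit.MockitoJUnitRunner;

    @RunWith(MockitoJUnitRunner.StrictStubs.class)
    public class StrictStubsSketchTest {
        // Hypothetical collaborator; the runner initializes the mock before each test.
        interface Backend {
            String shardName();
        }

        @Mock
        private Backend backend;

        @Test
        public void stubbingMustBeUsed() {
            when(backend.shardName()).thenReturn("cars-1");

            // Under StrictStubs an unused when(...) stubbing fails the test.
            assertEquals("cars-1", backend.shardName());
        }
    }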
index 092d262ae8fe034ccc83d073a4b585a961a3713f..f4e0be9c3d9f64b5f8607732da2b6d4ef93bbad0 100644 (file)
@@ -7,9 +7,10 @@
  */
 package org.opendaylight.controller.cluster.databroker.actors.dds;
 
+import static org.junit.Assert.assertEquals;
+
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
 import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
@@ -45,8 +46,8 @@ final class TestUtils {
      * @param <T>      type
      * @throws Exception exception
      */
-    static <T> void assertFutureEquals(final T expected, final Future<T> actual) throws Exception {
-        Assert.assertEquals(expected, getWithTimeout(actual));
+    static <T> void assertFutureEquals(final T expected, final Future<? extends T> actual) throws Exception {
+        assertEquals(expected, getWithTimeout(actual));
     }
 
     /**
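
Widening the parameter to Future<? extends T> above follows the producer-extends rule: the helper only reads from the future, so a future producing any subtype of the expected value is acceptable. A small sketch of the difference, with made-up types:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;

    public final class CovariantFutureSketch {
        private CovariantFutureSketch() {
            // utility class
        }

        // Accepts Future<Integer>, Future<Long>, ... when T is chosen as Number.
        static <T> T awaitExpected(final Future<? extends T> actual)
                throws InterruptedException, ExecutionException {
            return actual.get();
        }

        public static void main(final String[] args) throws Exception {
            final Future<Integer> answer = CompletableFuture.completedFuture(42);
            // With a plain Future<T> parameter this explicit T = Number call would not compile.
            final Number result = CovariantFutureSketch.<Number>awaitExpected(answer);
            System.out.println(result);
        }
    }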
index e52012b459e44035e12f0f1fba1f9367991d5e02..f9fb3b830d1b5eb53401b66c6d99efad6e3d53c0 100644 (file)
@@ -9,14 +9,13 @@ package org.opendaylight.controller.cluster.databroker.actors.dds;
 
 import akka.actor.ActorRef;
 import akka.testkit.TestProbe;
-import javax.annotation.Nonnull;
+import org.eclipse.jdt.annotation.NonNull;
 import org.junit.Assert;
 import org.opendaylight.controller.cluster.access.ABIVersion;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.AccessClientUtil;
 import org.opendaylight.controller.cluster.access.commands.TransactionFailure;
 import org.opendaylight.controller.cluster.access.commands.TransactionRequest;
-import org.opendaylight.controller.cluster.access.concepts.AbstractRequestFailureProxy;
 import org.opendaylight.controller.cluster.access.concepts.FailureEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
@@ -84,20 +83,19 @@ class TransactionTester<T extends AbstractProxyTransaction> {
     private static class MockFailure extends RequestFailure<TransactionIdentifier, TransactionFailure> {
         private static final long serialVersionUID = 1L;
 
-        MockFailure(@Nonnull final TransactionIdentifier target, final long sequence,
-                            @Nonnull final RequestException cause) {
+        MockFailure(final @NonNull TransactionIdentifier target, final long sequence,
+                final @NonNull RequestException cause) {
             super(target, sequence, cause);
         }
 
-        @Nonnull
         @Override
-        protected TransactionFailure cloneAsVersion(@Nonnull final ABIVersion targetVersion) {
+        protected TransactionFailure cloneAsVersion(final ABIVersion targetVersion) {
             throw new UnsupportedOperationException("Not implemented");
         }
 
         @Override
-        protected AbstractRequestFailureProxy<TransactionIdentifier, TransactionFailure> externalizableProxy(
-                @Nonnull final ABIVersion version) {
+        protected RequestFailure.SerialForm<TransactionIdentifier, TransactionFailure> externalizableProxy(
+                final ABIVersion version) {
             throw new UnsupportedOperationException("Not implemented");
         }
     }
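
javax.annotation.Nonnull is a declaration annotation and conventionally precedes the modifiers, while org.eclipse.jdt.annotation.NonNull is a type-use annotation that attaches to the type itself, hence the "final @NonNull Foo" ordering above. A brief sketch of the placement on a made-up method:

    import org.eclipse.jdt.annotation.NonNull;

    public final class NonNullPlacementSketch {
        private NonNullPlacementSketch() {
            // utility class
        }

        // Type-use annotation: it qualifies the String type, so it is written
        // "final @NonNull String name" rather than "@Nonnull final String name".
        static String greet(final @NonNull String name) {
            return "Hello, " + name;
        }
    }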
index 62986b2ebfe86f57a378d973e1be36e01eb27c29..9f19ca045d6144639ecdcfb19da7301013036775 100644 (file)
@@ -7,14 +7,16 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.runners.Parameterized.Parameter;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
 
@@ -22,10 +24,10 @@ import akka.actor.ActorSystem;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -36,18 +38,22 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
+import org.junit.Ignore;
 import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.runners.Parameterized.Parameter;
 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
+import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
 import org.opendaylight.controller.cluster.datastore.utils.MockDataTreeChangeListener;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
+import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
@@ -57,29 +63,29 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 public abstract class AbstractDistributedDataStoreIntegrationTest {
-
     @Parameter
-    public Class<? extends AbstractDataStore> testParameter;
+    public Class<? extends ClientBackedDataStore> testParameter;
 
     protected ActorSystem system;
 
@@ -93,8 +99,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testWriteTransactionWithSingleShard() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "transactionIntegrationTest", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "transactionIntegrationTest", "test-1")) {
 
             testKit.testWriteTransaction(dataStore, TestModel.TEST_PATH,
                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
@@ -109,8 +114,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testWriteTransactionWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testWriteTransactionWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testWriteTransactionWithMultipleShards",
+            "cars-1", "people-1")) {
 
             DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
@@ -129,7 +134,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
             writeTx = dataStore.newWriteOnlyTransaction();
 
-            final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+            final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
             final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
             writeTx.write(carPath, car);
 
@@ -142,21 +147,16 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // Verify the data in the store
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            Optional<NormalizedNode<?, ?>> optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
-
-            optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.of(car), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testReadWriteTransactionWithSingleShard() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testReadWriteTransactionWithSingleShard", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testReadWriteTransactionWithSingleShard",
+            "test-1")) {
 
             // 1. Create a read-write Tx
             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
@@ -164,16 +164,14 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
             // 2. Write some data
             final YangInstanceIdentifier nodePath = TestModel.TEST_PATH;
-            final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+            final NormalizedNode nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
             readWriteTx.write(nodePath, nodeToWrite);
 
             // 3. Read the data from Tx
             final Boolean exists = readWriteTx.exists(nodePath).get(5, TimeUnit.SECONDS);
             assertEquals("exists", Boolean.TRUE, exists);
 
-            Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", nodeToWrite, optional.get());
+            assertEquals(Optional.of(nodeToWrite), readWriteTx.read(nodePath).get(5, TimeUnit.SECONDS));
 
             // 4. Ready the Tx for commit
             final DOMStoreThreePhaseCommitCohort cohort = readWriteTx.ready();
@@ -184,17 +182,15 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // 6. Verify the data in the store
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", nodeToWrite, optional.get());
+            assertEquals(Optional.of(nodeToWrite), readTx.read(nodePath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testReadWriteTransactionWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testReadWriteTransactionWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testReadWriteTransactionWithMultipleShards",
+            "cars-1", "people-1")) {
 
             DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
@@ -213,7 +209,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
             readWriteTx = dataStore.newReadWriteTransaction();
 
-            final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+            final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
             final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
             readWriteTx.write(carPath, car);
 
@@ -224,52 +220,67 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             final Boolean exists = readWriteTx.exists(carPath).get(5, TimeUnit.SECONDS);
             assertEquals("exists", Boolean.TRUE, exists);
 
-            Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
+            assertEquals("Data node", Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
 
             testKit.doCommit(readWriteTx.ready());
 
             // Verify the data in the store
             DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
-
-            optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.of(car), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
-        final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testSingleTransactionsWritesInQuickSuccession", "cars-1")) {
+        final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = testKit.setupDataStore(testParameter, "testSingleTransactionsWritesInQuickSuccession",
+            "cars-1")) {
 
-            final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
+            final var txChain = dataStore.createTransactionChain();
 
-            DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+            var writeTx = txChain.newWriteOnlyTransaction();
             writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
             writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
             testKit.doCommit(writeTx.ready());
 
-            writeTx = txChain.newWriteOnlyTransaction();
-
             int numCars = 5;
             for (int i = 0; i < numCars; i++) {
-                writeTx.write(CarsModel.newCarPath("car" + i),
-                    CarsModel.newCarEntry("car" + i, BigInteger.valueOf(20000)));
+                writeTx = txChain.newWriteOnlyTransaction();
+                writeTx.write(CarsModel.newCarPath("car" + i), CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
+
+                testKit.doCommit(writeTx.ready());
+
+                try (var tx = txChain.newReadOnlyTransaction()) {
+                    tx.read(CarsModel.BASE_PATH).get();
+                }
             }
 
-            testKit.doCommit(writeTx.ready());
+            // wait to let the shard catch up with purged transactions
+            await("transaction state propagation").atMost(5, TimeUnit.SECONDS)
+                .pollInterval(500, TimeUnit.MILLISECONDS)
+                .untilAsserted(() -> {
+                    // verify frontend metadata has no holes in purged transactions, which would leak memory over time
+                    final var localShard = dataStore.getActorUtils().findLocalShard("cars-1").orElseThrow();
+                    final var frontendMetadata = (FrontendShardDataTreeSnapshotMetadata) dataStore.getActorUtils()
+                            .executeOperation(localShard, new RequestFrontendMetadata());
+
+                    final var clientMeta = frontendMetadata.getClients().get(0);
+                    final var iterator = clientMeta.getCurrentHistories().iterator();
+                    var metadata = iterator.next();
+                    while (iterator.hasNext() && metadata.getHistoryId() != 1) {
+                        metadata = iterator.next();
+                    }
+                    assertEquals("[[0..10]]", metadata.getPurgedTransactions().ranges().toString());
+                });
 
-            final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
-                    .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
+            final var body = txChain.newReadOnlyTransaction().read(CarsModel.CAR_LIST_PATH)
+                .get(5, TimeUnit.SECONDS)
+                .orElseThrow()
+                .body();
+            assertThat(body, instanceOf(Collection.class));
+            assertEquals("# cars", numCars, ((Collection<?>) body).size());
         }
     }
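
The await(...) block introduced above retries its assertion until the shard's frontend metadata reflects all purged transactions. A stripped-down sketch of the same Awaitility shape, polling a hypothetical counter instead of shard state:

    import static org.awaitility.Awaitility.await;
    import static org.junit.Assert.assertEquals;

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class AwaitilitySketch {
        private AwaitilitySketch() {
            // utility class
        }

        static void waitForCount(final AtomicInteger counter, final int expected) {
            // Re-runs the lambda every 500 ms until it stops throwing, failing after 5 seconds.
            await("counter propagation").atMost(5, TimeUnit.SECONDS)
                .pollInterval(500, TimeUnit.MILLISECONDS)
                .untilAsserted(() -> assertEquals(expected, counter.get()));
        }
    }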
 
@@ -289,8 +300,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
         datastoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(1)
         .shardInitializationTimeout(200, TimeUnit.MILLISECONDS).frontendRequestTimeoutInSeconds(2);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             final Object result = dataStore.getActorUtils().executeOperation(
                 dataStore.getActorUtils().getShardManager(), new FindLocalShard(shardName, true));
             assertTrue("Expected LocalShardFound. Actual: " + result, result instanceof LocalShardFound);
@@ -335,18 +345,10 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
                 // leader was elected in time, the Tx
                 // should have timed out and thrown an appropriate
                 // exception cause.
-                try {
-                    txCohort.get().canCommit().get(10, TimeUnit.SECONDS);
-                    fail("Expected NoShardLeaderException");
-                } catch (final ExecutionException e) {
-                    final String msg = "Unexpected exception: "
-                            + Throwables.getStackTraceAsString(e.getCause());
-                    if (DistributedDataStore.class.equals(testParameter)) {
-                        assertTrue(Throwables.getRootCause(e) instanceof NoShardLeaderException);
-                    } else {
-                        assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
-                    }
-                }
+                final var ex = assertThrows(ExecutionException.class,
+                    () -> txCohort.get().canCommit().get(10, TimeUnit.SECONDS));
+                assertTrue("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+                    Throwables.getRootCause(ex) instanceof RequestTimeoutException);
             } finally {
                 try {
                     if (writeTxToClose != null) {
@@ -374,8 +376,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testTransactionAbort() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "transactionAbortIntegrationTest", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "transactionAbortIntegrationTest", "test-1")) {
 
             final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
@@ -397,8 +398,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @SuppressWarnings("checkstyle:IllegalCatch")
     public void testTransactionChainWithSingleShard() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testTransactionChainWithSingleShard", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testTransactionChainWithSingleShard", "test-1")) {
 
             // 1. Create a Tx chain and write-only Tx
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
@@ -407,7 +407,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             assertNotNull("newWriteOnlyTransaction returned null", writeTx);
 
             // 2. Write some data
-            final NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+            final NormalizedNode testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
             writeTx.write(TestModel.TEST_PATH, testNode);
 
             // 3. Ready the Tx for commit
@@ -433,9 +433,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // the data from the first
             // Tx is visible after being readied.
             DOMStoreReadTransaction readTx = txChain.newReadOnlyTransaction();
-            Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", testNode, optional.get());
+            assertEquals(Optional.of(testNode), readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS));
 
             // 6. Create a new RW Tx from the chain, write more data,
             // and ready it
@@ -451,9 +449,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // from the last RW Tx to
             // verify it is visible.
             readTx = txChain.newReadWriteTransaction();
-            optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", outerNode, optional.get());
+            assertEquals(Optional.of(outerNode), readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS));
 
             // 8. Wait for the 2 commits to complete and close the
             // chain.
@@ -471,17 +467,15 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             // 9. Create a new read Tx from the data store and verify
             // committed data.
             readTx = dataStore.newReadOnlyTransaction();
-            optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", outerNode, optional.get());
+            assertEquals(Optional.of(outerNode), readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testTransactionChainWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testTransactionChainWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testTransactionChainWithMultipleShards",
+            "cars-1", "people-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -498,7 +492,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
             final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction();
 
-            final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+            final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
             final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
             readWriteTx.write(carPath, car);
 
@@ -506,13 +500,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
             readWriteTx.merge(personPath, person);
 
-            Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", car, optional.get());
-
-            optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readWriteTx.read(personPath).get(5, TimeUnit.SECONDS));
 
             final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
 
@@ -533,28 +522,23 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            optional = readTx.read(carPath).get(5, TimeUnit.SECONDS);
-            assertFalse("isPresent", optional.isPresent());
-
-            optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", person, optional.get());
+            assertEquals(Optional.empty(), readTx.read(carPath).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(person), readTx.read(personPath).get(5, TimeUnit.SECONDS));
         }
     }
 
     @Test
     public void testCreateChainedTransactionsInQuickSuccession() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionsInQuickSuccession", "cars-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionsInQuickSuccession",
+            "cars-1")) {
 
             final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                 .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
                 MoreExecutors.directExecutor());
 
-            final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-            DOMTransactionChain txChain = broker.createTransactionChain(listener);
+            DOMTransactionChain txChain = broker.createTransactionChain();
 
             final List<ListenableFuture<?>> futures = new ArrayList<>();
 
@@ -568,7 +552,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
                 final DOMDataTreeReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
 
                 rwTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.newCarPath("car" + i),
-                    CarsModel.newCarEntry("car" + i, BigInteger.valueOf(20000)));
+                    CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
 
                 futures.add(rwTx.commit());
             }
@@ -577,10 +561,10 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
                 f.get(5, TimeUnit.SECONDS);
             }
 
-            final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
+            final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
                     .read(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
             assertTrue("isPresent", optional.isPresent());
-            assertEquals("# cars", numCars, ((Collection<?>) optional.get().getValue()).size());
+            assertEquals("# cars", numCars, ((Collection<?>) optional.orElseThrow().body()).size());
 
             txChain.close();
 
@@ -591,8 +575,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testCreateChainedTransactionAfterEmptyTxReadied() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionAfterEmptyTxReadied", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionAfterEmptyTxReadied",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -602,7 +586,7 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
             final DOMStoreReadWriteTransaction rwTx2 = txChain.newReadWriteTransaction();
 
-            final Optional<NormalizedNode<?, ?>> optional = rwTx2.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+            final Optional<NormalizedNode> optional = rwTx2.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
             assertFalse("isPresent", optional.isPresent());
 
             txChain.close();
@@ -612,8 +596,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testCreateChainedTransactionWhenPreviousNotReady() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionWhenPreviousNotReady", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionWhenPreviousNotReady",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -632,8 +616,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testCreateChainedTransactionAfterClose() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testCreateChainedTransactionAfterClose", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testCreateChainedTransactionAfterClose",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
             txChain.close();
@@ -647,8 +631,8 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testChainWithReadOnlyTxAfterPreviousReady() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testChainWithReadOnlyTxAfterPreviousReady", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testChainWithReadOnlyTxAfterPreviousReady",
+            "test-1")) {
 
             final DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -658,10 +642,10 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
 
             // Create read-only tx's and issue a read.
-            FluentFuture<Optional<NormalizedNode<?, ?>>> readFuture1 = txChain
+            FluentFuture<Optional<NormalizedNode>> readFuture1 = txChain
                     .newReadOnlyTransaction().read(TestModel.TEST_PATH);
 
-            FluentFuture<Optional<NormalizedNode<?, ?>>> readFuture2 = txChain
+            FluentFuture<Optional<NormalizedNode>> readFuture2 = txChain
                     .newReadOnlyTransaction().read(TestModel.TEST_PATH);
 
             // Create another write tx and issue the write.
@@ -689,38 +673,34 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
 
     @Test
     public void testChainedTransactionFailureWithSingleShard() throws Exception {
-        final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testChainedTransactionFailureWithSingleShard", "cars-1")) {
+        final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = testKit.setupDataStore(testParameter, "testChainedTransactionFailureWithSingleShard",
+            "cars-1")) {
 
-            final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
+            final var broker = new ConcurrentDOMDataBroker(
                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                 .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
                 MoreExecutors.directExecutor());
 
-            final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-            final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+            final var listener = mock(FutureCallback.class);
+            final var txChain = broker.createTransactionChain();
+            txChain.addCallback(listener);
 
-            final DOMDataTreeReadWriteTransaction writeTx = txChain.newReadWriteTransaction();
+            final var writeTx = txChain.newReadWriteTransaction();
 
             writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH,
                 PeopleModel.emptyContainer());
 
-            final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
-                    .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+            final var invalidData = Builders.containerBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+                    .build();
 
             writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
 
-            try {
-                writeTx.commit().get(5, TimeUnit.SECONDS);
-                fail("Expected TransactionCommitFailedException");
-            } catch (final ExecutionException e) {
-                // Expected
-            }
+            assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
 
-            verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx),
-                any(Throwable.class));
+            verify(listener, timeout(5000)).onFailure(any());
 
             txChain.close();
             broker.close();
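
Because the chain failure is reported asynchronously, the test mocks a FutureCallback and verifies it with Mockito's timeout() mode rather than sleeping. A minimal sketch of that pattern against a plain Guava future (the failure below is fabricated for illustration):

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.timeout;
    import static org.mockito.Mockito.verify;

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.MoreExecutors;
    import com.google.common.util.concurrent.SettableFuture;

    public final class TimeoutVerifySketch {
        private TimeoutVerifySketch() {
            // utility class
        }

        @SuppressWarnings("unchecked")
        public static void main(final String[] args) {
            final FutureCallback<Object> callback = mock(FutureCallback.class);
            final SettableFuture<Object> future = SettableFuture.create();
            Futures.addCallback(future, callback, MoreExecutors.directExecutor());

            // Fail the future; verify(..., timeout(5000)) polls until onFailure() is seen.
            future.setException(new IllegalStateException("simulated failure"));
            verify(callback, timeout(5000)).onFailure(any());
        }
    }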
@@ -730,40 +710,36 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testChainedTransactionFailureWithMultipleShards() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testChainedTransactionFailureWithMultipleShards", "cars-1", "people-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testChainedTransactionFailureWithMultipleShards",
+            "cars-1", "people-1")) {
 
             final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
                 ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                 .put(LogicalDatastoreType.CONFIGURATION, dataStore).build(),
                 MoreExecutors.directExecutor());
 
-            final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-            final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+            final var listener = mock(FutureCallback.class);
+            final DOMTransactionChain txChain = broker.createTransactionChain();
+            txChain.addCallback(listener);
 
             final DOMDataTreeWriteTransaction writeTx = txChain.newReadWriteTransaction();
 
             writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH,
                 PeopleModel.emptyContainer());
 
-            final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
-                    .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+            final ContainerNode invalidData = Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+                .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+                .build();
 
             writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
 
             // Note that merge will validate the data and fail but put
             // succeeds b/c deep validation is not
             // done for put for performance reasons.
-            try {
-                writeTx.commit().get(5, TimeUnit.SECONDS);
-                fail("Expected TransactionCommitFailedException");
-            } catch (final ExecutionException e) {
-                // Expected
-            }
+            assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
 
-            verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx),
-                any(Throwable.class));
+            verify(listener, timeout(5000)).onFailure(any());
 
             txChain.close();
             broker.close();
@@ -773,16 +749,15 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
     @Test
     public void testDataTreeChangeListenerRegistration() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, "testDataTreeChangeListenerRegistration", "test-1")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testDataTreeChangeListenerRegistration",
+            "test-1")) {
 
             testKit.testWriteTransaction(dataStore, TestModel.TEST_PATH,
                 ImmutableNodes.containerNode(TestModel.TEST_QNAME));
 
             final MockDataTreeChangeListener listener = new MockDataTreeChangeListener(1);
 
-            ListenerRegistration<MockDataTreeChangeListener> listenerReg = dataStore
-                    .registerTreeChangeListener(TestModel.TEST_PATH, listener);
+            final var listenerReg = dataStore.registerTreeChangeListener(TestModel.TEST_PATH, listener);
 
             assertNotNull("registerTreeChangeListener returned null", listenerReg);
 
@@ -828,13 +803,13 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
         final String name = "transactionIntegrationTest";
 
         final ContainerNode carsNode = CarsModel.newCarsNode(
-            CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", BigInteger.valueOf(20000L)),
-                CarsModel.newCarEntry("sportage", BigInteger.valueOf(30000L))));
+            CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", Uint64.valueOf(20000)),
+                CarsModel.newCarEntry("sportage", Uint64.valueOf(30000))));
 
         DataTree dataTree = new InMemoryDataTreeFactory().create(
             DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.full());
         AbstractShardTest.writeToStore(dataTree, CarsModel.BASE_PATH, carsNode);
-        NormalizedNode<?, ?> root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.EMPTY);
+        NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
 
         final Snapshot carsSnapshot = Snapshot.create(
             new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
@@ -843,10 +818,10 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
         dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
             SchemaContextHelper.full());
 
-        final NormalizedNode<?, ?> peopleNode = PeopleModel.create();
+        final NormalizedNode peopleNode = PeopleModel.create();
         AbstractShardTest.writeToStore(dataTree, PeopleModel.BASE_PATH, peopleNode);
 
-        root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.EMPTY);
+        root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
 
         final Snapshot peopleSnapshot = Snapshot.create(
             new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
@@ -856,19 +831,68 @@ public abstract class AbstractDistributedDataStoreIntegrationTest {
             new DatastoreSnapshot.ShardSnapshot("cars", carsSnapshot),
             new DatastoreSnapshot.ShardSnapshot("people", peopleSnapshot)));
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, name, "module-shards-member1.conf", true, "cars", "people")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, name, "module-shards-member1.conf", true,
+            "cars", "people")) {
 
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
             // two reads
-            Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", carsNode, optional.get());
+            assertEquals(Optional.of(carsNode), readTx.read(CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS));
+            assertEquals(Optional.of(peopleNode), readTx.read(PeopleModel.BASE_PATH).get(5, TimeUnit.SECONDS));
+        }
+    }
 
-            optional = readTx.read(PeopleModel.BASE_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("Data node", peopleNode, optional.get());
+    @Test
+    @Ignore("ClientBackedDatastore does not have stable indexes/term, the snapshot index seems to fluctuate")
+    // FIXME: re-enable this test
+    public void testSnapshotOnRootOverwrite() throws Exception {
+        final var testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder.snapshotOnRootOverwrite(true));
+        try (var dataStore = testKit.setupDataStore(testParameter, "testRootOverwrite",
+            "module-shards-default-cars-member1.conf", true, "cars", "default")) {
+
+            final var rootNode = Builders.containerBuilder()
+                .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME))
+                .withChild(CarsModel.create())
+                .build();
+
+            testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.of(), rootNode);
+            IntegrationTestKit.verifyShardState(dataStore, "cars",
+                state -> assertEquals(1, state.getSnapshotIndex()));
+
+            // root has been written, expect a snapshot at index 1
+            verifySnapshot("member-1-shard-cars-testRootOverwrite", 1, 1);
+
+            for (int i = 0; i < 10; i++) {
+                testKit.testWriteTransaction(dataStore, CarsModel.newCarPath("car " + i),
+                    CarsModel.newCarEntry("car " + i, Uint64.ONE));
+            }
+
+            // fake snapshot causes the snapshotIndex to move
+            IntegrationTestKit.verifyShardState(dataStore, "cars",
+                state -> assertEquals(10, state.getSnapshotIndex()));
+
+            // however the real snapshot still has not changed and was taken at index 1
+            verifySnapshot("member-1-shard-cars-testRootOverwrite", 1, 1);
+
+            // root overwrite so expect a snapshot
+            testKit.testWriteTransaction(dataStore, YangInstanceIdentifier.of(), rootNode);
+
+            // this was a real snapshot so everything should be in it (1 + 10 + 1)
+            IntegrationTestKit.verifyShardState(dataStore, "cars",
+                state -> assertEquals(12, state.getSnapshotIndex()));
+
+            verifySnapshot("member-1-shard-cars-testRootOverwrite", 12, 1);
         }
     }
+
+    private static void verifySnapshot(final String persistenceId, final long lastAppliedIndex,
+            final long lastAppliedTerm) {
+        await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> {
+            List<Snapshot> snap = InMemorySnapshotStore.getSnapshots(persistenceId, Snapshot.class);
+            assertEquals(1, snap.size());
+            assertEquals(lastAppliedIndex, snap.get(0).getLastAppliedIndex());
+            assertEquals(lastAppliedTerm, snap.get(0).getLastAppliedTerm());
+        });
+    }
 }
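
Several hunks in this file replace try/fail/catch blocks with assertThrows and then assert on the captured exception. A compact sketch of that combination, unwrapping the root cause with Guava's Throwables; the failing future is fabricated for illustration:

    import static org.junit.Assert.assertThrows;
    import static org.junit.Assert.assertTrue;

    import com.google.common.base.Throwables;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeoutException;

    public final class AssertThrowsSketch {
        private AssertThrowsSketch() {
            // utility class
        }

        public static void main(final String[] args) {
            final ListenableFuture<Void> failed =
                Futures.immediateFailedFuture(new TimeoutException("no shard leader"));

            // assertThrows returns the exception, so its cause can be inspected directly.
            final ExecutionException ex = assertThrows(ExecutionException.class, failed::get);
            assertTrue("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
                Throwables.getRootCause(ex) instanceof TimeoutException);
        }
    }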
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractShardManagerTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractShardManagerTest.java
deleted file mode 100644 (file)
index 283e686..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.MockitoAnnotations.initMocks;
-
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.mockito.Mock;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.shardmanager.ShardManagerTest.TestShardManager;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-
-public class AbstractShardManagerTest extends AbstractClusterRefActorTest {
-
-    protected static final MemberName MEMBER_1 = MemberName.forName("member-1");
-
-    protected static int ID_COUNTER = 1;
-    protected static ActorRef mockShardActor;
-    protected static ShardIdentifier mockShardName;
-
-    protected final String shardMrgIDSuffix = "config" + ID_COUNTER++;
-    protected final TestActorFactory actorFactory = new TestActorFactory(getSystem());
-    protected final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder()
-            .dataStoreName(shardMrgIDSuffix).shardInitializationTimeout(600, TimeUnit.MILLISECONDS)
-            .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(6);
-
-    @Mock
-    protected static CountDownLatch ready;
-
-    protected TestShardManager.Builder newTestShardMgrBuilder() {
-        return TestShardManager.builder(datastoreContextBuilder).distributedDataStore(mock(DistributedDataStore.class));
-    }
-
-    protected TestShardManager.Builder newTestShardMgrBuilder(final Configuration config) {
-        return TestShardManager.builder(datastoreContextBuilder).configuration(config)
-                .distributedDataStore(mock(DistributedDataStore.class));
-    }
-
-    protected Props newShardMgrProps(final Configuration config) {
-        return newTestShardMgrBuilder(config).waitTillReadyCountDownLatch(ready).props();
-    }
-
-    @Before
-    public void setUp() {
-        initMocks(this);
-
-        InMemoryJournal.clear();
-        InMemorySnapshotStore.clear();
-
-        if (mockShardActor == null) {
-            mockShardName = ShardIdentifier.create(Shard.DEFAULT_NAME, MEMBER_1, "config");
-            mockShardActor = getSystem().actorOf(MessageCollectorActor.props(), mockShardName.toString());
-        }
-
-        MessageCollectorActor.clearMessages(mockShardActor);
-    }
-
-    @After
-    public void tearDown() {
-        InMemoryJournal.clear();
-        InMemorySnapshotStore.clear();
-
-        mockShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
-        mockShardActor = null;
-
-        actorFactory.close();
-    }
-}
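
The removed base class still wired its mocks up through MockitoAnnotations.initMocks(this), which newer Mockito releases deprecate. A minimal sketch of the usual replacement, assuming Mockito 3.4+ and JUnit 4 (the class and mock names are illustrative only):

    import org.junit.After;
    import org.junit.Before;
    import org.mockito.Mock;
    import org.mockito.MockitoAnnotations;

    public class ExampleMockitoTest {
        @Mock
        private Runnable mockTask;

        private AutoCloseable mocks;

        @Before
        public void setUp() {
            // openMocks() supersedes the deprecated initMocks() and returns a handle
            // that releases the created mocks when closed.
            mocks = MockitoAnnotations.openMocks(this);
        }

        @After
        public void tearDown() throws Exception {
            mocks.close();
        }
    }
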
index aaef33e4af97ce035120b7a6aa189aef7dff737d..ab95f7028a998598cfce153dba8031e66a4cc0d5 100644 (file)
@@ -66,22 +66,22 @@ import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
@@ -92,23 +92,22 @@ import scala.concurrent.duration.FiniteDuration;
  * @author Thomas Pantelis
  */
 public abstract class AbstractShardTest extends AbstractActorTest {
-    protected static final SchemaContext SCHEMA_CONTEXT = TestModel.createTestContext();
-
-    private static final AtomicInteger NEXT_SHARD_NUM = new AtomicInteger();
+    protected static final EffectiveModelContext SCHEMA_CONTEXT = TestModel.createTestContext();
 
+    protected static final AtomicInteger SHARD_NUM = new AtomicInteger();
     protected static final int HEARTBEAT_MILLIS = 100;
 
-    protected final ShardIdentifier shardID = ShardIdentifier.create("inventory", MemberName.forName("member-1"),
-        "config" + NEXT_SHARD_NUM.getAndIncrement());
-
     protected final Builder dataStoreContextBuilder = DatastoreContext.newBuilder()
             .shardJournalRecoveryLogBatchSize(3).shardSnapshotBatchCount(5000)
             .shardHeartbeatIntervalInMillis(HEARTBEAT_MILLIS);
 
     protected final TestActorFactory actorFactory = new TestActorFactory(getSystem());
+    protected final int nextShardNum = SHARD_NUM.getAndIncrement();
+    protected final ShardIdentifier shardID = ShardIdentifier.create("inventory", MemberName.forName("member-1"),
+        "config" + nextShardNum);
 
     @Before
-    public void setUp() {
+    public void setUp() throws Exception {
         InMemorySnapshotStore.clear();
         InMemoryJournal.clear();
     }
@@ -133,7 +132,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
             .schemaContextProvider(() -> SCHEMA_CONTEXT);
     }
 
-    protected void testRecovery(final Set<Integer> listEntryKeys) throws Exception {
+    protected void testRecovery(final Set<Integer> listEntryKeys, final boolean stopActorOnFinish) throws Exception {
         // Create the actor and wait for recovery complete.
 
         final int nListEntries = listEntryKeys.size();
@@ -151,26 +150,25 @@ public abstract class AbstractShardTest extends AbstractActorTest {
             }
         };
 
-        final TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
-                Props.create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
-                "testRecovery");
+        final TestActorRef<Shard> shard = TestActorRef.create(getSystem(), Props.create(Shard.class,
+                new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()), "testRecovery");
 
         assertTrue("Recovery complete", recoveryComplete.await(5, TimeUnit.SECONDS));
 
         // Verify data in the data store.
 
-        final NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+        final NormalizedNode outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
         assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
         assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
-                outerList.getValue() instanceof Iterable);
-        for (final Object entry: (Iterable<?>) outerList.getValue()) {
+                outerList.body() instanceof Iterable);
+        for (final Object entry: (Iterable<?>) outerList.body()) {
             assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
                     entry instanceof MapEntryNode);
             final MapEntryNode mapEntry = (MapEntryNode)entry;
-            final Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
-                    mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+            final Optional<DataContainerChild> idLeaf =
+                    mapEntry.findChildByArg(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
             assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
-            final Object value = idLeaf.get().getValue();
+            final Object value = idLeaf.orElseThrow().body();
             assertTrue("Unexpected value for leaf " + TestModel.ID_QNAME.getLocalName() + ": " + value,
                     listEntryKeys.remove(value));
         }
@@ -186,7 +184,9 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         assertEquals("Last applied", nListEntries,
                 shard.underlyingActor().getShardMBean().getLastApplied());
 
-        shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        if (stopActorOnFinish) {
+            shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        }
     }
 
     protected void verifyLastApplied(final TestActorRef<Shard> shard, final long expectedValue) {
@@ -220,9 +220,9 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         }).when(mock).commit(any(DataTreeCandidate.class));
 
         doAnswer(invocation -> {
-            actual.setSchemaContext(invocation.getArgument(0));
+            actual.setEffectiveModelContext(invocation.getArgument(0));
             return null;
-        }).when(mock).setSchemaContext(any(SchemaContext.class));
+        }).when(mock).setEffectiveModelContext(any(EffectiveModelContext.class));
 
         doAnswer(invocation -> actual.takeSnapshot()).when(mock).takeSnapshot();
 
@@ -274,7 +274,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
     }
 
     protected static BatchedModifications prepareBatchedModifications(final TransactionIdentifier transactionID,
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> data, final boolean doCommitOnReady) {
+            final YangInstanceIdentifier path, final NormalizedNode data, final boolean doCommitOnReady) {
         final MutableCompositeModification modification = new MutableCompositeModification();
         modification.addModification(new WriteModification(path, data));
         return prepareBatchedModifications(transactionID, modification, doCommitOnReady);
@@ -282,24 +282,24 @@ public abstract class AbstractShardTest extends AbstractActorTest {
 
     protected static ForwardedReadyTransaction prepareForwardedReadyTransaction(final TestActorRef<Shard> shard,
             final TransactionIdentifier transactionID, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data, final boolean doCommitOnReady) {
+            final NormalizedNode data, final boolean doCommitOnReady) {
         ReadWriteShardDataTreeTransaction rwTx = shard.underlyingActor().getDataStore()
                 .newReadWriteTransaction(transactionID);
         rwTx.getSnapshot().write(path, data);
         return new ForwardedReadyTransaction(transactionID, CURRENT_VERSION, rwTx, doCommitOnReady, Optional.empty());
     }
 
-    public static NormalizedNode<?,?> readStore(final TestActorRef<? extends Shard> shard,
+    public static NormalizedNode readStore(final TestActorRef<? extends Shard> shard,
             final YangInstanceIdentifier id) {
-        return shard.underlyingActor().getDataStore().readNode(id).orNull();
+        return shard.underlyingActor().getDataStore().readNode(id).orElse(null);
     }
 
-    public static NormalizedNode<?,?> readStore(final DataTree store, final YangInstanceIdentifier id) {
+    public static NormalizedNode readStore(final DataTree store, final YangInstanceIdentifier id) {
         return store.takeSnapshot().readNode(id).orElse(null);
     }
 
     public void writeToStore(final TestActorRef<Shard> shard, final YangInstanceIdentifier id,
-            final NormalizedNode<?,?> node) throws InterruptedException, ExecutionException {
+            final NormalizedNode node) throws InterruptedException, ExecutionException {
         Future<Object> future = Patterns.ask(shard, newBatchedModifications(nextTransactionId(),
                 id, node, true, true, 1), new Timeout(5, TimeUnit.SECONDS));
         try {
@@ -310,15 +310,15 @@ public abstract class AbstractShardTest extends AbstractActorTest {
     }
 
     public static void writeToStore(final ShardDataTree store, final YangInstanceIdentifier id,
-            final NormalizedNode<?,?> node) throws DataValidationFailedException {
+            final NormalizedNode node) throws DataValidationFailedException {
         BatchedModifications batched = newBatchedModifications(nextTransactionId(), id, node, true, true, 1);
         DataTreeModification modification = store.getDataTree().takeSnapshot().newModification();
         batched.apply(modification);
         store.notifyListeners(commitTransaction(store.getDataTree(), modification));
     }
 
-    public static void writeToStore(final DataTree store, final YangInstanceIdentifier id,
-            final NormalizedNode<?,?> node) throws DataValidationFailedException {
+    public static void writeToStore(final DataTree store, final YangInstanceIdentifier id, final NormalizedNode node)
+            throws DataValidationFailedException {
         final DataTreeModification transaction = store.takeSnapshot().newModification();
 
         transaction.write(id, node);
@@ -328,8 +328,8 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         store.commit(candidate);
     }
 
-    public void mergeToStore(final ShardDataTree store, final YangInstanceIdentifier id,
-            final NormalizedNode<?,?> node) throws DataValidationFailedException {
+    public void mergeToStore(final ShardDataTree store, final YangInstanceIdentifier id, final NormalizedNode node)
+        throws DataValidationFailedException {
         final BatchedModifications batched = new BatchedModifications(nextTransactionId(), CURRENT_VERSION);
         batched.addModification(new MergeModification(id, node));
         batched.setReady();
@@ -347,7 +347,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
 
         writeToStore(testStore, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
 
-        final NormalizedNode<?, ?> root = readStore(testStore, YangInstanceIdentifier.EMPTY);
+        final NormalizedNode root = readStore(testStore, YangInstanceIdentifier.of());
 
         InMemorySnapshotStore.addSnapshot(shardID.toString(), Snapshot.create(
                 new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
@@ -364,7 +364,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
     }
 
     static BatchedModifications newBatchedModifications(final TransactionIdentifier transactionID,
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> data, final boolean ready,
+            final YangInstanceIdentifier path, final NormalizedNode data, final boolean ready,
             final boolean doCommitOnReady, final int messagesSent) {
         final BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION);
         batched.addModification(new WriteModification(path, data));
@@ -377,7 +377,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
     }
 
     static BatchedModifications newReadyBatchedModifications(final TransactionIdentifier transactionID,
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> data,
+            final YangInstanceIdentifier path, final NormalizedNode data,
             final SortedSet<String> participatingShardNames) {
         final BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION);
         batched.addModification(new WriteModification(path, data));
@@ -388,26 +388,25 @@ public abstract class AbstractShardTest extends AbstractActorTest {
 
     @SuppressWarnings("unchecked")
     static void verifyOuterListEntry(final TestActorRef<Shard> shard, final Object expIDValue) {
-        final NormalizedNode<?, ?> outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
+        final NormalizedNode outerList = readStore(shard, TestModel.OUTER_LIST_PATH);
         assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
         assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
-                outerList.getValue() instanceof Iterable);
-        final Object entry = ((Iterable<Object>)outerList.getValue()).iterator().next();
+                outerList.body() instanceof Iterable);
+        final Object entry = ((Iterable<Object>)outerList.body()).iterator().next();
         assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
                 entry instanceof MapEntryNode);
         final MapEntryNode mapEntry = (MapEntryNode)entry;
-        final Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
-                mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+        final Optional<DataContainerChild> idLeaf =
+                mapEntry.findChildByArg(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
         assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
-        assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.get().getValue());
+        assertEquals(TestModel.ID_QNAME.getLocalName() + " value", expIDValue, idLeaf.orElseThrow().body());
     }
 
     public static DataTreeCandidateTip mockCandidate(final String name) {
         final DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
         final DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
-        doReturn(ModificationType.WRITE).when(mockCandidateNode).getModificationType();
-        doReturn(Optional.of(ImmutableNodes.containerNode(CarsModel.CARS_QNAME)))
-                .when(mockCandidateNode).getDataAfter();
+        doReturn(ModificationType.WRITE).when(mockCandidateNode).modificationType();
+        doReturn(ImmutableNodes.containerNode(CarsModel.CARS_QNAME)).when(mockCandidateNode).dataAfter();
         doReturn(CarsModel.BASE_PATH).when(mockCandidate).getRootPath();
         doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
         return mockCandidate;
@@ -416,8 +415,8 @@ public abstract class AbstractShardTest extends AbstractActorTest {
     static DataTreeCandidateTip mockUnmodifiedCandidate(final String name) {
         final DataTreeCandidateTip mockCandidate = mock(DataTreeCandidateTip.class, name);
         final DataTreeCandidateNode mockCandidateNode = mock(DataTreeCandidateNode.class, name + "-node");
-        doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).getModificationType();
-        doReturn(YangInstanceIdentifier.EMPTY).when(mockCandidate).getRootPath();
+        doReturn(ModificationType.UNMODIFIED).when(mockCandidateNode).modificationType();
+        doReturn(YangInstanceIdentifier.of()).when(mockCandidate).getRootPath();
         doReturn(mockCandidateNode).when(mockCandidate).getRootNode();
         return mockCandidate;
     }
@@ -447,7 +446,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
 
     public static class CapturingShardDataTreeCohort extends ShardDataTreeCohort {
         private volatile ShardDataTreeCohort delegate;
-        private FutureCallback<Void> canCommit;
+        private FutureCallback<Empty> canCommit;
         private FutureCallback<DataTreeCandidate> preCommit;
         private FutureCallback<UnsignedLong> commit;
 
@@ -455,7 +454,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
             this.delegate = delegate;
         }
 
-        public FutureCallback<Void> getCanCommit() {
+        public FutureCallback<Empty> getCanCommit() {
             assertNotNull("canCommit was not invoked", canCommit);
             return canCommit;
         }
@@ -471,8 +470,8 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         }
 
         @Override
-        public TransactionIdentifier getIdentifier() {
-            return delegate.getIdentifier();
+        TransactionIdentifier transactionId() {
+            return delegate.transactionId();
         }
 
         @Override
@@ -486,7 +485,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         }
 
         @Override
-        public void canCommit(final FutureCallback<Void> callback) {
+        public void canCommit(final FutureCallback<Empty> callback) {
             canCommit = mockFutureCallback(callback);
             delegate.canCommit(canCommit);
         }
@@ -520,7 +519,7 @@ public abstract class AbstractShardTest extends AbstractActorTest {
         }
 
         @Override
-        public void abort(final FutureCallback<Void> callback) {
+        public void abort(final FutureCallback<Empty> callback) {
             delegate.abort(callback);
         }
 
index a3726e270bea47a1ad4b840e20ab14b6b5753709..67987c3e37bb147b6e56d04165035f06d93923f8 100644 (file)
@@ -45,12 +45,20 @@ public abstract class AbstractTest {
         TX_COUNTER.set(1L);
     }
 
+    protected static TransactionIdentifier newTransactionId(final long txId) {
+        return new TransactionIdentifier(HISTORY_ID, txId);
+    }
+
     protected static TransactionIdentifier nextTransactionId() {
-        return new TransactionIdentifier(HISTORY_ID, TX_COUNTER.getAndIncrement());
+        return newTransactionId(TX_COUNTER.getAndIncrement());
+    }
+
+    protected static LocalHistoryIdentifier newHistoryId(final long historyId) {
+        return new LocalHistoryIdentifier(CLIENT_ID, historyId);
     }
 
     protected static LocalHistoryIdentifier nextHistoryId() {
-        return new LocalHistoryIdentifier(CLIENT_ID, HISTORY_COUNTER.incrementAndGet());
+        return newHistoryId(HISTORY_COUNTER.incrementAndGet());
     }
 
     protected static <T> T waitOnAsyncTask(final CompletionStage<T> completionStage, final FiniteDuration timeout)
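
The new newTransactionId(long) and newHistoryId(long) overloads let a test pin an exact identifier instead of drawing from the shared counters, which makes equality assertions deterministic. A small illustrative sketch, assuming a JUnit 4 subclass (the test class itself is made up):

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertNotEquals;

    import org.junit.Test;

    public class ExampleIdentifierTest extends AbstractTest {
        @Test
        public void testDeterministicIdentifiers() {
            // Fixed ids: the same argument always yields an equal identifier.
            assertEquals(newTransactionId(5), newTransactionId(5));
            assertEquals(newHistoryId(2), newHistoryId(2));

            // Counter-based ids: each call advances the shared counter, so they differ.
            assertNotEquals(nextTransactionId(), nextTransactionId());
        }
    }
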
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionProxyTest.java
deleted file mode 100644 (file)
index 3e9208e..0000000
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.argThat;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.Futures;
-import akka.testkit.javadsl.TestKit;
-import akka.util.Timeout;
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Timer;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.FluentFuture;
-import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.mockito.ArgumentCaptor;
-import org.mockito.ArgumentMatcher;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.TransactionProxyTest.TestException;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModificationsReply;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.messages.ReadDataReply;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
-import org.opendaylight.controller.cluster.datastore.modification.AbstractModification;
-import org.opendaylight.controller.cluster.datastore.modification.Modification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Abstract base class for TransactionProxy unit tests.
- *
- * @author Thomas Pantelis
- */
-public abstract class AbstractTransactionProxyTest extends AbstractTest {
-    protected final Logger log = LoggerFactory.getLogger(getClass());
-
-    private static ActorSystem system;
-    private static SchemaContext SCHEMA_CONTEXT;
-
-    private final Configuration configuration = new MockConfiguration() {
-        Map<String, ShardStrategy> strategyMap = ImmutableMap.<String, ShardStrategy>builder().put(
-                TestModel.JUNK_QNAME.getLocalName(), new ShardStrategy() {
-                    @Override
-                    public String findShard(final YangInstanceIdentifier path) {
-                        return TestModel.JUNK_QNAME.getLocalName();
-                    }
-
-                    @Override
-                    public YangInstanceIdentifier getPrefixForPath(final YangInstanceIdentifier path) {
-                        return YangInstanceIdentifier.EMPTY;
-                    }
-                }).put(
-                CarsModel.BASE_QNAME.getLocalName(), new ShardStrategy() {
-                    @Override
-                    public String findShard(final YangInstanceIdentifier path) {
-                        return CarsModel.BASE_QNAME.getLocalName();
-                    }
-
-                    @Override
-                    public YangInstanceIdentifier getPrefixForPath(final YangInstanceIdentifier path) {
-                        return YangInstanceIdentifier.EMPTY;
-                    }
-                }).build();
-
-        @Override
-        public ShardStrategy getStrategyForModule(final String moduleName) {
-            return strategyMap.get(moduleName);
-        }
-
-        @Override
-        public String getModuleNameFromNameSpace(final String nameSpace) {
-            if (TestModel.JUNK_QNAME.getNamespace().toASCIIString().equals(nameSpace)) {
-                return TestModel.JUNK_QNAME.getLocalName();
-            } else if (CarsModel.BASE_QNAME.getNamespace().toASCIIString().equals(nameSpace)) {
-                return CarsModel.BASE_QNAME.getLocalName();
-            }
-            return null;
-        }
-    };
-
-    @Mock
-    protected ActorUtils mockActorContext;
-
-    protected TransactionContextFactory mockComponentFactory;
-
-    @Mock
-    private ClusterWrapper mockClusterWrapper;
-
-    protected final String memberName = "mock-member";
-
-    private final int operationTimeoutInSeconds = 2;
-    protected final Builder dataStoreContextBuilder = DatastoreContext.newBuilder()
-            .operationTimeoutInSeconds(operationTimeoutInSeconds);
-
-    @BeforeClass
-    public static void setUpClass() {
-
-        Config config = ConfigFactory.parseMap(ImmutableMap.<String, Object>builder()
-                .put("akka.actor.default-dispatcher.type",
-                        "akka.testkit.CallingThreadDispatcherConfigurator").build())
-                .withFallback(ConfigFactory.load());
-        system = ActorSystem.create("test", config);
-        SCHEMA_CONTEXT = TestModel.createTestContext();
-    }
-
-    @AfterClass
-    public static void tearDownClass() {
-        TestKit.shutdownActorSystem(system);
-        system = null;
-        SCHEMA_CONTEXT = null;
-    }
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        doReturn(getSystem()).when(mockActorContext).getActorSystem();
-        doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
-        doReturn(MemberName.forName(memberName)).when(mockActorContext).getCurrentMemberName();
-        doReturn(new ShardStrategyFactory(configuration,
-                LogicalDatastoreType.CONFIGURATION)).when(mockActorContext).getShardStrategyFactory();
-        doReturn(SCHEMA_CONTEXT).when(mockActorContext).getSchemaContext();
-        doReturn(new Timeout(operationTimeoutInSeconds, TimeUnit.SECONDS)).when(mockActorContext).getOperationTimeout();
-        doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
-        doReturn(mockClusterWrapper).when(mockActorContext).getClusterWrapper();
-        doReturn(dataStoreContextBuilder.build()).when(mockActorContext).getDatastoreContext();
-        doReturn(new Timeout(5, TimeUnit.SECONDS)).when(mockActorContext).getTransactionCommitOperationTimeout();
-
-        final ClientIdentifier mockClientId = MockIdentifiers.clientIdentifier(getClass(), memberName);
-        mockComponentFactory = new TransactionContextFactory(mockActorContext, mockClientId);
-
-        Timer timer = new MetricRegistry().timer("test");
-        doReturn(timer).when(mockActorContext).getOperationTimer(any(String.class));
-    }
-
-    protected ActorSystem getSystem() {
-        return system;
-    }
-
-    protected CreateTransaction eqCreateTransaction(final String expMemberName,
-            final TransactionType type) {
-        class CreateTransactionArgumentMatcher implements ArgumentMatcher<CreateTransaction> {
-            @Override
-            public boolean matches(CreateTransaction argument) {
-                return argument.getTransactionId().getHistoryId().getClientId().getFrontendId().getMemberName()
-                        .getName().equals(expMemberName) && argument.getTransactionType() == type.ordinal();
-            }
-        }
-
-        return argThat(new CreateTransactionArgumentMatcher());
-    }
-
-    protected DataExists eqDataExists() {
-        class DataExistsArgumentMatcher implements ArgumentMatcher<DataExists> {
-            @Override
-            public boolean matches(DataExists argument) {
-                return argument.getPath().equals(TestModel.TEST_PATH);
-            }
-        }
-
-        return argThat(new DataExistsArgumentMatcher());
-    }
-
-    protected ReadData eqReadData() {
-        return eqReadData(TestModel.TEST_PATH);
-    }
-
-    protected ReadData eqReadData(final YangInstanceIdentifier path) {
-        class ReadDataArgumentMatcher implements ArgumentMatcher<ReadData> {
-            @Override
-            public boolean matches(ReadData argument) {
-                return argument.getPath().equals(path);
-            }
-        }
-
-        return argThat(new ReadDataArgumentMatcher());
-    }
-
-    protected Future<Object> readyTxReply(final String path) {
-        return Futures.successful((Object)new ReadyTransactionReply(path));
-    }
-
-
-    protected Future<ReadDataReply> readDataReply(final NormalizedNode<?, ?> data) {
-        return Futures.successful(new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    protected Future<DataExistsReply> dataExistsReply(final boolean exists) {
-        return Futures.successful(new DataExistsReply(exists, DataStoreVersions.CURRENT_VERSION));
-    }
-
-    protected Future<BatchedModificationsReply> batchedModificationsReply(final int count) {
-        return Futures.successful(new BatchedModificationsReply(count));
-    }
-
-    @SuppressWarnings("unchecked")
-    protected Future<Object> incompleteFuture() {
-        return mock(Future.class);
-    }
-
-    protected ActorSelection actorSelection(final ActorRef actorRef) {
-        return getSystem().actorSelection(actorRef.path());
-    }
-
-    protected void expectBatchedModifications(final ActorRef actorRef, final int count) {
-        doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectBatchedModifications(final int count) {
-        doReturn(batchedModificationsReply(count)).when(mockActorContext).executeOperationAsync(
-                any(ActorSelection.class), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectBatchedModificationsReady(final ActorRef actorRef) {
-        expectBatchedModificationsReady(actorRef, false);
-    }
-
-    protected void expectBatchedModificationsReady(final ActorRef actorRef, final boolean doCommitOnReady) {
-        doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
-            readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectIncompleteBatchedModifications() {
-        doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                any(ActorSelection.class), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectFailedBatchedModifications(final ActorRef actorRef) {
-        doReturn(Futures.failed(new TestException())).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-    }
-
-    protected void expectReadyLocalTransaction(final ActorRef actorRef, final boolean doCommitOnReady) {
-        doReturn(doCommitOnReady ? Futures.successful(new CommitTransactionReply().toSerializable()) :
-            readyTxReply(actorRef.path().toString())).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(actorRef)), isA(ReadyLocalTransaction.class), any(Timeout.class));
-    }
-
-    protected CreateTransactionReply createTransactionReply(final ActorRef actorRef, final short transactionVersion) {
-        return new CreateTransactionReply(actorRef.path().toString(), nextTransactionId(), transactionVersion);
-    }
-
-    protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem) {
-        return setupActorContextWithoutInitialCreateTransaction(actorSystem, DefaultShardStrategy.DEFAULT_SHARD);
-    }
-
-    protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem,
-            final String shardName) {
-        return setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName,
-                DataStoreVersions.CURRENT_VERSION);
-    }
-
-    protected ActorRef setupActorContextWithoutInitialCreateTransaction(final ActorSystem actorSystem,
-            final String shardName, final short transactionVersion) {
-        ActorRef actorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-        log.info("Created mock shard actor {}", actorRef);
-
-        doReturn(actorSystem.actorSelection(actorRef.path()))
-                .when(mockActorContext).actorSelection(actorRef.path().toString());
-
-        doReturn(primaryShardInfoReply(actorSystem, actorRef, transactionVersion))
-                .when(mockActorContext).findPrimaryShardAsync(eq(shardName));
-
-        return actorRef;
-    }
-
-    protected Future<PrimaryShardInfo> primaryShardInfoReply(final ActorSystem actorSystem, final ActorRef actorRef) {
-        return primaryShardInfoReply(actorSystem, actorRef, DataStoreVersions.CURRENT_VERSION);
-    }
-
-    protected Future<PrimaryShardInfo> primaryShardInfoReply(final ActorSystem actorSystem, final ActorRef actorRef,
-            final short transactionVersion) {
-        return Futures.successful(new PrimaryShardInfo(actorSystem.actorSelection(actorRef.path()),
-                transactionVersion));
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type, final short transactionVersion, final String shardName) {
-        ActorRef shardActorRef = setupActorContextWithoutInitialCreateTransaction(actorSystem, shardName,
-                transactionVersion);
-
-        return setupActorContextWithInitialCreateTransaction(actorSystem, type, transactionVersion,
-                memberName, shardActorRef);
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type, final short transactionVersion, final String prefix,
-            final ActorRef shardActorRef) {
-
-        ActorRef txActorRef;
-        if (type == TransactionType.WRITE_ONLY
-                && dataStoreContextBuilder.build().isWriteOnlyTransactionOptimizationsEnabled()) {
-            txActorRef = shardActorRef;
-        } else {
-            txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-            log.info("Created mock shard Tx actor {}", txActorRef);
-
-            doReturn(actorSystem.actorSelection(txActorRef.path()))
-                .when(mockActorContext).actorSelection(txActorRef.path().toString());
-
-            doReturn(Futures.successful(createTransactionReply(txActorRef, transactionVersion))).when(mockActorContext)
-                .executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
-                        eqCreateTransaction(prefix, type), any(Timeout.class));
-        }
-
-        return txActorRef;
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type) {
-        return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
-                DefaultShardStrategy.DEFAULT_SHARD);
-    }
-
-    protected ActorRef setupActorContextWithInitialCreateTransaction(final ActorSystem actorSystem,
-            final TransactionType type,
-            final String shardName) {
-        return setupActorContextWithInitialCreateTransaction(actorSystem, type, DataStoreVersions.CURRENT_VERSION,
-                shardName);
-    }
-
-    @SuppressWarnings({"checkstyle:avoidHidingCauseException", "checkstyle:IllegalThrows"})
-    protected void propagateReadFailedExceptionCause(final FluentFuture<?> future) throws Throwable {
-        try {
-            future.get(5, TimeUnit.SECONDS);
-            fail("Expected ReadFailedException");
-        } catch (ExecutionException e) {
-            final Throwable cause = e.getCause();
-            assertTrue("Unexpected cause: " + cause.getClass(), cause instanceof ReadFailedException);
-            throw Throwables.getRootCause(cause);
-        }
-    }
-
-    protected List<BatchedModifications> captureBatchedModifications(final ActorRef actorRef) {
-        ArgumentCaptor<BatchedModifications> batchedModificationsCaptor =
-                ArgumentCaptor.forClass(BatchedModifications.class);
-        verify(mockActorContext, Mockito.atLeastOnce()).executeOperationAsync(
-                eq(actorSelection(actorRef)), batchedModificationsCaptor.capture(), any(Timeout.class));
-
-        List<BatchedModifications> batchedModifications = filterCaptured(
-                batchedModificationsCaptor, BatchedModifications.class);
-        return batchedModifications;
-    }
-
-    protected <T> List<T> filterCaptured(final ArgumentCaptor<T> captor, final Class<T> type) {
-        List<T> captured = new ArrayList<>();
-        for (T c: captor.getAllValues()) {
-            if (type.isInstance(c)) {
-                captured.add(c);
-            }
-        }
-
-        return captured;
-    }
-
-    protected void verifyOneBatchedModification(final ActorRef actorRef, final Modification expected,
-            final boolean expIsReady) {
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), expIsReady, expIsReady, expected);
-    }
-
-    protected void verifyBatchedModifications(final Object message, final boolean expIsReady,
-            final Modification... expected) {
-        verifyBatchedModifications(message, expIsReady, false, expected);
-    }
-
-    protected void verifyBatchedModifications(final Object message, final boolean expIsReady,
-            final boolean expIsDoCommitOnReady, final Modification... expected) {
-        assertEquals("Message type", BatchedModifications.class, message.getClass());
-        BatchedModifications batchedModifications = (BatchedModifications)message;
-        assertEquals("BatchedModifications size", expected.length, batchedModifications.getModifications().size());
-        assertEquals("isReady", expIsReady, batchedModifications.isReady());
-        assertEquals("isDoCommitOnReady", expIsDoCommitOnReady, batchedModifications.isDoCommitOnReady());
-        for (int i = 0; i < batchedModifications.getModifications().size(); i++) {
-            Modification actual = batchedModifications.getModifications().get(i);
-            assertEquals("Modification type", expected[i].getClass(), actual.getClass());
-            assertEquals("getPath", ((AbstractModification)expected[i]).getPath(),
-                    ((AbstractModification)actual).getPath());
-            if (actual instanceof WriteModification) {
-                assertEquals("getData", ((WriteModification)expected[i]).getData(),
-                        ((WriteModification)actual).getData());
-            }
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    protected void verifyCohortFutures(final AbstractThreePhaseCommitCohort<?> proxy,
-            final Object... expReplies) {
-        assertEquals("getReadyOperationFutures size", expReplies.length,
-                proxy.getCohortFutures().size());
-
-        List<Object> futureResults = new ArrayList<>();
-        for (Future<?> future : proxy.getCohortFutures()) {
-            assertNotNull("Ready operation Future is null", future);
-            try {
-                futureResults.add(Await.result(future, FiniteDuration.create(5, TimeUnit.SECONDS)));
-            } catch (Exception e) {
-                futureResults.add(e);
-            }
-        }
-
-        for (Object expReply : expReplies) {
-            boolean found = false;
-            Iterator<?> iter = futureResults.iterator();
-            while (iter.hasNext()) {
-                Object actual = iter.next();
-                if (CommitTransactionReply.isSerializedType(expReply)
-                        && CommitTransactionReply.isSerializedType(actual)) {
-                    found = true;
-                } else if (expReply instanceof ActorSelection && Objects.equals(expReply, actual)) {
-                    found = true;
-                } else if (expReply instanceof Class && ((Class<?>) expReply).isInstance(actual)) {
-                    found = true;
-                }
-
-                if (found) {
-                    iter.remove();
-                    break;
-                }
-            }
-
-            if (!found) {
-                fail(String.format("No cohort Future response found for %s. Actual: %s", expReply, futureResults));
-            }
-        }
-    }
-}
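
The deleted helpers defined a named ArgumentMatcher class for every message type just to compare a single field. ArgumentMatcher has been a functional interface since Mockito 2, so the same checks can be written inline; a minimal sketch (this eqReadData() is re-implemented here purely for illustration):

    import static org.mockito.ArgumentMatchers.argThat;

    import org.opendaylight.controller.cluster.datastore.messages.ReadData;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    final class MessageMatchers {
        private MessageMatchers() {
            // utility class
        }

        // Lambda-based equivalent of the removed ReadDataArgumentMatcher.
        static ReadData eqReadData(final YangInstanceIdentifier path) {
            return argThat(message -> path.equals(message.getPath()));
        }
    }
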
index a71d99e055380d4acfaf1290d4d4f444ecd91eec..9ce9cc743bc1596bf3237496871af0907b8c0b11 100644 (file)
@@ -8,7 +8,7 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertFalse;
-import static org.mockito.ArgumentMatchers.anyCollection;
+import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
@@ -27,7 +27,7 @@ import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public class DataTreeChangeListenerActorTest extends AbstractActorTest {
     private TestKit testKit;
@@ -68,7 +68,7 @@ public class DataTreeChangeListenerActorTest extends AbstractActorTest {
 
         testKit.within(Duration.ofSeconds(1), () -> {
             testKit.expectNoMessage();
-            verify(mockListener, never()).onDataTreeChanged(anyCollection());
+            verify(mockListener, never()).onDataTreeChanged(anyList());
             return null;
         });
     }
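
The switch from anyCollection() to anyList() tracks DOMDataTreeChangeListener.onDataTreeChanged() now taking a List of candidates, so the stricter matcher still matches the call. A minimal verification sketch under that assumption (the helper class is illustrative):

    import static org.mockito.ArgumentMatchers.anyList;
    import static org.mockito.Mockito.never;
    import static org.mockito.Mockito.verify;

    import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;

    final class ListenerVerification {
        private ListenerVerification() {
            // utility class
        }

        // Assert that the listener never received a notification; anyList() matches the
        // List<DataTreeCandidate> parameter of onDataTreeChanged().
        static void assertNoNotifications(final DOMDataTreeChangeListener listener) {
            verify(listener, never()).onDataTreeChanged(anyList());
        }
    }
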
index 373d4d7188b155a6e6da9ba0a861cc7cf576da1f..b0d38fba4750567b074aaff1ac56e4b7a483a3f1 100644 (file)
@@ -9,10 +9,11 @@ package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
@@ -27,9 +28,14 @@ import akka.util.Timeout;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.time.Duration;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.Executor;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import org.eclipse.jdt.annotation.NonNullByDefault;
 import org.junit.Test;
-import org.mockito.stubbing.Answer;
+import org.mockito.ArgumentCaptor;
 import org.opendaylight.controller.cluster.common.actor.Dispatchers;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
@@ -42,37 +48,30 @@ import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNo
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import scala.concurrent.ExecutionContextExecutor;
-import scala.concurrent.Future;
 
 public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
     private final DOMDataTreeChangeListener mockListener = mock(DOMDataTreeChangeListener.class);
 
     @Test(timeout = 10000)
     public void testSuccessfulRegistration() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, false);
 
-        new Thread(() -> proxy.init("shard-1")).start();
-
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new LocalShardFound(kit.getRef()));
 
-        RegisterDataTreeChangeListener registerMsg = kit.expectMsgClass(timeout,
-            RegisterDataTreeChangeListener.class);
-        assertEquals("getPath", path, registerMsg.getPath());
-        assertFalse("isRegisterOnAllInstances", registerMsg.isRegisterOnAllInstances());
+        final var registerMsg = kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+        assertEquals(path, registerMsg.getPath());
+        assertFalse(registerMsg.isRegisterOnAllInstances());
 
         kit.reply(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
 
@@ -80,8 +79,7 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
         }
 
-        assertEquals("getListenerRegistrationActor", getSystem().actorSelection(kit.getRef().path()),
-            proxy.getListenerRegistrationActor());
+        assertEquals(getSystem().actorSelection(kit.getRef().path()), proxy.getListenerRegistrationActor());
 
         kit.watch(proxy.getDataChangeListenerActor());
 
@@ -100,48 +98,38 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
 
     @Test(timeout = 10000)
     public void testSuccessfulRegistrationForClusteredListener() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
-            ClusteredDOMDataTreeChangeListener.class);
-
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> proxy =
-                new DataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener, path);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, true);
 
-        new Thread(() -> proxy.init("shard-1")).start();
-
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new LocalShardFound(kit.getRef()));
 
-        RegisterDataTreeChangeListener registerMsg = kit.expectMsgClass(timeout,
-            RegisterDataTreeChangeListener.class);
-        assertEquals("getPath", path, registerMsg.getPath());
-        assertTrue("isRegisterOnAllInstances", registerMsg.isRegisterOnAllInstances());
+        final var registerMsg = kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+        assertEquals(path, registerMsg.getPath());
+        assertTrue(registerMsg.isRegisterOnAllInstances());
 
         proxy.close();
     }
 
     @Test(timeout = 10000)
     public void testLocalShardNotFound() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
-
-        new Thread(() -> proxy.init("shard-1")).start();
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, true);
 
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new LocalShardNotFound("shard-1"));
 
@@ -152,19 +140,16 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
 
     @Test(timeout = 10000)
     public void testLocalShardNotInitialized() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
             mock(Configuration.class));
 
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var proxy = startProxyAsync(actorUtils, path, false);
 
-        new Thread(() -> proxy.init("shard-1")).start();
-
-        Duration timeout = Duration.ofSeconds(5);
-        FindLocalShard findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
-        assertEquals("getShardName", "shard-1", findLocalShard.getShardName());
+        final var timeout = Duration.ofSeconds(5);
+        final var findLocalShard = kit.expectMsgClass(timeout, FindLocalShard.class);
+        assertEquals("shard-1", findLocalShard.getShardName());
 
         kit.reply(new NotInitializedException("not initialized"));
 
@@ -178,43 +163,35 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
 
     @Test
     public void testFailedRegistration() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorSystem mockActorSystem = mock(ActorSystem.class);
+        final var kit = new TestKit(getSystem());
+        final var mockActorSystem = mock(ActorSystem.class);
 
-        ActorRef mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
+        final var mockActor = getSystem().actorOf(Props.create(DoNothingActor.class), "testFailedRegistration");
         doReturn(mockActor).when(mockActorSystem).actorOf(any(Props.class));
-        ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
+        final var executor = ExecutionContexts.fromExecutor(MoreExecutors.directExecutor());
 
-        ActorUtils actorUtils = mock(ActorUtils.class);
-        final YangInstanceIdentifier path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
+        final var actorUtils = mock(ActorUtils.class);
+        final var path = YangInstanceIdentifier.of(TestModel.TEST_QNAME);
 
         doReturn(executor).when(actorUtils).getClientDispatcher();
         doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
         doReturn(mockActorSystem).when(actorUtils).getActorSystem();
 
-        String shardName = "shard-1";
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, path);
-
         doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
-        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
+        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync("shard-1");
         doReturn(Futures.failed(new RuntimeException("mock"))).when(actorUtils).executeOperationAsync(
             any(ActorRef.class), any(Object.class), any(Timeout.class));
-        doReturn(mock(DatastoreContext.class)).when(actorUtils).getDatastoreContext();
-
-        proxy.init("shard-1");
 
-        assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());
+        final var proxy = DataTreeChangeListenerProxy.of(actorUtils, mockListener, path, true, "shard-1");
+        assertNull(proxy.getListenerRegistrationActor());
 
         proxy.close();
     }
 
     @Test
     public void testCloseBeforeRegistration() {
-        final TestKit kit = new TestKit(getSystem());
-        ActorUtils actorUtils = mock(ActorUtils.class);
-
-        String shardName = "shard-1";
+        final var kit = new TestKit(getSystem());
+        final var actorUtils = mock(ActorUtils.class);
 
         doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
         doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(actorUtils).getClientDispatcher();
@@ -223,23 +200,46 @@ public class DataTreeChangeListenerProxyTest extends AbstractActorTest {
         doReturn(getSystem().actorSelection(kit.getRef().path())).when(actorUtils).actorSelection(
             kit.getRef().path());
         doReturn(kit.duration("5 seconds")).when(actorUtils).getOperationDuration();
-        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync(eq(shardName));
+        doReturn(Futures.successful(kit.getRef())).when(actorUtils).findLocalShardAsync("shard-1");
 
-        final DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy = new DataTreeChangeListenerProxy<>(
-                actorUtils, mockListener, YangInstanceIdentifier.of(TestModel.TEST_QNAME));
+        final var proxy = createProxy(actorUtils, YangInstanceIdentifier.of(TestModel.TEST_QNAME), true);
+        final var instance = proxy.getKey();
 
-        Answer<Future<Object>> answer = invocation -> {
-            proxy.close();
-            return Futures.successful((Object) new RegisterDataTreeNotificationListenerReply(kit.getRef()));
-        };
+        doAnswer(invocation -> {
+            instance.close();
+            return Futures.successful(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
+        }).when(actorUtils).executeOperationAsync(any(ActorRef.class), any(Object.class), any(Timeout.class));
+        proxy.getValue().run();
 
-        doAnswer(answer).when(actorUtils).executeOperationAsync(any(ActorRef.class), any(Object.class),
-            any(Timeout.class));
+        kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistration.class);
 
-        proxy.init(shardName);
+        assertNull(instance.getListenerRegistrationActor());
+    }
 
-        kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistration.class);
+    @NonNullByDefault
+    private DataTreeChangeListenerProxy startProxyAsync(final ActorUtils actorUtils, final YangInstanceIdentifier path,
+            final boolean clustered) {
+        return startProxyAsync(actorUtils, path, clustered, Runnable::run);
+    }
+
+    @NonNullByDefault
+    private DataTreeChangeListenerProxy startProxyAsync(final ActorUtils actorUtils, final YangInstanceIdentifier path,
+            final boolean clustered, final Consumer<Runnable> execute) {
+        final var proxy = createProxy(actorUtils, path, clustered);
+        final var thread = new Thread(proxy.getValue());
+        thread.setDaemon(true);
+        thread.start();
+        return proxy.getKey();
+    }
 
-        assertEquals("getListenerRegistrationActor", null, proxy.getListenerRegistrationActor());
+    @NonNullByDefault
+    private Entry<DataTreeChangeListenerProxy, Runnable> createProxy(final ActorUtils actorUtils,
+            final YangInstanceIdentifier path, final boolean clustered) {
+        final var executor = mock(Executor.class);
+        final var captor = ArgumentCaptor.forClass(Runnable.class);
+        doNothing().when(executor).execute(captor.capture());
+        final var proxy = DataTreeChangeListenerProxy.ofTesting(actorUtils, mockListener, path, clustered, "shard-1",
+            executor);
+        return Map.entry(proxy, captor.getValue());
     }
 }
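
The createProxy()/startProxyAsync() helpers introduced above replace the earlier ad-hoc "new Thread(() -> proxy.init(...))" calls: the Executor handed to the proxy is a Mockito mock whose submitted Runnable is captured, so each test decides when, and on which thread, the registration work actually runs. A minimal, self-contained sketch of that pattern, with a hypothetical Service interface standing in for the class under test:

    import static org.mockito.Mockito.doNothing;
    import static org.mockito.Mockito.mock;

    import java.util.concurrent.Executor;
    import org.mockito.ArgumentCaptor;

    interface Service {
        // The class under test hands its asynchronous work to the supplied Executor.
        void start(Executor executor);
    }

    final class CapturedExecutorExample {
        static Runnable captureTask(final Service service) {
            // Mock the Executor and capture whatever Runnable gets submitted to it,
            // instead of running it.
            final Executor executor = mock(Executor.class);
            final ArgumentCaptor<Runnable> captor = ArgumentCaptor.forClass(Runnable.class);
            doNothing().when(executor).execute(captor.capture());

            // Trigger the hand-off; the task is now held by the captor, unexecuted.
            // This sketch assumes start() submits its work to the executor synchronously.
            service.start(executor);
            return captor.getValue();
        }
    }

The caller can run the returned Runnable inline for synchronous tests or start it on a daemon thread when the test needs to interleave actor messages, which is exactly the split between createProxy() and startProxyAsync() above.
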
index aee744e2484d861af6a52ce9b17a9863882605f0..88653642d26d423188fd41309714c36fdbe498e1 100644 (file)
@@ -40,8 +40,9 @@ import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNo
 import org.opendaylight.controller.cluster.datastore.utils.MockDataTreeChangeListener;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
 import scala.concurrent.Await;
 import scala.concurrent.duration.FiniteDuration;
 
@@ -56,7 +57,7 @@ public class DataTreeChangeListenerSupportTest extends AbstractShardTest {
 
     @Override
     @Before
-    public void setUp() {
+    public void setUp() throws Exception {
         super.setUp();
         createShard();
     }
@@ -77,7 +78,9 @@ public class DataTreeChangeListenerSupportTest extends AbstractShardTest {
 
     @Test
     public void testInitialChangeListenerEventWithContainerPath() throws DataValidationFailedException {
-        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .build());
 
         Entry<MockDataTreeChangeListener, ActorSelection> entry = registerChangeListener(TEST_PATH, 1);
         MockDataTreeChangeListener listener = entry.getKey();
@@ -87,7 +90,9 @@ public class DataTreeChangeListenerSupportTest extends AbstractShardTest {
 
         listener.reset(1);
 
-        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .build());
         listener.waitForChangeEvents();
         listener.verifyNotifiedData(TEST_PATH);
 
@@ -96,7 +101,9 @@ public class DataTreeChangeListenerSupportTest extends AbstractShardTest {
         entry.getValue().tell(CloseDataTreeNotificationListenerRegistration.getInstance(), kit.getRef());
         kit.expectMsgClass(Duration.ofSeconds(5), CloseDataTreeNotificationListenerRegistrationReply.class);
 
-        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.containerNode(TEST_QNAME));
+        writeToStore(shard.getDataStore(), TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .build());
         listener.verifyNoNotifiedData(TEST_PATH);
     }
 
index 276d87f9af6905639e52643ccd42d56a2545a5bc..7a3e2683921c3a6d99f04ea5ef41e5fe34240de4 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyCollection;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
@@ -45,8 +46,8 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeCandidate;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Await;
 
 /**
@@ -56,7 +57,7 @@ import scala.concurrent.Await;
  */
 public class DataTreeCohortActorTest extends AbstractActorTest {
     private static final Collection<DOMDataTreeCandidate> CANDIDATES = new ArrayList<>();
-    private static final SchemaContext MOCK_SCHEMA = mock(SchemaContext.class);
+    private static final EffectiveModelContext MOCK_SCHEMA = mock(EffectiveModelContext.class);
     private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
     private final DOMDataTreeCommitCohort mockCohort = mock(DOMDataTreeCommitCohort.class);
     private final PostCanCommitStep mockPostCanCommit = mock(PostCanCommitStep.class);
@@ -108,13 +109,12 @@ public class DataTreeCohortActorTest extends AbstractActorTest {
         askAndAwait(cohortActor, new Commit(txId2));
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testAsyncCohort() throws Exception {
         ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
 
         doReturn(executeWithDelay(executor, mockPostCanCommit))
-                .when(mockCohort).canCommit(any(Object.class), any(SchemaContext.class), any(Collection.class));
+                .when(mockCohort).canCommit(any(Object.class), any(EffectiveModelContext.class), anyCollection());
 
         doReturn(executor.submit(() -> mockPostPreCommit)).when(mockPostCanCommit).preCommit();
 
@@ -135,12 +135,12 @@ public class DataTreeCohortActorTest extends AbstractActorTest {
         executor.shutdownNow();
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testFailureOnCanCommit() throws Exception {
-        DataValidationFailedException failure = new DataValidationFailedException(YangInstanceIdentifier.EMPTY, "mock");
+        DataValidationFailedException failure = new DataValidationFailedException(YangInstanceIdentifier.of(),
+                "mock");
         doReturn(FluentFutures.immediateFailedFluentFuture(failure)).when(mockCohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
         ActorRef cohortActor = newCohortActor("testFailureOnCanCommit");
 
@@ -195,16 +195,15 @@ public class DataTreeCohortActorTest extends AbstractActorTest {
     }
 
     private ActorRef newCohortActor(final String name) {
-        return actorFactory.createActor(DataTreeCohortActor.props(mockCohort, YangInstanceIdentifier.EMPTY), name);
+        return actorFactory.createActor(DataTreeCohortActor.props(mockCohort, YangInstanceIdentifier.of()), name);
     }
 
-    @SuppressWarnings("unchecked")
     private void resetMockCohort() {
         reset(mockCohort);
         doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(mockPostCanCommit).abort();
         doReturn(Futures.immediateFuture(mockPostPreCommit)).when(mockPostCanCommit).preCommit();
         doReturn(FluentFutures.immediateFluentFuture(mockPostCanCommit)).when(mockCohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
         doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(mockPostPreCommit).abort();
         doReturn(Futures.immediateFuture(null)).when(mockPostPreCommit).commit();
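
The matcher change above, from any(Collection.class) to anyCollection(), is what allows the @SuppressWarnings("unchecked") annotations to be dropped: anyCollection() is generically typed in Mockito's ArgumentMatchers, so the stubbing compiles without a raw-type cast. A minimal sketch under that assumption, with a hypothetical Validator interface in place of the mocked DOMDataTreeCommitCohort:

    import static org.mockito.ArgumentMatchers.anyCollection;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    import java.util.Collection;

    interface Validator {
        // Stand-in for a method that, like canCommit(), takes a typed collection.
        boolean validate(Collection<String> items);
    }

    final class AnyCollectionExample {
        static Validator stubbedValidator() {
            final Validator validator = mock(Validator.class);
            // anyCollection() infers Collection<String> here, so no unchecked cast
            // and no @SuppressWarnings("unchecked") is needed on the test.
            doReturn(true).when(validator).validate(anyCollection());
            return validator;
        }
    }

The integration test below makes the identical substitution, which is why its imports gain anyCollection() as well.
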
index ce02d6ccab4745693ac374ef8190e1b1b9ecc38c..7eb534c334e3b3afb8811a3216994c8e7994ff53 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertEquals;
@@ -13,6 +12,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyCollection;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
@@ -24,11 +24,9 @@ import akka.actor.Address;
 import akka.actor.AddressFromURIString;
 import akka.cluster.Cluster;
 import akka.testkit.javadsl.TestKit;
-import com.google.common.base.Optional;
 import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.FluentFuture;
 import com.typesafe.config.ConfigFactory;
-import java.math.BigInteger;
 import java.util.Collection;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
@@ -37,6 +35,7 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.common.api.DataValidationFailedException;
@@ -49,15 +48,14 @@ import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 public class DataTreeCohortIntegrationTest {
 
@@ -94,36 +92,36 @@ public class DataTreeCohortIntegrationTest {
     @SuppressWarnings({ "unchecked", "rawtypes" })
     @Test
     public void testSuccessfulCanCommitWithNoopPostStep() throws Exception {
-        final DOMDataTreeCommitCohort cohort = mock(DOMDataTreeCommitCohort.class);
+        final var cohort = mock(DOMDataTreeCommitCohort.class);
         doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
         ArgumentCaptor<Collection> candidateCapt = ArgumentCaptor.forClass(Collection.class);
         IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
 
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testSuccessfulCanCommitWithNoopPostStep", "test-1")) {
-            final ObjectRegistration<DOMDataTreeCommitCohort> cohortReg = dataStore.registerCommitCohort(TEST_ID,
-                    cohort);
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testSuccessfulCanCommitWithNoopPostStep",
+            "test-1")) {
+
+            final var cohortReg = dataStore.registerCommitCohort(TEST_ID, cohort);
             assertNotNull(cohortReg);
 
             IntegrationTestKit.verifyShardState(dataStore, "test-1",
                 state -> assertEquals("Cohort registrations", 1, state.getCommitCohortActors().size()));
 
-            final ContainerNode node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+            final var node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
             kit.testWriteTransaction(dataStore, TestModel.TEST_PATH, node);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapt.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapt.capture());
             assertDataTreeCandidate((DOMDataTreeCandidate) candidateCapt.getValue().iterator().next(), TEST_ID,
-                    ModificationType.WRITE, Optional.of(node), Optional.absent());
+                    ModificationType.WRITE, node, null);
 
             reset(cohort);
             doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                    any(SchemaContext.class), any(Collection.class));
+                    any(EffectiveModelContext.class), anyCollection());
 
             kit.testWriteTransaction(dataStore, TestModel.OUTER_LIST_PATH,
                     ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
                     .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 42))
                     .build());
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), any(Collection.class));
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), anyCollection());
 
             cohortReg.close();
 
@@ -135,17 +133,15 @@ public class DataTreeCohortIntegrationTest {
         }
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testFailedCanCommit() throws Exception {
-        final DOMDataTreeCommitCohort failedCohort = mock(DOMDataTreeCommitCohort.class);
+        final var failedCohort = mock(DOMDataTreeCommitCohort.class);
 
         doReturn(FAILED_CAN_COMMIT_FUTURE).when(failedCohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
-        IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testFailedCanCommit", "test-1")) {
+        final var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testFailedCanCommit", "test-1")) {
             dataStore.registerCommitCohort(TEST_ID, failedCohort);
 
             IntegrationTestKit.verifyShardState(dataStore, "test-1",
@@ -166,14 +162,15 @@ public class DataTreeCohortIntegrationTest {
     @SuppressWarnings({ "unchecked", "rawtypes" })
     @Test
     public void testCanCommitWithListEntries() throws Exception {
-        final DOMDataTreeCommitCohort cohort = mock(DOMDataTreeCommitCohort.class);
+        final var cohort = mock(DOMDataTreeCommitCohort.class);
         doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
-        IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+                any(EffectiveModelContext.class), anyCollection());
+        final var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testCanCommitWithMultipleListEntries",
+            "cars-1")) {
 
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testCanCommitWithMultipleListEntries", "cars-1")) {
-            final ObjectRegistration<DOMDataTreeCommitCohort> cohortReg = dataStore.registerCommitCohort(
+            final var cohortReg = dataStore.registerCommitCohort(
                     new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, CarsModel.CAR_LIST_PATH
                             .node(CarsModel.CAR_QNAME)), cohort);
             assertNotNull(cohortReg);
@@ -193,15 +190,15 @@ public class DataTreeCohortIntegrationTest {
 
             writeTx = dataStore.newWriteOnlyTransaction();
             final YangInstanceIdentifier optimaPath = CarsModel.newCarPath("optima");
-            final MapEntryNode optimaNode = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+            final MapEntryNode optimaNode = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
             writeTx.write(optimaPath, optimaNode);
             kit.doCommit(writeTx.ready());
 
             ArgumentCaptor<Collection> candidateCapture = ArgumentCaptor.forClass(Collection.class);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
             assertDataTreeCandidate((DOMDataTreeCandidate) candidateCapture.getValue().iterator().next(),
                     new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, optimaPath), ModificationType.WRITE,
-                    Optional.of(optimaNode), Optional.absent());
+                    optimaNode, null);
 
             // Write replace the cars container with 2 new car entries. The cohort should get invoked with 3
             // DOMDataTreeCandidates: once for each of the 2 new car entries (WRITE mod) and once for the deleted prior
@@ -209,51 +206,51 @@ public class DataTreeCohortIntegrationTest {
 
             reset(cohort);
             doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                    any(SchemaContext.class), any(Collection.class));
+                    any(EffectiveModelContext.class), anyCollection());
 
             writeTx = dataStore.newWriteOnlyTransaction();
             final YangInstanceIdentifier sportagePath = CarsModel.newCarPath("sportage");
-            final MapEntryNode sportageNode = CarsModel.newCarEntry("sportage", BigInteger.valueOf(20000));
+            final MapEntryNode sportageNode = CarsModel.newCarEntry("sportage", Uint64.valueOf(20000));
             final YangInstanceIdentifier soulPath = CarsModel.newCarPath("soul");
-            final MapEntryNode soulNode = CarsModel.newCarEntry("soul", BigInteger.valueOf(20000));
+            final MapEntryNode soulNode = CarsModel.newCarEntry("soul", Uint64.valueOf(20000));
             writeTx.write(CarsModel.BASE_PATH, CarsModel.newCarsNode(CarsModel.newCarsMapNode(sportageNode,soulNode)));
             kit.doCommit(writeTx.ready());
 
             candidateCapture = ArgumentCaptor.forClass(Collection.class);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
 
             assertDataTreeCandidate(findCandidate(candidateCapture, sportagePath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, sportagePath), ModificationType.WRITE,
-                    Optional.of(sportageNode), Optional.absent());
+                    sportageNode, null);
 
             assertDataTreeCandidate(findCandidate(candidateCapture, soulPath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, soulPath), ModificationType.WRITE,
-                    Optional.of(soulNode), Optional.absent());
+                    soulNode, null);
 
             assertDataTreeCandidate(findCandidate(candidateCapture, optimaPath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, optimaPath), ModificationType.DELETE,
-                    Optional.absent(), Optional.of(optimaNode));
+                    null, optimaNode);
 
             // Delete the cars container - cohort should be invoked for the 2 deleted car entries.
 
             reset(cohort);
             doReturn(PostCanCommitStep.NOOP_SUCCESSFUL_FUTURE).when(cohort).canCommit(any(Object.class),
-                    any(SchemaContext.class), any(Collection.class));
+                    any(EffectiveModelContext.class), anyCollection());
 
             writeTx = dataStore.newWriteOnlyTransaction();
             writeTx.delete(CarsModel.BASE_PATH);
             kit.doCommit(writeTx.ready());
 
             candidateCapture = ArgumentCaptor.forClass(Collection.class);
-            verify(cohort).canCommit(any(Object.class), any(SchemaContext.class), candidateCapture.capture());
+            verify(cohort).canCommit(any(Object.class), any(EffectiveModelContext.class), candidateCapture.capture());
 
             assertDataTreeCandidate(findCandidate(candidateCapture, sportagePath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, sportagePath), ModificationType.DELETE,
-                    Optional.absent(), Optional.of(sportageNode));
+                    null, sportageNode);
 
             assertDataTreeCandidate(findCandidate(candidateCapture, soulPath), new DOMDataTreeIdentifier(
                     LogicalDatastoreType.CONFIGURATION, soulPath), ModificationType.DELETE,
-                    Optional.absent(), Optional.of(soulNode));
+                    null, soulNode);
 
         }
     }
@@ -276,29 +273,28 @@ public class DataTreeCohortIntegrationTest {
      * DataTreeCandidate) and since currently preCommit is a noop in the Shard backend (it is combined with commit),
      * we can't actually test abort after canCommit.
      */
-    @SuppressWarnings("unchecked")
     @Test
     @Ignore
     public void testAbortAfterCanCommit() throws Exception {
-        final DOMDataTreeCommitCohort cohortToAbort = mock(DOMDataTreeCommitCohort.class);
-        final PostCanCommitStep stepToAbort = mock(PostCanCommitStep.class);
+        final var cohortToAbort = mock(DOMDataTreeCommitCohort.class);
+        final var stepToAbort = mock(PostCanCommitStep.class);
         doReturn(ThreePhaseCommitStep.NOOP_ABORT_FUTURE).when(stepToAbort).abort();
         doReturn(PostPreCommitStep.NOOP_FUTURE).when(stepToAbort).preCommit();
         doReturn(FluentFutures.immediateFluentFuture(stepToAbort)).when(cohortToAbort).canCommit(any(Object.class),
-                any(SchemaContext.class), any(Collection.class));
+                any(EffectiveModelContext.class), anyCollection());
 
-        IntegrationTestKit kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        try (AbstractDataStore dataStore = kit.setupAbstractDataStore(
-                DistributedDataStore.class, "testAbortAfterCanCommit", "test-1", "cars-1")) {
+        var kit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
+        try (var dataStore = kit.setupDataStore(ClientBackedDataStore.class, "testAbortAfterCanCommit",
+                "test-1", "cars-1")) {
             dataStore.registerCommitCohort(TEST_ID, cohortToAbort);
 
             IntegrationTestKit.verifyShardState(dataStore, "test-1",
                 state -> assertEquals("Cohort registrations", 1, state.getCommitCohortActors().size()));
 
-            DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
+            var writeTx = dataStore.newWriteOnlyTransaction();
             writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
             writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
-            DOMStoreThreePhaseCommitCohort dsCohort = writeTx.ready();
+            var dsCohort = writeTx.ready();
 
             dsCohort.canCommit().get(5, TimeUnit.SECONDS);
             dsCohort.preCommit().get(5, TimeUnit.SECONDS);
@@ -309,20 +305,11 @@ public class DataTreeCohortIntegrationTest {
 
     private static void assertDataTreeCandidate(final DOMDataTreeCandidate candidate,
             final DOMDataTreeIdentifier expTreeId, final ModificationType expType,
-            final Optional<NormalizedNode<?, ?>> expDataAfter, final Optional<NormalizedNode<?, ?>> expDataBefore) {
+            final NormalizedNode expDataAfter, final NormalizedNode expDataBefore) {
         assertNotNull("Expected candidate for path " + expTreeId.getRootIdentifier(), candidate);
         assertEquals("rootPath", expTreeId, candidate.getRootPath());
-        assertEquals("modificationType", expType, candidate.getRootNode().getModificationType());
-
-        assertEquals("dataAfter present", expDataAfter.isPresent(), candidate.getRootNode().getDataAfter().isPresent());
-        if (expDataAfter.isPresent()) {
-            assertEquals("dataAfter", expDataAfter.get(), candidate.getRootNode().getDataAfter().get());
-        }
-
-        assertEquals("dataBefore present", expDataBefore.isPresent(),
-                candidate.getRootNode().getDataBefore().isPresent());
-        if (expDataBefore.isPresent()) {
-            assertEquals("dataBefore", expDataBefore.get(), candidate.getRootNode().getDataBefore().get());
-        }
+        assertEquals("modificationType", expType, candidate.getRootNode().modificationType());
+        assertEquals("dataAfter", expDataAfter, candidate.getRootNode().dataAfter());
+        assertEquals("dataBefore", expDataBefore, candidate.getRootNode().dataBefore());
     }
 }
index 38a89d0c75fa7fe0feb56302e5443c4a3bd814ba..244df9c0bc0973d541e7cf0baf742d2b111df019 100644 (file)
@@ -82,14 +82,13 @@ public class DatastoreContextContextPropertiesUpdaterTest {
         updater.close();
     }
 
-    private Object resolveField(final String name, final Object obj) throws Exception {
+    private static Object resolveField(final String name, final Object obj) throws Exception {
         final Field currProps = obj.getClass().getDeclaredField(name);
         currProps.setAccessible(true);
         return currProps.get(obj);
     }
 
-    private class DummyListenerImpl implements Listener {
-
+    private static final class DummyListenerImpl implements Listener {
         private DatastoreContextFactory contextFactory;
 
         @Override
index 14866d447a785cef63bf7f97c6195db59a5bea86..e271c98c63355f7e70ed752ef3631a3e879aeee0 100644 (file)
@@ -10,56 +10,33 @@ package org.opendaylight.controller.cluster.datastore;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_MS;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS;
 import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION;
 import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.OPERATIONAL;
 
-import java.util.Arrays;
 import java.util.HashMap;
-import java.util.Hashtable;
 import java.util.Map;
 import org.junit.Test;
-import org.opendaylight.mdsal.binding.generator.impl.GeneratedClassLoadingStrategy;
-import org.opendaylight.mdsal.binding.generator.impl.ModuleInfoBackedContext;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStoreConfigProperties;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev140612.DataStorePropertiesContainer;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.mdsal.binding.dom.codec.impl.BindingCodecContext;
+import org.opendaylight.mdsal.binding.runtime.spi.BindingRuntimeHelpers;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStorePropertiesContainer;
 
 /**
  * Unit tests for DatastoreContextIntrospector.
  *
  * @author Thomas Pantelis
  */
-@SuppressWarnings("checkstyle:IllegalCatch")
 public class DatastoreContextIntrospectorTest {
-
-    static SchemaContext SCHEMA_CONTEXT;
-    static DatastoreContextIntrospectorFactory INTROSPECTOR_FACTORY;
-
-    static {
-        final ModuleInfoBackedContext moduleContext = ModuleInfoBackedContext.create();
-        try {
-            moduleContext.addModuleInfos(Arrays.asList(
-                    BindingReflections.getModuleInfo(DataStorePropertiesContainer.class)));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-        SCHEMA_CONTEXT = moduleContext.tryToCreateSchemaContext().get();
-
-        DOMSchemaService mockSchemaService = mock(DOMSchemaService.class);
-        doReturn(SCHEMA_CONTEXT).when(mockSchemaService).getGlobalContext();
-        INTROSPECTOR_FACTORY = new DatastoreContextIntrospectorFactory(mockSchemaService,
-                GeneratedClassLoadingStrategy.getTCCLClassLoadingStrategy());
-    }
+    static final AbstractDatastoreContextIntrospectorFactory INTROSPECTOR_FACTORY =
+            new DefaultDatastoreContextIntrospectorFactory(new BindingCodecContext(
+                BindingRuntimeHelpers.createRuntimeContext(DataStorePropertiesContainer.class)));
 
     @Test
     public void testYangDefaults() {
@@ -86,17 +63,18 @@ public class DatastoreContextIntrospectorTest {
         properties.put("shard-transaction-commit-queue-capacity", "567");
         properties.put("shard-initialization-timeout-in-seconds", "82");
         properties.put("shard-leader-election-timeout-in-seconds", "66");
+        properties.put("initial-settle-timeout-multiplier", "5");
+        properties.put("recovery-snapshot-interval-seconds", "360");
         properties.put("shard-isolated-leader-check-interval-in-millis", "123");
         properties.put("shard-snapshot-data-threshold-percentage", "100");
+        properties.put("shard-snapshot-data-threshold", "800");
         properties.put("shard-election-timeout-factor", "21");
         properties.put("shard-batched-modification-count", "901");
         properties.put("transactionCreationInitialRateLimit", "200");
-        properties.put("MaxShardDataChangeExecutorPoolSize", "41");
-        properties.put("Max-Shard-Data-Change Executor-Queue Size", "1111");
-        properties.put(" max shard data change listener queue size", "2222");
-        properties.put("mAx-shaRd-data-STORE-executor-quEUe-size", "3333");
         properties.put("persistent", "false");
         properties.put("initial-payload-serialized-buffer-capacity", "600");
+        properties.put("export-on-recovery", "json");
+        properties.put("recovery-json-dump", "persistence-export");
 
         boolean updated = introspector.update(properties);
         assertTrue("updated", updated);
@@ -111,22 +89,24 @@ public class DatastoreContextIntrospectorTest {
         assertEquals(567, context.getShardTransactionCommitQueueCapacity());
         assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
         assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+        assertEquals(5, context.getInitialSettleTimeoutMultiplier());
+        assertEquals(360, context.getShardRaftConfig().getRecoverySnapshotIntervalSeconds());
         assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
         assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+        assertEquals(800, context.getShardRaftConfig().getSnapshotDataThreshold());
         assertEquals(21, context.getShardRaftConfig().getElectionTimeoutFactor());
         assertEquals(901, context.getShardBatchedModificationCount());
         assertEquals(200, context.getTransactionCreationInitialRateLimit());
-        assertEquals(41, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
-        assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
-        assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
-        assertEquals(3333, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
         assertEquals(600, context.getInitialPayloadSerializedBufferCapacity());
+        assertEquals("persistence-export", context.getRecoveryExportBaseDir());
+        assertEquals(ExportOnRecovery.Json, context.getExportOnRecovery());
         assertFalse(context.isPersistent());
 
         properties.put("shard-transaction-idle-timeout-in-minutes", "32");
         properties.put("operation-timeout-in-seconds", "27");
         properties.put("shard-heartbeat-interval-in-millis", "102");
         properties.put("shard-election-timeout-factor", "22");
+        properties.put("initial-settle-timeout-multiplier", "6");
         properties.put("max-shard-data-change-executor-pool-size", "42");
         properties.put("max-shard-data-store-executor-queue-size", "4444");
         properties.put("persistent", "true");
@@ -144,20 +124,18 @@ public class DatastoreContextIntrospectorTest {
         assertEquals(567, context.getShardTransactionCommitQueueCapacity());
         assertEquals(82, context.getShardInitializationTimeout().duration().toSeconds());
         assertEquals(66, context.getShardLeaderElectionTimeout().duration().toSeconds());
+        assertEquals(6, context.getInitialSettleTimeoutMultiplier());
         assertEquals(123, context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
         assertEquals(100, context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+        assertEquals(800, context.getShardRaftConfig().getSnapshotDataThreshold());
         assertEquals(22, context.getShardRaftConfig().getElectionTimeoutFactor());
         assertEquals(200, context.getTransactionCreationInitialRateLimit());
-        assertEquals(42, context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
-        assertEquals(1111, context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
-        assertEquals(2222, context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
-        assertEquals(4444, context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
         assertTrue(context.isPersistent());
 
         updated = introspector.update(null);
         assertFalse("updated", updated);
 
-        updated = introspector.update(new Hashtable<>());
+        updated = introspector.update(new HashMap<>());
         assertFalse("updated", updated);
     }
 
@@ -175,8 +153,8 @@ public class DatastoreContextIntrospectorTest {
         properties.put("shard-heartbeat-interval-in-millis", "99"); // bad - must be >= 100
         properties.put("shard-transaction-commit-queue-capacity", "567"); // good
         properties.put("shard-snapshot-data-threshold-percentage", "101"); // bad - must be 0-100
+        properties.put("shard-snapshot-data-threshold", "-1"); // bad - must be > 0
         properties.put("shard-initialization-timeout-in-seconds", "-1"); // bad - must be > 0
-        properties.put("max-shard-data-change-executor-pool-size", "bogus"); // bad - NaN
         properties.put("unknownProperty", "1"); // bad - invalid property name
 
         final boolean updated = introspector.update(properties);
@@ -193,9 +171,8 @@ public class DatastoreContextIntrospectorTest {
         assertEquals(567, context.getShardTransactionCommitQueueCapacity());
         assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE,
                 context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+        assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD, context.getShardRaftConfig().getSnapshotDataThreshold());
         assertEquals(DEFAULT_SHARD_INITIALIZATION_TIMEOUT, context.getShardInitializationTimeout());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
-                context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
     }
 
     @Test
@@ -219,7 +196,6 @@ public class DatastoreContextIntrospectorTest {
 
         assertEquals(33, operContext.getShardTransactionIdleTimeout().toMinutes());
         assertTrue(operContext.isPersistent());
-        assertEquals(333, operContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
 
         final DatastoreContextIntrospector configIntrospector = INTROSPECTOR_FACTORY.newInstance(CONFIGURATION);
         updated = configIntrospector.update(properties);
@@ -228,7 +204,6 @@ public class DatastoreContextIntrospectorTest {
 
         assertEquals(44, configContext.getShardTransactionIdleTimeout().toMinutes());
         assertFalse(configContext.isPersistent());
-        assertEquals(444, configContext.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
     }
 
     @Test
@@ -238,6 +213,7 @@ public class DatastoreContextIntrospectorTest {
         properties.put("operational.shard-transaction-idle-timeout-in-minutes", "33"); // operational override
         properties.put("config.shard-transaction-idle-timeout-in-minutes", "44"); // config override
         properties.put("topology.shard-transaction-idle-timeout-in-minutes", "55"); // global shard override
+        properties.put("config.topology.persistent", "false"); // per-shard config override
 
         final DatastoreContextIntrospector operIntrospector = INTROSPECTOR_FACTORY.newInstance(OPERATIONAL);
 
@@ -258,6 +234,7 @@ public class DatastoreContextIntrospectorTest {
 
         shardContext = configIntrospector.newContextFactory().getShardDatastoreContext("topology");
         assertEquals(55, shardContext.getShardTransactionIdleTimeout().toMinutes());
+        assertFalse(shardContext.isPersistent());
 
         // operational shard override
         properties.put("operational.topology.shard-transaction-idle-timeout-in-minutes", "66");
@@ -274,5 +251,6 @@ public class DatastoreContextIntrospectorTest {
 
         shardContext = configIntrospector.newContextFactory().getShardDatastoreContext("default");
         assertEquals(44, shardContext.getShardTransactionIdleTimeout().toMinutes());
+        assertTrue(shardContext.isPersistent());
     }
 }
index 6bd8f367fab185da5ee3b6231992c316eaafe79b..611da694c3279812471fbe2c4b44207312d374aa 100644 (file)
@@ -11,15 +11,19 @@ import static org.junit.Assert.assertEquals;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_CONFIGURATION_READER;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_INITIAL_SETTLE_TIMEOUT_MULTIPLIER;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_MAX_MESSAGE_SLICE_SIZE;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_OPERATION_TIMEOUT_IN_MS;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_PERSISTENT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_RECOVERY_EXPORT_BASE_DIR;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_RECOVERY_SNAPSHOT_INTERVAL_SECONDS;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_INITIALIZATION_TIMEOUT;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT;
+import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
 import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY;
@@ -30,7 +34,7 @@ import static org.opendaylight.controller.cluster.datastore.DatastoreContext.DEF
 import java.util.concurrent.TimeUnit;
 import org.junit.Assert;
 import org.junit.Test;
-import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStoreConfigProperties;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
 
 public class DatastoreContextTest {
 
@@ -51,25 +55,22 @@ public class DatastoreContextTest {
                 context.getShardInitializationTimeout().duration().toMillis());
         assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis(),
                 context.getShardLeaderElectionTimeout().duration().toMillis());
+        assertEquals(DEFAULT_INITIAL_SETTLE_TIMEOUT_MULTIPLIER,
+                context.getInitialSettleTimeoutMultiplier());
         assertEquals(DEFAULT_PERSISTENT, context.isPersistent());
         assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
         assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS,
                 context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
         assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE,
                 context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+        assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD,
+                context.getShardRaftConfig().getSnapshotDataThreshold());
         assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR, context.getShardRaftConfig().getElectionTimeoutFactor());
         assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT, context.getTransactionCreationInitialRateLimit());
         assertEquals(DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT,
                 context.getShardBatchedModificationCount());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
-                context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE,
-                context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE,
-                context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE,
-                context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
         assertEquals(DEFAULT_MAX_MESSAGE_SLICE_SIZE, context.getMaximumMessageSliceSize());
+        assertEquals(DEFAULT_RECOVERY_EXPORT_BASE_DIR, context.getRecoveryExportBaseDir());
     }
 
     @Test
@@ -82,6 +83,7 @@ public class DatastoreContextTest {
         builder.shardTransactionCommitTimeoutInSeconds(DEFAULT_SHARD_TX_COMMIT_TIMEOUT_IN_SECONDS + 1);
         builder.shardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1);
         builder.shardSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT + 1);
+        builder.recoverySnapshotIntervalSeconds(DEFAULT_RECOVERY_SNAPSHOT_INTERVAL_SECONDS + 1);
         builder.shardHeartbeatIntervalInMillis(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1);
         builder.shardTransactionCommitQueueCapacity(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1);
         builder.shardInitializationTimeout(DEFAULT_SHARD_INITIALIZATION_TIMEOUT
@@ -90,22 +92,18 @@ public class DatastoreContextTest {
                 TimeUnit.MILLISECONDS);
         builder.shardLeaderElectionTimeout(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
                 TimeUnit.MILLISECONDS);
+        builder.initialSettleTimeoutMultiplier(DEFAULT_INITIAL_SETTLE_TIMEOUT_MULTIPLIER + 1);
         builder.persistent(!DEFAULT_PERSISTENT);
         builder.shardIsolatedLeaderCheckIntervalInMillis(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1);
         builder.shardSnapshotDataThresholdPercentage(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1);
+        builder.shardSnapshotDataThreshold(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD + 1);
         builder.shardElectionTimeoutFactor(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1);
         builder.transactionCreationInitialRateLimit(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1);
         builder.shardBatchedModificationCount(DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1);
-        builder.maxShardDataChangeExecutorPoolSize(
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1);
-        builder.maxShardDataChangeExecutorQueueSize(
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1);
-        builder.maxShardDataChangeListenerQueueSize(
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1);
-        builder.maxShardDataStoreExecutorQueueSize(
-                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1);
         builder.maximumMessageSliceSize(DEFAULT_MAX_MESSAGE_SLICE_SIZE + 1);
         builder.initialPayloadSerializedBufferCapacity(DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY + 1);
+        builder.exportOnRecovery(ExportOnRecovery.Json);
+        builder.recoveryExportBaseDir(DEFAULT_RECOVERY_EXPORT_BASE_DIR + "-new");
 
         DatastoreContext context = builder.build();
 
@@ -120,7 +118,7 @@ public class DatastoreContextTest {
         Assert.assertNotSame(context, newContext);
     }
 
-    private static void verifyCustomSettings(DatastoreContext context) {
+    private static void verifyCustomSettings(final DatastoreContext context) {
         assertEquals(DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT.toMillis() + 1,
                 context.getShardTransactionIdleTimeout().toMillis());
         assertEquals(TimeUnit.MILLISECONDS.toSeconds(DEFAULT_OPERATION_TIMEOUT_IN_MS) + 1,
@@ -130,6 +128,8 @@ public class DatastoreContextTest {
         assertEquals(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE + 1,
                 context.getShardRaftConfig().getJournalRecoveryLogBatchSize());
         assertEquals(DEFAULT_SNAPSHOT_BATCH_COUNT + 1, context.getShardRaftConfig().getSnapshotBatchCount());
+        assertEquals(DEFAULT_RECOVERY_SNAPSHOT_INTERVAL_SECONDS + 1,
+                context.getShardRaftConfig().getRecoverySnapshotIntervalSeconds());
         assertEquals(DEFAULT_HEARTBEAT_INTERVAL_IN_MILLIS + 1,
                 context.getShardRaftConfig().getHeartBeatInterval().length());
         assertEquals(DEFAULT_SHARD_TX_COMMIT_QUEUE_CAPACITY + 1, context.getShardTransactionCommitQueueCapacity());
@@ -137,27 +137,26 @@ public class DatastoreContextTest {
                 context.getShardInitializationTimeout().duration().toMillis());
         assertEquals(DEFAULT_SHARD_LEADER_ELECTION_TIMEOUT.duration().toMillis() + 1,
                 context.getShardLeaderElectionTimeout().duration().toMillis());
+        assertEquals(DEFAULT_INITIAL_SETTLE_TIMEOUT_MULTIPLIER + 1,
+                context.getInitialSettleTimeoutMultiplier());
         assertEquals(!DEFAULT_PERSISTENT, context.isPersistent());
         assertEquals(DEFAULT_CONFIGURATION_READER, context.getConfigurationReader());
         assertEquals(DEFAULT_ISOLATED_LEADER_CHECK_INTERVAL_IN_MILLIS + 1,
                 context.getShardRaftConfig().getIsolatedCheckIntervalInMillis());
         assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD_PERCENTAGE + 1,
                 context.getShardRaftConfig().getSnapshotDataThresholdPercentage());
+        assertEquals(DEFAULT_SHARD_SNAPSHOT_DATA_THRESHOLD + 1,
+                context.getShardRaftConfig().getSnapshotDataThreshold());
         assertEquals(DEFAULT_SHARD_ELECTION_TIMEOUT_FACTOR + 1,
                 context.getShardRaftConfig().getElectionTimeoutFactor());
         assertEquals(DEFAULT_TX_CREATION_INITIAL_RATE_LIMIT + 1, context.getTransactionCreationInitialRateLimit());
         assertEquals(DatastoreContext.DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT + 1,
                 context.getShardBatchedModificationCount());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE + 1,
-                context.getDataStoreProperties().getMaxDataChangeExecutorPoolSize());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE + 1,
-                context.getDataStoreProperties().getMaxDataChangeExecutorQueueSize());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE + 1,
-                context.getDataStoreProperties().getMaxDataChangeListenerQueueSize());
-        assertEquals(InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE + 1,
-                context.getDataStoreProperties().getMaxDataStoreExecutorQueueSize());
         assertEquals(DEFAULT_MAX_MESSAGE_SLICE_SIZE + 1, context.getMaximumMessageSliceSize());
         assertEquals(DEFAULT_INITIAL_PAYLOAD_SERIALIZED_BUFFER_CAPACITY + 1,
                 context.getInitialPayloadSerializedBufferCapacity());
+        assertEquals(DEFAULT_RECOVERY_EXPORT_BASE_DIR + "-new",
+                context.getRecoveryExportBaseDir());
+        assertEquals(ExportOnRecovery.Json, context.getExportOnRecovery());
     }
 }
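
The hunk above exercises a builder round-trip: every setter is bumped off its default and the built DatastoreContext is then checked getter-by-getter. A minimal sketch of that shape, restricted to setters and getters visible in the hunks above; DatastoreContext.newBuilder() is assumed as the entry point (the factory method itself is not shown here), and the block is meant to live inside the same test class with its existing static imports:

    DatastoreContext.Builder builder = DatastoreContext.newBuilder();          // assumed factory method
    builder.persistent(!DEFAULT_PERSISTENT);                                   // flip a boolean default
    builder.exportOnRecovery(ExportOnRecovery.Json);                           // export knob newly exercised above
    builder.recoveryExportBaseDir(DEFAULT_RECOVERY_EXPORT_BASE_DIR + "-new");

    DatastoreContext context = builder.build();
    assertEquals(!DEFAULT_PERSISTENT, context.isPersistent());
    assertEquals(ExportOnRecovery.Json, context.getExportOnRecovery());
    assertEquals(DEFAULT_RECOVERY_EXPORT_BASE_DIR + "-new", context.getRecoveryExportBaseDir());
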
index a35a901c6f6bfd4c2a71c9035847e3baeee15a17..9b13193b15da96b2633ec71a3cff8838780cf116 100644 (file)
@@ -15,9 +15,9 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.FileOutputStream;
-import java.math.BigInteger;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.Optional;
 import org.apache.commons.lang3.SerializationUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -32,12 +32,13 @@ import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 
 /**
  * Unit tests for DatastoreSnapshotRestore.
@@ -63,8 +64,7 @@ public class DatastoreSnapshotRestoreTest {
                 newShardManagerSnapshot("config-one", "config-two"),
                 Arrays.asList(new DatastoreSnapshot.ShardSnapshot("config-one", newSnapshot(CarsModel.BASE_PATH,
                         CarsModel.newCarsNode(CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima",
-                            BigInteger.valueOf(20000L)),CarsModel.newCarEntry("sportage",
-                                BigInteger.valueOf(30000L)))))),
+                            Uint64.valueOf(20000)), CarsModel.newCarEntry("sportage", Uint64.valueOf(30000)))))),
                         new DatastoreSnapshot.ShardSnapshot("config-two", newSnapshot(PeopleModel.BASE_PATH,
                             PeopleModel.emptyContainer()))));
 
@@ -78,18 +78,15 @@ public class DatastoreSnapshotRestoreTest {
             SerializationUtils.serialize(snapshotList, fos);
         }
 
-        DatastoreSnapshotRestore instance = DatastoreSnapshotRestore.instance(restoreDirectoryPath);
+        DefaultDatastoreSnapshotRestore instance = new DefaultDatastoreSnapshotRestore(restoreDirectoryPath);
+        instance.activate();
 
-        assertDatastoreSnapshotEquals(configSnapshot, instance.getAndRemove("config"));
-        assertDatastoreSnapshotEquals(operSnapshot, instance.getAndRemove("oper"));
+        assertDatastoreSnapshotEquals(configSnapshot, instance.getAndRemove("config").orElse(null));
+        assertDatastoreSnapshotEquals(operSnapshot, instance.getAndRemove("oper").orElse(null));
 
-        assertNull("DatastoreSnapshot was not removed", instance.getAndRemove("config"));
+        assertEquals("DatastoreSnapshot was not removed", Optional.empty(), instance.getAndRemove("config"));
 
         assertFalse(backupFile + " was not deleted", backupFile.exists());
-
-        instance = DatastoreSnapshotRestore.instance(restoreDirectoryPath);
-        assertNull("Expected null DatastoreSnapshot", instance.getAndRemove("config"));
-        assertNull("Expected null DatastoreSnapshot", instance.getAndRemove("oper"));
     }
 
     private static void assertDatastoreSnapshotEquals(final DatastoreSnapshot expected,
@@ -126,15 +123,14 @@ public class DatastoreSnapshotRestoreTest {
     }
 
     private static ShardManagerSnapshot newShardManagerSnapshot(final String... shards) {
-        return new ShardManagerSnapshot(Arrays.asList(shards), Collections.emptyMap());
+        return new ShardManagerSnapshot(Arrays.asList(shards));
     }
 
-    private static Snapshot newSnapshot(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
-            throws Exception {
+    private static Snapshot newSnapshot(final YangInstanceIdentifier path, final NormalizedNode node) throws Exception {
         DataTree dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
             SchemaContextHelper.full());
         AbstractShardTest.writeToStore(dataTree, path, node);
-        NormalizedNode<?, ?> root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.EMPTY);
+        NormalizedNode root = AbstractShardTest.readStore(dataTree, YangInstanceIdentifier.of());
 
         return Snapshot.create(new ShardSnapshotState(new MetadataShardDataTreeSnapshot(root)),
                 Collections.<ReplicatedLogEntry>emptyList(), 2, 1, 2, 1, 1, "member-1", null);
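
The hunks above also track DatastoreSnapshotRestore moving from a nullable getAndRemove(String) to an Optional-returning API on DefaultDatastoreSnapshotRestore. A minimal consumer-side sketch under that assumption, using only the constructor, activate() and getAndRemove() calls visible above (restoreDirectoryPath is the same test fixture, and the block assumes the test's existing imports):

    DefaultDatastoreSnapshotRestore restore = new DefaultDatastoreSnapshotRestore(restoreDirectoryPath);
    restore.activate();

    // The first lookup consumes the snapshot; a repeated lookup for the same name is empty.
    Optional<DatastoreSnapshot> config = restore.getAndRemove("config");
    assertTrue(config.isPresent());
    assertEquals(Optional.empty(), restore.getAndRemove("config"));
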
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohortTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DebugThreePhaseCommitCohortTest.java
deleted file mode 100644 (file)
index 16dc540..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertSame;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.same;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.List;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.slf4j.Logger;
-import scala.concurrent.Future;
-
-/**
- * Unit tests for DebugThreePhaseCommitCohort.
- *
- * @author Thomas Pantelis
- */
-public class DebugThreePhaseCommitCohortTest {
-    private final TransactionIdentifier transactionId = MockIdentifiers.transactionIdentifier(
-        DebugThreePhaseCommitCohortTest.class, "mock");
-
-    @Test
-    public void test() {
-        AbstractThreePhaseCommitCohort<?> mockDelegate = mock(AbstractThreePhaseCommitCohort.class);
-        Exception failure = new Exception("mock failure");
-        ListenableFuture<Object> expFailedFuture = Futures.immediateFailedFuture(failure);
-        doReturn(expFailedFuture).when(mockDelegate).canCommit();
-        doReturn(expFailedFuture).when(mockDelegate).preCommit();
-        doReturn(expFailedFuture).when(mockDelegate).commit();
-
-        ListenableFuture<Object> expAbortFuture = Futures.immediateFuture(null);
-        doReturn(expAbortFuture).when(mockDelegate).abort();
-
-        List<Future<Object>> expCohortFutures = new ArrayList<>();
-        doReturn(expCohortFutures).when(mockDelegate).getCohortFutures();
-
-        Throwable debugContext = new RuntimeException("mock");
-        DebugThreePhaseCommitCohort cohort = new DebugThreePhaseCommitCohort(transactionId, mockDelegate, debugContext);
-
-        Logger mockLogger = mock(Logger.class);
-        cohort.setLogger(mockLogger);
-
-        assertSame("canCommit", expFailedFuture, cohort.canCommit());
-        verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
-        reset(mockLogger);
-        assertSame("preCommit", expFailedFuture, cohort.preCommit());
-        verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
-        reset(mockLogger);
-        assertSame("commit", expFailedFuture, cohort.commit());
-        verify(mockLogger).warn(anyString(), same(transactionId), same(failure), same(debugContext));
-
-        assertSame("abort", expAbortFuture, cohort.abort());
-
-        assertSame("getCohortFutures", expCohortFutures, cohort.getCohortFutures());
-
-        reset(mockLogger);
-        ListenableFuture<Boolean> expSuccessFuture = Futures.immediateFuture(Boolean.TRUE);
-        doReturn(expSuccessFuture).when(mockDelegate).canCommit();
-
-        assertSame("canCommit", expSuccessFuture, cohort.canCommit());
-        verify(mockLogger, never()).warn(anyString(), any(TransactionIdentifier.class), any(Throwable.class),
-                any(Throwable.class));
-    }
-}
index d984535db811a1e5ad066c62a80bee5e5617dad3..6815b2d367a96a7bcdba3b94ca927e133d7b9cd6 100644 (file)
@@ -35,7 +35,7 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
-import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
+import org.opendaylight.controller.cluster.databroker.TestClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
@@ -55,7 +55,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
     @Parameters(name = "{0}")
     public static Collection<Object[]> data() {
         return Arrays.asList(new Object[][] {
-                { DistributedDataStore.class }, { ClientBackedDataStore.class }
+            { TestClientBackedDataStore.class }
         });
     }
 
@@ -87,9 +87,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
         final CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
         InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             // Create the write Tx
             final DOMStoreWriteTransaction writeTx = writeOnly ? dataStore.newWriteOnlyTransaction()
                     : dataStore.newReadWriteTransaction();
@@ -147,7 +145,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
             // Verify the data in the store
             final DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
 
-            Optional<NormalizedNode<?, ?>> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
+            Optional<NormalizedNode> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
             assertTrue("isPresent", optional.isPresent());
 
             optional = readTx.read(TestModel.OUTER_LIST_PATH).get(5, TimeUnit.SECONDS);
@@ -183,16 +181,14 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
         final CountDownLatch blockRecoveryLatch = new CountDownLatch(1);
         InMemoryJournal.addBlockReadMessagesLatch(persistentID, blockRecoveryLatch);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-            testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             // Create the read-write Tx
             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
 
             // Do some reads on the Tx on a separate thread.
             final AtomicReference<FluentFuture<Boolean>> txExistsFuture = new AtomicReference<>();
-            final AtomicReference<FluentFuture<Optional<NormalizedNode<?, ?>>>> txReadFuture = new AtomicReference<>();
+            final AtomicReference<FluentFuture<Optional<NormalizedNode>>> txReadFuture = new AtomicReference<>();
             final AtomicReference<Exception> caughtEx = new AtomicReference<>();
             final CountDownLatch txReadsDone = new CountDownLatch(1);
             final Thread txThread = new Thread(() -> {
@@ -251,7 +247,7 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
 
         InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence");
 
-        final AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName);
+        final var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName);
 
         // Create the write Tx
         final DOMStoreWriteTransaction writeTx = dataStore.newWriteOnlyTransaction();
@@ -317,14 +313,13 @@ public class DistributedDataStoreIntegrationTest extends AbstractDistributedData
 
         InMemoryJournal.addEntry(persistentID, 1, "Dummy data so akka will read from persistence");
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(testParameter, testName, false, shardName)) {
-
+        try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
             // Create the read-write Tx
             final DOMStoreReadWriteTransaction readWriteTx = dataStore.newReadWriteTransaction();
             assertNotNull("newReadWriteTransaction returned null", readWriteTx);
 
             // Do a read on the Tx on a separate thread.
-            final AtomicReference<FluentFuture<Optional<NormalizedNode<?, ?>>>> txReadFuture = new AtomicReference<>();
+            final AtomicReference<FluentFuture<Optional<NormalizedNode>>> txReadFuture = new AtomicReference<>();
             final AtomicReference<Exception> caughtEx = new AtomicReference<>();
             final CountDownLatch txReadDone = new CountDownLatch(1);
             final Thread txThread = new Thread(() -> {
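
The DistributedDataStoreIntegrationTest changes above follow two mechanical moves: setupAbstractDataStore/AbstractDataStore giving way to setupDataStore with var, and yangtools dropping NormalizedNode's type parameters so reads now yield Optional<NormalizedNode>. A minimal sketch of the resulting read pattern, mirroring only calls that appear in the hunks above (testKit, testParameter, testName and shardName are the surrounding test fixtures):

    try (var dataStore = testKit.setupDataStore(testParameter, testName, false, shardName)) {
        DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();

        // read() returns a FluentFuture; get(timeout, unit) blocks for the result
        Optional<NormalizedNode> optional = readTx.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
        assertTrue("isPresent", optional.isPresent());
    }
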
index 3943e7ee563325f2fb371182ef6d18413b1dd7f9..91c00f7eb153c04a6fc64f6782a7349dab80e2f3 100644 (file)
@@ -7,13 +7,21 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static org.awaitility.Awaitility.await;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.verify;
 
@@ -23,44 +31,48 @@ import akka.actor.ActorSystem;
 import akka.actor.Address;
 import akka.actor.AddressFromURIString;
 import akka.cluster.Cluster;
+import akka.cluster.Member;
 import akka.dispatch.Futures;
 import akka.pattern.Patterns;
 import akka.testkit.javadsl.TestKit;
 import com.google.common.base.Stopwatch;
-import com.google.common.base.Supplier;
 import com.google.common.base.Throwables;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.Uninterruptibles;
 import com.typesafe.config.ConfigFactory;
-import java.math.BigInteger;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import org.junit.After;
-import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
 import org.junit.runners.Parameterized.Parameters;
-import org.mockito.Mockito;
 import org.mockito.stubbing.Answer;
 import org.opendaylight.controller.cluster.access.client.RequestTimeoutException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.databroker.ConcurrentDOMDataBroker;
+import org.opendaylight.controller.cluster.databroker.TestClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.ShardLeaderNotRespondingException;
+import org.opendaylight.controller.cluster.datastore.TestShard.RequestFrontendMetadata;
+import org.opendaylight.controller.cluster.datastore.TestShard.StartDropMessages;
+import org.opendaylight.controller.cluster.datastore.TestShard.StopDropMessages;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.GetShardDataTree;
@@ -68,10 +80,17 @@ import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransact
 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
 import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
+import org.opendaylight.controller.cluster.datastore.persisted.FrontendClientMetadata;
+import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
 import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
+import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
@@ -82,27 +101,31 @@ import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
+import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
 import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
 import org.opendaylight.mdsal.dom.spi.store.DOMStore;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.collection.Set;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
@@ -118,12 +141,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     @Parameters(name = "{0}")
     public static Collection<Object[]> data() {
         return Arrays.asList(new Object[][] {
-                { DistributedDataStore.class, 7}, { ClientBackedDataStore.class, 12 }
+                { TestClientBackedDataStore.class, 12 }
         });
     }
 
     @Parameter(0)
-    public Class<? extends AbstractDataStore> testParameter;
+    public Class<? extends ClientBackedDataStore> testParameter;
     @Parameter(1)
     public int commitTimeout;
 
@@ -153,8 +176,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     private final TransactionIdentifier tx1 = nextTransactionId();
     private final TransactionIdentifier tx2 = nextTransactionId();
 
-    private AbstractDataStore followerDistributedDataStore;
-    private AbstractDataStore leaderDistributedDataStore;
+    private ClientBackedDataStore followerDistributedDataStore;
+    private ClientBackedDataStore leaderDistributedDataStore;
     private IntegrationTestKit followerTestKit;
     private IntegrationTestKit leaderTestKit;
 
@@ -176,15 +199,15 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     @After
     public void tearDown() {
         if (followerDistributedDataStore != null) {
-            leaderDistributedDataStore.close();
+            followerDistributedDataStore.close();
         }
         if (leaderDistributedDataStore != null) {
             leaderDistributedDataStore.close();
         }
 
-        TestKit.shutdownActorSystem(leaderSystem);
-        TestKit.shutdownActorSystem(followerSystem);
-        TestKit.shutdownActorSystem(follower2System);
+        TestKit.shutdownActorSystem(leaderSystem, true);
+        TestKit.shutdownActorSystem(followerSystem, true);
+        TestKit.shutdownActorSystem(follower2System, true);
 
         InMemoryJournal.clear();
         InMemorySnapshotStore.clear();
@@ -200,13 +223,20 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards)
             throws Exception {
-        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder, commitTimeout);
+        initDatastores(type, moduleShardsConfig, shards, leaderDatastoreContextBuilder,
+                followerDatastoreContextBuilder);
+    }
 
-        leaderDistributedDataStore = leaderTestKit.setupAbstractDataStore(
-                testParameter, type, moduleShardsConfig, false, shards);
+    private void initDatastores(final String type, final String moduleShardsConfig, final String[] shards,
+            final DatastoreContext.Builder leaderBuilder, final DatastoreContext.Builder followerBuilder)
+                    throws Exception {
+        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderBuilder, commitTimeout);
 
-        followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder, commitTimeout);
-        followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
+        leaderDistributedDataStore = leaderTestKit.setupDataStore(testParameter, type, moduleShardsConfig, false,
+            shards);
+
+        followerTestKit = new IntegrationTestKit(followerSystem, followerBuilder, commitTimeout);
+        followerDistributedDataStore = followerTestKit.setupDataStore(
                 testParameter, type, moduleShardsConfig, false, shards);
 
         leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(), shards);
@@ -217,29 +247,19 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     private static void verifyCars(final DOMStoreReadTransaction readTx, final MapEntryNode... entries)
             throws Exception {
-        final Optional<NormalizedNode<?, ?>> optional = readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-
-        final CollectionNodeBuilder<MapEntryNode, MapNode> listBuilder = ImmutableNodes.mapNodeBuilder(
-                CarsModel.CAR_QNAME);
-        for (final NormalizedNode<?, ?> entry: entries) {
-            listBuilder.withChild((MapEntryNode) entry);
-        }
-
-        assertEquals("Car list node", listBuilder.build(), optional.get());
+        assertEquals("Car list node",
+            Optional.of(ImmutableNodes.mapNodeBuilder(CarsModel.CAR_QNAME).withValue(Arrays.asList(entries)).build()),
+            readTx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS));
     }
 
     private static void verifyNode(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> expNode) throws Exception {
-        final Optional<NormalizedNode<?, ?>> optional = readTx.read(path).get(5, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-        assertEquals("Data node", expNode, optional.get());
+            final NormalizedNode expNode) throws Exception {
+        assertEquals(Optional.of(expNode), readTx.read(path).get(5, TimeUnit.SECONDS));
     }
 
     private static void verifyExists(final DOMStoreReadTransaction readTx, final YangInstanceIdentifier path)
             throws Exception {
-        final Boolean exists = readTx.exists(path).get(5, TimeUnit.SECONDS);
-        assertEquals("exists", Boolean.TRUE, exists);
+        assertEquals("exists", Boolean.TRUE, readTx.exists(path).get(5, TimeUnit.SECONDS));
     }
 
     @Test
@@ -255,11 +275,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
         writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
 
-        final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
         final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
         writeTx.merge(car1Path, car1);
 
-        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000));
+        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
         final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
         writeTx.merge(car2Path, car2);
 
@@ -321,13 +341,99 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         final ActorSystem newSystem = newActorSystem("reinstated-member2", "Member2");
 
-        try (AbstractDataStore member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder,
-                commitTimeout)
-                .setupAbstractDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
+        try (var member2Datastore = new IntegrationTestKit(newSystem, leaderDatastoreContextBuilder, commitTimeout)
+                .setupDataStore(testParameter, testName, "module-shards-member2", true, CARS)) {
             verifyCars(member2Datastore.newReadOnlyTransaction(), car2);
         }
     }
 
+    @Test
+    public void testSingleTransactionsWritesInQuickSuccession() throws Exception {
+        initDatastoresWithCars("testSingleTransactionsWritesInQuickSuccession");
+
+        final DOMStoreTransactionChain txChain = followerDistributedDataStore.createTransactionChain();
+
+        DOMStoreWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+        writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
+        writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
+        followerTestKit.doCommit(writeTx.ready());
+
+        int numCars = 5;
+        for (int i = 0; i < numCars; i++) {
+            writeTx = txChain.newWriteOnlyTransaction();
+            writeTx.write(CarsModel.newCarPath("car" + i), CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000)));
+            followerTestKit.doCommit(writeTx.ready());
+
+            try (var tx = txChain.newReadOnlyTransaction()) {
+                tx.read(CarsModel.BASE_PATH).get();
+            }
+        }
+
+        // wait to let the shard catch up with purged
+        await("Range set leak test").atMost(5, TimeUnit.SECONDS)
+            .pollInterval(500, TimeUnit.MILLISECONDS)
+            .untilAsserted(() -> {
+                final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+                final var frontendMetadata =
+                    (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
+                    .executeOperation(localShard, new RequestFrontendMetadata());
+
+                assertClientMetadata(frontendMetadata.getClients().get(0), numCars * 2);
+            });
+
+        try (var tx = txChain.newReadOnlyTransaction()) {
+            final var body = assertInstanceOf(Collection.class,
+                tx.read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS).orElseThrow().body());
+            assertEquals(numCars, ((Collection<?>) body).size());
+        }
+    }
+
+    private static void assertClientMetadata(final FrontendClientMetadata clientMeta, final long lastPurged) {
+        final var iterator = clientMeta.getCurrentHistories().iterator();
+        var metadata = iterator.next();
+        while (iterator.hasNext() && metadata.getHistoryId() != 1) {
+            metadata = iterator.next();
+        }
+
+        assertEquals(UnsignedLongBitmap.of(), metadata.getClosedTransactions());
+        assertEquals("[[0.." + lastPurged + "]]", metadata.getPurgedTransactions().ranges().toString());
+    }
+
+    @Test
+    public void testCloseTransactionMetadataLeak() throws Exception {
+        initDatastoresWithCars("testCloseTransactionMetadataLeak");
+
+        final var txChain = followerDistributedDataStore.createTransactionChain();
+
+        var writeTx = txChain.newWriteOnlyTransaction();
+        writeTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
+        writeTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
+        followerTestKit.doCommit(writeTx.ready());
+
+        int numCars = 5;
+        for (int i = 0; i < numCars; i++) {
+            try (var tx = txChain.newWriteOnlyTransaction()) {
+                // Empty on purpose
+            }
+
+            try (var tx = txChain.newReadOnlyTransaction()) {
+                tx.read(CarsModel.BASE_PATH).get();
+            }
+        }
+
+        // wait to let the shard catch up with purged
+        await("wait for purges to settle").atMost(5, TimeUnit.SECONDS)
+            .pollInterval(500, TimeUnit.MILLISECONDS)
+            .untilAsserted(() -> {
+                final var localShard = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+                final var frontendMetadata =
+                    (FrontendShardDataTreeSnapshotMetadata) leaderDistributedDataStore.getActorUtils()
+                    .executeOperation(localShard, new RequestFrontendMetadata());
+
+                assertClientMetadata(frontendMetadata.getClients().get(0), numCars * 2);
+            });
+    }
+
     @Test
     public void testReadWriteTransactionWithSingleShard() throws Exception {
         initDatastoresWithCars("testReadWriteTransactionWithSingleShard");
@@ -338,12 +444,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
         rwTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
 
-        final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
         rwTx.merge(CarsModel.newCarPath("optima"), car1);
 
         verifyCars(rwTx, car1);
 
-        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000));
+        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
         final YangInstanceIdentifier car2Path = CarsModel.newCarPath("sportage");
         rwTx.merge(car2Path, car2);
 
@@ -362,11 +468,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         assertNotNull("newWriteOnlyTransaction returned null", writeTx);
 
         final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
-        final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
+        final NormalizedNode carsNode = CarsModel.emptyContainer();
         writeTx.write(carsPath, carsNode);
 
         final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
-        final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
+        final NormalizedNode peopleNode = PeopleModel.emptyContainer();
         writeTx.write(peoplePath, peopleNode);
 
         followerTestKit.doCommit(writeTx.ready());
@@ -385,11 +491,11 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         assertNotNull("newReadWriteTransaction returned null", rwTx);
 
         final YangInstanceIdentifier carsPath = CarsModel.BASE_PATH;
-        final NormalizedNode<?, ?> carsNode = CarsModel.emptyContainer();
+        final NormalizedNode carsNode = CarsModel.emptyContainer();
         rwTx.write(carsPath, carsNode);
 
         final YangInstanceIdentifier peoplePath = PeopleModel.BASE_PATH;
-        final NormalizedNode<?, ?> peopleNode = PeopleModel.emptyContainer();
+        final NormalizedNode peopleNode = PeopleModel.emptyContainer();
         rwTx.write(peoplePath, peopleNode);
 
         followerTestKit.doCommit(rwTx.ready());
@@ -427,7 +533,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         rwTx.merge(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
 
-        final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
         final YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
         rwTx.write(car1Path, car1);
 
@@ -435,7 +541,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         verifyCars(rwTx, car1);
 
-        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(25000));
+        final MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(25000));
         rwTx.merge(CarsModel.newCarPath("sportage"), car2);
 
         rwTx.delete(car1Path);
@@ -468,7 +574,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         final DOMStoreReadWriteTransaction readWriteTx = txChain.newReadWriteTransaction();
 
-        final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+        final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
         final YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
         readWriteTx.write(carPath, car);
 
@@ -476,13 +582,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final YangInstanceIdentifier personPath = PeopleModel.newPersonPath("jack");
         readWriteTx.merge(personPath, person);
 
-        Optional<NormalizedNode<?, ?>> optional = readWriteTx.read(carPath).get(5, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-        assertEquals("Data node", car, optional.get());
-
-        optional = readWriteTx.read(personPath).get(5, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-        assertEquals("Data node", person, optional.get());
+        assertEquals(Optional.of(car), readWriteTx.read(carPath).get(5, TimeUnit.SECONDS));
+        assertEquals(Optional.of(person), readWriteTx.read(personPath).get(5, TimeUnit.SECONDS));
 
         final DOMStoreThreePhaseCommitCohort cohort2 = readWriteTx.ready();
 
@@ -500,8 +601,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final DOMStoreReadTransaction readTx = followerDistributedDataStore.newReadOnlyTransaction();
         verifyCars(readTx, car);
 
-        optional = readTx.read(personPath).get(5, TimeUnit.SECONDS);
-        assertFalse("isPresent", optional.isPresent());
+        assertEquals(Optional.empty(), readTx.read(personPath).get(5, TimeUnit.SECONDS));
     }
 
     @Test
@@ -513,25 +613,21 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                         LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
                         MoreExecutors.directExecutor());
 
-        final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-        final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+        final var listener = mock(FutureCallback.class);
+        final DOMTransactionChain txChain = broker.createTransactionChain();
+        txChain.addCallback(listener);
 
         final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
 
-        final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
-
-        writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
+        writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+            .build());
 
-        try {
-            writeTx.commit().get(5, TimeUnit.SECONDS);
-            fail("Expected TransactionCommitFailedException");
-        } catch (final ExecutionException e) {
-            // Expected
-        }
+        final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS));
+        assertInstanceOf(TransactionCommitFailedException.class, ex.getCause());
 
-        verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
+        verify(listener, timeout(5000)).onFailure(any());
 
         txChain.close();
         broker.close();
@@ -541,37 +637,32 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
     public void testChainedTransactionFailureWithMultipleShards() throws Exception {
         initDatastoresWithCarsAndPeople("testChainedTransactionFailureWithMultipleShards");
 
-        final ConcurrentDOMDataBroker broker = new ConcurrentDOMDataBroker(
-                ImmutableMap.<LogicalDatastoreType, DOMStore>builder().put(
-                        LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore).build(),
-                        MoreExecutors.directExecutor());
-
-        final DOMTransactionChainListener listener = Mockito.mock(DOMTransactionChainListener.class);
-        final DOMTransactionChain txChain = broker.createTransactionChain(listener);
+        try (var broker = new ConcurrentDOMDataBroker(
+            Map.of(LogicalDatastoreType.CONFIGURATION, followerDistributedDataStore), MoreExecutors.directExecutor())) {
 
-        final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
+            final var listener = mock(FutureCallback.class);
+            final DOMTransactionChain txChain = broker.createTransactionChain();
+            txChain.addCallback(listener);
 
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
+            final DOMDataTreeWriteTransaction writeTx = txChain.newWriteOnlyTransaction();
 
-        final ContainerNode invalidData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(CarsModel.BASE_QNAME))
-                    .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+            writeTx.put(LogicalDatastoreType.CONFIGURATION, PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
 
-        // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
-        // done for put for performance reasons.
-        writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, invalidData);
+            // Note that merge will validate the data and fail but put succeeds b/c deep validation is not
+            // done for put for performance reasons.
+            writeTx.merge(LogicalDatastoreType.CONFIGURATION, CarsModel.BASE_PATH, Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+                .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+                .build());
 
-        try {
-            writeTx.commit().get(5, TimeUnit.SECONDS);
-            fail("Expected TransactionCommitFailedException");
-        } catch (final ExecutionException e) {
-            // Expected
-        }
+            final var ex = assertThrows(ExecutionException.class, () -> writeTx.commit().get(5, TimeUnit.SECONDS))
+                .getCause();
+            assertThat(ex, instanceOf(TransactionCommitFailedException.class));
 
-        verify(listener, timeout(5000)).onTransactionChainFailed(eq(txChain), eq(writeTx), any(Throwable.class));
+            verify(listener, timeout(5000)).onFailure(any());
 
-        txChain.close();
-        broker.close();
+            txChain.close();
+        }
     }
 
     @Test
@@ -611,9 +702,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
                 .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
         IntegrationTestKit newMember1TestKit = new IntegrationTestKit(leaderSystem, newMember1Builder, commitTimeout);
 
-        try (AbstractDataStore ds =
-                newMember1TestKit.setupAbstractDataStore(
-                        testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS)) {
+        try (var ds = newMember1TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false,
+            CARS)) {
 
             followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), CARS);
 
@@ -621,7 +711,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
             writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
 
-            MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+            MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
             YangInstanceIdentifier car1Path = CarsModel.newCarPath("optima");
             writeTx.merge(car1Path, car1);
 
@@ -631,13 +721,12 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         }
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testReadyLocalTransactionForwardedToLeader() throws Exception {
         initDatastoresWithCars("testReadyLocalTransactionForwardedToLeader");
         followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
 
-        final com.google.common.base.Optional<ActorRef> carsFollowerShard =
+        final Optional<ActorRef> carsFollowerShard =
                 followerDistributedDataStore.getActorUtils().findLocalShard("cars");
         assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
 
@@ -650,13 +739,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
         new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
 
-        final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
         new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
         modification.ready();
 
         ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(tx1 , modification, true, Optional.empty());
 
-        carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(readyLocal, followerTestKit.getRef());
         Object resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -669,13 +758,13 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         // Send another tx without immediate commit.
 
         modification = dataTree.takeSnapshot().newModification();
-        MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(30000));
+        MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
         new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
         modification.ready();
 
         readyLocal = new ReadyLocalTransaction(tx2 , modification, false, Optional.empty());
 
-        carsFollowerShard.get().tell(readyLocal, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(readyLocal, followerTestKit.getRef());
         resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -686,11 +775,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
                 ((ReadyTransactionReply)resp).getCohortPath());
 
-        final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
-        Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
-        ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
-                leaderDistributedDataStore.getActorUtils(), Arrays.asList(
-                        new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
+        ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(leaderDistributedDataStore.getActorUtils(),
+            List.of(new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor),
+                () -> DataStoreVersions.CURRENT_VERSION)), tx2);
         cohort.canCommit().get(5, TimeUnit.SECONDS);
         cohort.preCommit().get(5, TimeUnit.SECONDS);
         cohort.commit().get(5, TimeUnit.SECONDS);
@@ -698,17 +785,16 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         verifyCars(leaderDistributedDataStore.newReadOnlyTransaction(), car1, car2);
     }
 
-    @SuppressWarnings("unchecked")
     @Test
     public void testForwardedReadyTransactionForwardedToLeader() throws Exception {
         initDatastoresWithCars("testForwardedReadyTransactionForwardedToLeader");
         followerTestKit.waitUntilLeader(followerDistributedDataStore.getActorUtils(), "cars");
 
-        final com.google.common.base.Optional<ActorRef> carsFollowerShard =
+        final Optional<ActorRef> carsFollowerShard =
                 followerDistributedDataStore.getActorUtils().findLocalShard("cars");
         assertTrue("Cars follower shard found", carsFollowerShard.isPresent());
 
-        carsFollowerShard.get().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(GetShardDataTree.INSTANCE, followerTestKit.getRef());
         final DataTree dataTree = followerTestKit.expectMsgClass(DataTree.class);
 
         // Send a tx with immediate commit.
@@ -717,15 +803,14 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         new WriteModification(CarsModel.BASE_PATH, CarsModel.emptyContainer()).apply(modification);
         new MergeModification(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()).apply(modification);
 
-        final MapEntryNode car1 = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+        final MapEntryNode car1 = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
         new WriteModification(CarsModel.newCarPath("optima"), car1).apply(modification);
 
-        ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1,
-                DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
-                        Mockito.mock(ShardDataTreeTransactionParent.class), tx1, modification), true,
-                Optional.empty());
+        ForwardedReadyTransaction forwardedReady = new ForwardedReadyTransaction(tx1, DataStoreVersions.CURRENT_VERSION,
+            new ReadWriteShardDataTreeTransaction(mock(ShardDataTreeTransactionParent.class), tx1, modification),
+            true, Optional.empty());
 
-        carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(forwardedReady, followerTestKit.getRef());
         Object resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -738,15 +823,14 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         // Send another tx without immediate commit.
 
         modification = dataTree.takeSnapshot().newModification();
-        MapEntryNode car2 = CarsModel.newCarEntry("sportage", BigInteger.valueOf(30000));
+        MapEntryNode car2 = CarsModel.newCarEntry("sportage", Uint64.valueOf(30000));
         new WriteModification(CarsModel.newCarPath("sportage"), car2).apply(modification);
 
-        forwardedReady = new ForwardedReadyTransaction(tx2,
-                DataStoreVersions.CURRENT_VERSION, new ReadWriteShardDataTreeTransaction(
-                        Mockito.mock(ShardDataTreeTransactionParent.class), tx2, modification), false,
-                Optional.empty());
+        forwardedReady = new ForwardedReadyTransaction(tx2, DataStoreVersions.CURRENT_VERSION,
+            new ReadWriteShardDataTreeTransaction(mock(ShardDataTreeTransactionParent.class), tx2, modification),
+            false, Optional.empty());
 
-        carsFollowerShard.get().tell(forwardedReady, followerTestKit.getRef());
+        carsFollowerShard.orElseThrow().tell(forwardedReady, followerTestKit.getRef());
         resp = followerTestKit.expectMsgClass(Object.class);
         if (resp instanceof akka.actor.Status.Failure) {
             throw new AssertionError("Unexpected failure response", ((akka.actor.Status.Failure)resp).cause());
@@ -757,11 +841,10 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         ActorSelection txActor = leaderDistributedDataStore.getActorUtils().actorSelection(
                 ((ReadyTransactionReply)resp).getCohortPath());
 
-        final Supplier<Short> versionSupplier = Mockito.mock(Supplier.class);
-        Mockito.doReturn(DataStoreVersions.CURRENT_VERSION).when(versionSupplier).get();
         final ThreePhaseCommitCohortProxy cohort = new ThreePhaseCommitCohortProxy(
-                leaderDistributedDataStore.getActorUtils(), Arrays.asList(
-                        new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor), versionSupplier)), tx2);
+            leaderDistributedDataStore.getActorUtils(), List.of(
+                new ThreePhaseCommitCohortProxy.CohortInfo(Futures.successful(txActor),
+                    () -> DataStoreVersions.CURRENT_VERSION)), tx2);
         cohort.canCommit().get(5, TimeUnit.SECONDS);
         cohort.preCommit().get(5, TimeUnit.SECONDS);
         cohort.commit().get(5, TimeUnit.SECONDS);
@@ -771,12 +854,14 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     @Test
     public void testTransactionForwardedToLeaderAfterRetry() throws Exception {
-        //TODO remove when test passes also for ClientBackedDataStore
-        Assume.assumeTrue(testParameter.equals(DistributedDataStore.class));
         followerDatastoreContextBuilder.shardBatchedModificationCount(2);
         leaderDatastoreContextBuilder.shardBatchedModificationCount(2);
         initDatastoresWithCarsAndPeople("testTransactionForwardedToLeaderAfterRetry");
 
+        // Verify backend statistics on start
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 0);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
+
         // Do an initial write to get the primary shard info cached.
 
         final DOMStoreWriteTransaction initialWriteTx = followerDistributedDataStore.newWriteOnlyTransaction();
@@ -808,42 +893,49 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final DOMStoreWriteTransaction writeTx2 = followerDistributedDataStore.newWriteOnlyTransaction();
         final LinkedList<MapEntryNode> cars = new LinkedList<>();
         int carIndex = 1;
-        cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
+        cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
         writeTx2.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
         carIndex++;
-        NormalizedNode<?, ?> people = ImmutableNodes.mapNodeBuilder(PeopleModel.PERSON_QNAME)
+        NormalizedNode people = ImmutableNodes.mapNodeBuilder(PeopleModel.PERSON_QNAME)
                 .withChild(PeopleModel.newPersonEntry("Dude")).build();
         writeTx2.write(PeopleModel.PERSON_LIST_PATH, people);
         final DOMStoreThreePhaseCommitCohort writeTx2Cohort = writeTx2.ready();
 
+        // At this point only the leader should see the transactions
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 2);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
+
         // Prepare another WO that writes to a single shard and thus will be directly committed on ready. This
-        // tx writes 5 cars so 2 BatchedModidifications messages will be sent initially and cached in the
-        // leader shard (with shardBatchedModificationCount set to 2). The 3rd BatchedModidifications will be
-        // sent on ready.
+        // tx writes 5 cars so 2 BatchedModifications messages will be sent initially and cached in the leader shard
+        // (with shardBatchedModificationCount set to 2). The 3rd BatchedModifications will be sent on ready.
 
         final DOMStoreWriteTransaction writeTx3 = followerDistributedDataStore.newWriteOnlyTransaction();
         for (int i = 1; i <= 5; i++, carIndex++) {
-            cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
+            cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
             writeTx3.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
         }
 
-        // Prepare another WO that writes to a single shard. This will send a single BatchedModidifications
-        // message on ready.
+        // Prepare another WO that writes to a single shard. This will send a single BatchedModifications message
+        // on ready.
 
         final DOMStoreWriteTransaction writeTx4 = followerDistributedDataStore.newWriteOnlyTransaction();
-        cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
+        cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
         writeTx4.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
         carIndex++;
 
-        // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaciton message to the
-        // leader shard on ready.
+        // Prepare a RW tx that will create a tx actor and send a ForwardedReadyTransaction message to the leader shard
+        // on ready.
 
         final DOMStoreReadWriteTransaction readWriteTx = followerDistributedDataStore.newReadWriteTransaction();
-        cars.add(CarsModel.newCarEntry("car" + carIndex, BigInteger.valueOf(carIndex)));
-        readWriteTx.write(CarsModel.newCarPath("car" + carIndex), cars.getLast());
+        cars.add(CarsModel.newCarEntry("car" + carIndex, Uint64.valueOf(carIndex)));
+        final YangInstanceIdentifier carPath = CarsModel.newCarPath("car" + carIndex);
+        readWriteTx.write(carPath, cars.getLast());
 
-        IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
-            stats -> assertEquals("getReadWriteTransactionCount", 1, stats.getReadWriteTransactionCount()));
+        // There is a difference here between implementations: the tell-based protocol enforces batching at the
+        // per-transaction level, whereas the ask-based protocol has a global limit towards a shard -- and hence
+        // flushes out the last two transactions eagerly.
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 3);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 0);
 
         // Disable elections on the leader so it switches to follower.
 
@@ -876,15 +968,24 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         followerTestKit.doCommit(writeTx4Cohort);
         followerTestKit.doCommit(rwTxCohort);
 
+        // At this point everything is committed and the follower datastore should see 5 transactions, but the
+        // leader should only see the initial transactions
+        verifyCarsReadWriteTransactions(leaderDistributedDataStore, 3);
+        verifyCarsReadWriteTransactions(followerDistributedDataStore, 5);
+
         DOMStoreReadTransaction readTx = leaderDistributedDataStore.newReadOnlyTransaction();
         verifyCars(readTx, cars.toArray(new MapEntryNode[cars.size()]));
         verifyNode(readTx, PeopleModel.PERSON_LIST_PATH, people);
     }
 
+    private static void verifyCarsReadWriteTransactions(final ClientBackedDataStore datastore, final int expected)
+            throws Exception {
+        IntegrationTestKit.verifyShardStats(datastore, "cars",
+            stats -> assertEquals("getReadWriteTransactionCount", expected, stats.getReadWriteTransactionCount()));
+    }
+
     @Test
     public void testLeadershipTransferOnShutdown() throws Exception {
-        //TODO remove when test passes also for ClientBackedDataStore
-        Assume.assumeTrue(testParameter.equals(DistributedDataStore.class));
         leaderDatastoreContextBuilder.shardBatchedModificationCount(1);
         followerDatastoreContextBuilder.shardElectionTimeoutFactor(10).customRaftPolicyImplementation(null);
         final String testName = "testLeadershipTransferOnShutdown";
@@ -893,8 +994,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
                 DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500),
                 commitTimeout);
-        try (AbstractDataStore follower2DistributedDataStore = follower2TestKit.setupAbstractDataStore(
-                testParameter, testName, MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
+        try (var follower2DistributedDataStore = follower2TestKit.setupDataStore(testParameter, testName,
+            MODULE_SHARDS_CARS_PEOPLE_1_2_3, false)) {
 
             followerTestKit.waitForMembersUp("member-3");
             follower2TestKit.waitForMembersUp("member-1", "member-2");
@@ -907,16 +1008,18 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             writeTx.write(PeopleModel.BASE_PATH, PeopleModel.emptyContainer());
             final DOMStoreThreePhaseCommitCohort cohort1 = writeTx.ready();
 
-            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
-                stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
+            // FIXME: this assertion should be made in an explicit Shard test
+            //            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
+            //                stats -> assertEquals("getTxCohortCacheSize", 1, stats.getTxCohortCacheSize()));
 
             writeTx = followerDistributedDataStore.newWriteOnlyTransaction();
-            final MapEntryNode car = CarsModel.newCarEntry("optima", BigInteger.valueOf(20000));
+            final MapEntryNode car = CarsModel.newCarEntry("optima", Uint64.valueOf(20000));
             writeTx.write(CarsModel.newCarPath("optima"), car);
             final DOMStoreThreePhaseCommitCohort cohort2 = writeTx.ready();
 
-            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
-                stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
+            // FIXME: this assertion should be made in an explicit Shard test
+            //            IntegrationTestKit.verifyShardStats(leaderDistributedDataStore, "cars",
+            //                stats -> assertEquals("getTxCohortCacheSize", 2, stats.getTxCohortCacheSize()));
 
             // Gracefully stop the leader via a Shutdown message.
 
@@ -948,8 +1051,6 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     @Test
     public void testTransactionWithIsolatedLeader() throws Exception {
-        //TODO remove when test passes also for ClientBackedDataStore
-        Assume.assumeTrue(testParameter.equals(DistributedDataStore.class));
         // Set the isolated leader check interval high so we can control the switch to IsolatedLeader.
         leaderDatastoreContextBuilder.shardIsolatedLeaderCheckIntervalInMillis(10000000);
         final String testName = "testTransactionWithIsolatedLeader";
@@ -982,23 +1083,33 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         MemberNode.verifyRaftState(leaderDistributedDataStore, "cars",
             raftState -> assertEquals("getRaftState", "IsolatedLeader", raftState.getRaftState()));
 
-        try {
-            leaderTestKit.doCommit(noShardLeaderWriteTx.ready());
-            fail("Expected NoShardLeaderException");
-        } catch (final ExecutionException e) {
-            assertEquals("getCause", NoShardLeaderException.class, Throwables.getRootCause(e).getClass());
-        }
+        final var noShardLeaderCohort = noShardLeaderWriteTx.ready();
+        // tell-based canCommit() does not have a real timeout and hence continues
+        final var canCommit = noShardLeaderCohort.canCommit();
+        Uninterruptibles.sleepUninterruptibly(commitTimeout, TimeUnit.SECONDS);
+        assertFalse(canCommit.isDone());
 
         sendDatastoreContextUpdate(leaderDistributedDataStore, leaderDatastoreContextBuilder
                 .shardElectionTimeoutFactor(100));
 
         final DOMStoreThreePhaseCommitCohort successTxCohort = successWriteTx.ready();
 
-        followerDistributedDataStore = followerTestKit.setupAbstractDataStore(
-                testParameter, testName, MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
+        followerDistributedDataStore = followerTestKit.setupDataStore(testParameter, testName,
+            MODULE_SHARDS_CARS_ONLY_1_2, false, CARS);
 
         leaderTestKit.doCommit(preIsolatedLeaderTxCohort);
         leaderTestKit.doCommit(successTxCohort);
+
+        // continuation of canCommit(): readied transaction will complete commit, but will report an OLFE
+        final var ex = assertThrows(ExecutionException.class,
+            () -> canCommit.get(commitTimeout, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(OptimisticLockFailedException.class));
+        assertEquals("Optimistic lock failed for path " + CarsModel.BASE_PATH, ex.getMessage());
+        final var cause = ex.getCause();
+        assertThat(cause, instanceOf(ConflictingModificationAppliedException.class));
+        final var cmae = (ConflictingModificationAppliedException) cause;
+        assertEquals("Node was created by other transaction.", cmae.getMessage());
+        assertEquals(CarsModel.BASE_PATH, cmae.getPath());
     }
 
     @Test
@@ -1023,18 +1134,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
 
-        try {
-            followerTestKit.doCommit(rwTx.ready());
-            fail("Exception expected");
-        } catch (final ExecutionException e) {
-            final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
-            if (DistributedDataStore.class.equals(testParameter)) {
-                assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException
-                        || e.getCause() instanceof ShardLeaderNotRespondingException);
-            } else {
-                assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
-            }
-        }
+        final var ex = assertThrows(ExecutionException.class, () -> followerTestKit.doCommit(rwTx.ready()));
+        assertThat("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+            Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
     }
 
     @Test
@@ -1062,17 +1164,9 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         rwTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
 
-        try {
-            followerTestKit.doCommit(rwTx.ready());
-            fail("Exception expected");
-        } catch (final ExecutionException e) {
-            final String msg = "Unexpected exception: " + Throwables.getStackTraceAsString(e.getCause());
-            if (DistributedDataStore.class.equals(testParameter)) {
-                assertTrue(msg, Throwables.getRootCause(e) instanceof NoShardLeaderException);
-            } else {
-                assertTrue(msg, Throwables.getRootCause(e) instanceof RequestTimeoutException);
-            }
-        }
+        final var ex = assertThrows(ExecutionException.class, () -> followerTestKit.doCommit(rwTx.ready()));
+        assertThat("Unexpected exception: " + Throwables.getStackTraceAsString(ex.getCause()),
+            Throwables.getRootCause(ex), instanceOf(RequestTimeoutException.class));
     }
 
     @Test
@@ -1086,9 +1180,7 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
                 follower2System, follower2DatastoreContextBuilder, commitTimeout);
 
-        try (AbstractDataStore ds =
-                follower2TestKit.setupAbstractDataStore(
-                        testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
+        try (var ds = follower2TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS)) {
 
             followerTestKit.waitForMembersUp("member-1", "member-3");
             follower2TestKit.waitForMembersUp("member-1", "member-2");
@@ -1115,6 +1207,66 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         }
     }
 
+    @Test
+    public void testSemiReachableCandidateNotDroppingLeader() throws Exception {
+        final String testName = "testSemiReachableCandidateNotDroppingLeader";
+        initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
+
+        final DatastoreContext.Builder follower2DatastoreContextBuilder = DatastoreContext.newBuilder()
+                .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10);
+        final IntegrationTestKit follower2TestKit = new IntegrationTestKit(
+                follower2System, follower2DatastoreContextBuilder, commitTimeout);
+
+        final var ds2 = follower2TestKit.setupDataStore(testParameter, testName, MODULE_SHARDS_CARS_1_2_3, false, CARS);
+
+        followerTestKit.waitForMembersUp("member-1", "member-3");
+        follower2TestKit.waitForMembersUp("member-1", "member-2");
+
+        // behavior is controlled by akka.coordinated-shutdown.run-by-actor-system-terminate configuration option
+        TestKit.shutdownActorSystem(follower2System, true);
+
+        ActorRef cars = leaderDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+        final OnDemandRaftState initialState = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
+                .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
+
+        Cluster leaderCluster = Cluster.get(leaderSystem);
+        Cluster followerCluster = Cluster.get(followerSystem);
+        Cluster follower2Cluster = Cluster.get(follower2System);
+
+        Member follower2Member = follower2Cluster.readView().self();
+
+        await().atMost(10, TimeUnit.SECONDS)
+                .until(() -> containsUnreachable(leaderCluster, follower2Member));
+        await().atMost(10, TimeUnit.SECONDS)
+                .until(() -> containsUnreachable(followerCluster, follower2Member));
+
+        ActorRef followerCars = followerDistributedDataStore.getActorUtils().findLocalShard("cars").orElseThrow();
+
+        // To simulate a follower not being able to receive messages, but still being able to send messages and
+        // become a candidate, we just send a couple of RequestVotes to both the leader and the follower.
+        cars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
+        followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 1, "member-3-shard-cars", -1, -1), null);
+        cars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
+        followerCars.tell(new RequestVote(initialState.getCurrentTerm() + 3, "member-3-shard-cars", -1, -1), null);
+
+        OnDemandRaftState stateAfter = (OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
+                .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
+        OnDemandRaftState followerState = (OnDemandRaftState) followerDistributedDataStore.getActorUtils()
+                .executeOperation(cars, GetOnDemandRaftState.INSTANCE);
+
+        assertEquals(initialState.getCurrentTerm(), stateAfter.getCurrentTerm());
+        assertEquals(initialState.getCurrentTerm(), followerState.getCurrentTerm());
+
+        ds2.close();
+    }
+
+    private static Boolean containsUnreachable(final Cluster cluster, final Member member) {
+        // unreachableMembers() returns scala.collection.immutable.Set, but we are using scala.collection.Set to work
+        // around a JDT issue, see https://bugs.eclipse.org/bugs/show_bug.cgi?id=468276#c32
+        final Set<Member> members = cluster.readView().unreachableMembers();
+        return members.contains(member);
+    }
+
     @Test
     public void testInstallSnapshot() throws Exception {
         final String testName = "testInstallSnapshot";
@@ -1128,10 +1280,10 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
             SchemaContextHelper.full());
 
         final ContainerNode carsNode = CarsModel.newCarsNode(
-                CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", BigInteger.valueOf(20000))));
+                CarsModel.newCarsMapNode(CarsModel.newCarEntry("optima", Uint64.valueOf(20000))));
         AbstractShardTest.writeToStore(tree, CarsModel.BASE_PATH, carsNode);
 
-        final NormalizedNode<?, ?> snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.EMPTY);
+        final NormalizedNode snapshotRoot = AbstractShardTest.readStore(tree, YangInstanceIdentifier.of());
         final Snapshot initialSnapshot = Snapshot.create(
                 new ShardSnapshotState(new MetadataShardDataTreeSnapshot(snapshotRoot)),
                 Collections.emptyList(), 5, 1, 5, 1, 1, null, null);
@@ -1142,10 +1294,8 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
         initDatastoresWithCars(testName);
 
-        final Optional<NormalizedNode<?, ?>> readOptional = leaderDistributedDataStore.newReadOnlyTransaction().read(
-                CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS);
-        assertTrue("isPresent", readOptional.isPresent());
-        assertEquals("Node", carsNode, readOptional.get());
+        assertEquals(Optional.of(carsNode), leaderDistributedDataStore.newReadOnlyTransaction().read(
+            CarsModel.BASE_PATH).get(5, TimeUnit.SECONDS));
 
         verifySnapshot(InMemorySnapshotStore.waitForSavedSnapshot(leaderCarShardName, Snapshot.class),
                 initialSnapshot, snapshotRoot);
@@ -1156,23 +1306,148 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
 
     @Test
     public void testReadWriteMessageSlicing() throws Exception {
-        // The slicing is only implemented for tell-based protocol
-        Assume.assumeTrue(testParameter.equals(ClientBackedDataStore.class));
-
         leaderDatastoreContextBuilder.maximumMessageSliceSize(100);
         followerDatastoreContextBuilder.maximumMessageSliceSize(100);
         initDatastoresWithCars("testLargeReadReplySlicing");
 
         final DOMStoreReadWriteTransaction rwTx = followerDistributedDataStore.newReadWriteTransaction();
 
-        final NormalizedNode<?, ?> carsNode = CarsModel.create();
+        final NormalizedNode carsNode = CarsModel.create();
         rwTx.write(CarsModel.BASE_PATH, carsNode);
 
         verifyNode(rwTx, CarsModel.BASE_PATH, carsNode);
     }
 
+    @SuppressWarnings("IllegalCatch")
+    @Test
+    public void testRaftCallbackDuringLeadershipDrop() throws Exception {
+        final String testName = "testRaftCallbackDuringLeadershipDrop";
+        initDatastores(testName, MODULE_SHARDS_CARS_1_2_3, CARS);
+
+        final ExecutorService executor = Executors.newSingleThreadExecutor();
+
+        final IntegrationTestKit follower2TestKit = new IntegrationTestKit(follower2System,
+                DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build()).operationTimeoutInMillis(500)
+                        .shardLeaderElectionTimeoutInSeconds(3600),
+                commitTimeout);
+
+        final DOMStoreWriteTransaction initialWriteTx = leaderDistributedDataStore.newWriteOnlyTransaction();
+        initialWriteTx.write(CarsModel.BASE_PATH, CarsModel.emptyContainer());
+        leaderTestKit.doCommit(initialWriteTx.ready());
+
+        try (var follower2DistributedDataStore = follower2TestKit.setupDataStore(testParameter, testName,
+            MODULE_SHARDS_CARS_1_2_3, false)) {
+
+            final ActorRef member3Cars = ((LocalShardStore) follower2DistributedDataStore).getLocalShards()
+                    .getLocalShards().get("cars").getActor();
+            final ActorRef member2Cars = ((LocalShardStore)followerDistributedDataStore).getLocalShards()
+                    .getLocalShards().get("cars").getActor();
+            member2Cars.tell(new StartDropMessages<>(AppendEntries.class), null);
+            member3Cars.tell(new StartDropMessages<>(AppendEntries.class), null);
+
+            final DOMStoreWriteTransaction newTx = leaderDistributedDataStore.newWriteOnlyTransaction();
+            newTx.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
+            final AtomicBoolean submitDone = new AtomicBoolean(false);
+            executor.submit(() -> {
+                try {
+                    leaderTestKit.doCommit(newTx.ready());
+                    submitDone.set(true);
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            final ActorRef leaderCars = ((LocalShardStore) leaderDistributedDataStore).getLocalShards()
+                    .getLocalShards().get("cars").getActor();
+            await().atMost(10, TimeUnit.SECONDS)
+                    .until(() -> ((OnDemandRaftState) leaderDistributedDataStore.getActorUtils()
+                            .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE)).getLastIndex() >= 1);
+
+            final OnDemandRaftState raftState = (OnDemandRaftState)leaderDistributedDataStore.getActorUtils()
+                    .executeOperation(leaderCars, GetOnDemandRaftState.INSTANCE);
+
+            // Simulate a follower not receiving heartbeats but still being able to send messages, i.e. a RequestVote
+            // with a new term (switching to candidate after election timeout)
+            leaderCars.tell(new RequestVote(raftState.getCurrentTerm() + 1,
+                    "member-3-shard-cars-testRaftCallbackDuringLeadershipDrop", -1,
+                            -1), member3Cars);
+
+            member2Cars.tell(new StopDropMessages<>(AppendEntries.class), null);
+            member3Cars.tell(new StopDropMessages<>(AppendEntries.class), null);
+
+            await("Is tx stuck in COMMIT_PENDING")
+                    .atMost(10, TimeUnit.SECONDS).untilAtomic(submitDone, equalTo(true));
+
+        }
+
+        executor.shutdownNow();
+    }
+
+    @Test
+    public void testSnapshotOnRootOverwrite() throws Exception {
+        initDatastores("testSnapshotOnRootOverwrite", "module-shards-default-cars-member1-and-2.conf",
+            new String[] {"cars", "default"},
+            leaderDatastoreContextBuilder.snapshotOnRootOverwrite(true),
+            followerDatastoreContextBuilder.snapshotOnRootOverwrite(true));
+
+        leaderTestKit.waitForMembersUp("member-2");
+        final ContainerNode rootNode = Builders.containerBuilder()
+                .withNodeIdentifier(NodeIdentifier.create(SchemaContext.NAME))
+                .withChild(CarsModel.create())
+                .build();
+
+        leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.of(), rootNode);
+
+        // FIXME: CONTROLLER-2020: ClientBackedDatastore does not have stable indexes/term,
+        //                         the snapshot index seems to fluctuate
+        assumeTrue(false);
+        IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
+            state -> assertEquals(1, state.getSnapshotIndex()));
+
+        IntegrationTestKit.verifyShardState(followerDistributedDataStore, "cars",
+            state -> assertEquals(1, state.getSnapshotIndex()));
+
+        verifySnapshot("member-1-shard-cars-testSnapshotOnRootOverwrite", 1);
+        verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 1);
+
+        for (int i = 0; i < 10; i++) {
+            leaderTestKit.testWriteTransaction(leaderDistributedDataStore, CarsModel.newCarPath("car " + i),
+                    CarsModel.newCarEntry("car " + i, Uint64.ONE));
+        }
+
+        // fake snapshot causes the snapshotIndex to move
+        IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
+            state -> assertEquals(10, state.getSnapshotIndex()));
+        IntegrationTestKit.verifyShardState(followerDistributedDataStore, "cars",
+            state -> assertEquals(10, state.getSnapshotIndex()));
+
+        // however the real snapshot still has not changed and was taken at index 1
+        verifySnapshot("member-1-shard-cars-testSnapshotOnRootOverwrite", 1);
+        verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 1);
+
+        // root overwrite so expect a snapshot
+        leaderTestKit.testWriteTransaction(leaderDistributedDataStore, YangInstanceIdentifier.of(), rootNode);
+
+        // this was a real snapshot so everything should be in it (1 (DisableTrackingPayload) + 1 + 10 + 1)
+        IntegrationTestKit.verifyShardState(leaderDistributedDataStore, "cars",
+            state -> assertEquals(12, state.getSnapshotIndex()));
+        IntegrationTestKit.verifyShardState(followerDistributedDataStore, "cars",
+            state -> assertEquals(12, state.getSnapshotIndex()));
+
+        verifySnapshot("member-1-shard-cars-testSnapshotOnRootOverwrite", 12);
+        verifySnapshot("member-2-shard-cars-testSnapshotOnRootOverwrite", 12);
+    }
+
+    private static void verifySnapshot(final String persistenceId, final long lastAppliedIndex) {
+        await().atMost(5, TimeUnit.SECONDS).untilAsserted(() -> {
+                List<Snapshot> snap = InMemorySnapshotStore.getSnapshots(persistenceId, Snapshot.class);
+                assertEquals(1, snap.size());
+                assertEquals(lastAppliedIndex, snap.get(0).getLastAppliedIndex());
+            }
+        );
+    }
+
     private static void verifySnapshot(final Snapshot actual, final Snapshot expected,
-                                       final NormalizedNode<?, ?> expRoot) {
+                                       final NormalizedNode expRoot) {
         assertEquals("Snapshot getLastAppliedTerm", expected.getLastAppliedTerm(), actual.getLastAppliedTerm());
         assertEquals("Snapshot getLastAppliedIndex", expected.getLastAppliedIndex(), actual.getLastAppliedIndex());
         assertEquals("Snapshot getLastTerm", expected.getLastTerm(), actual.getLastTerm());
@@ -1180,15 +1455,15 @@ public class DistributedDataStoreRemotingIntegrationTest extends AbstractTest {
         assertEquals("Snapshot state type", ShardSnapshotState.class, actual.getState().getClass());
         MetadataShardDataTreeSnapshot shardSnapshot =
                 (MetadataShardDataTreeSnapshot) ((ShardSnapshotState)actual.getState()).getSnapshot();
-        assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().get());
+        assertEquals("Snapshot root node", expRoot, shardSnapshot.getRootNode().orElseThrow());
     }
 
-    private static void sendDatastoreContextUpdate(final AbstractDataStore dataStore, final Builder builder) {
+    private static void sendDatastoreContextUpdate(final ClientBackedDataStore dataStore, final Builder builder) {
         final Builder newBuilder = DatastoreContext.newBuilderFrom(builder.build());
-        final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
+        final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
         final Answer<DatastoreContext> answer = invocation -> newBuilder.build();
-        Mockito.doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
-        Mockito.doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
+        doAnswer(answer).when(mockContextFactory).getBaseDatastoreContext();
+        doAnswer(answer).when(mockContextFactory).getShardDatastoreContext(anyString());
         dataStore.onDatastoreContextUpdated(mockContextFactory);
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java
deleted file mode 100644 (file)
index f608aa0..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import akka.util.Timeout;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.FiniteDuration;
-
-public class DistributedDataStoreTest extends AbstractActorTest {
-    private static final ClientIdentifier UNKNOWN_ID = ClientIdentifier.create(
-            FrontendIdentifier.create(MemberName.forName("local"), FrontendType.forName("unknown")), 0);
-
-    private static SchemaContext SCHEMA_CONTEXT;
-
-    @Mock
-    private ActorUtils actorUtils;
-
-    @Mock
-    private DatastoreContext datastoreContext;
-
-    @Mock
-    private Timeout shardElectionTimeout;
-
-    @BeforeClass
-    public static void beforeClass() {
-        SCHEMA_CONTEXT = TestModel.createTestContext();
-    }
-
-    @AfterClass
-    public static void afterClass() {
-        SCHEMA_CONTEXT = null;
-    }
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        doReturn(SCHEMA_CONTEXT).when(actorUtils).getSchemaContext();
-        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
-    }
-
-    @Test
-    public void testRateLimitingUsedInReadWriteTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            distributedDataStore.newReadWriteTransaction();
-
-            verify(actorUtils, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingUsedInWriteOnlyTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            distributedDataStore.newWriteOnlyTransaction();
-
-            verify(actorUtils, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingNotUsedInReadOnlyTxCreation() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            distributedDataStore.newReadOnlyTransaction();
-            distributedDataStore.newReadOnlyTransaction();
-            distributedDataStore.newReadOnlyTransaction();
-
-            verify(actorUtils, times(0)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testWaitTillReadyBlocking() {
-        doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
-        doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
-        doReturn(FiniteDuration.apply(50, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-
-            long start = System.currentTimeMillis();
-
-            distributedDataStore.waitTillReady();
-
-            long end = System.currentTimeMillis();
-
-            assertTrue("Expected to be blocked for 50 millis", end - start >= 50);
-        }
-    }
-
-    @Test
-    public void testWaitTillReadyCountDown() {
-        try (DistributedDataStore distributedDataStore = new DistributedDataStore(actorUtils, UNKNOWN_ID)) {
-            doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
-            doReturn(shardElectionTimeout).when(datastoreContext).getShardLeaderElectionTimeout();
-            doReturn(FiniteDuration.apply(5000, TimeUnit.MILLISECONDS)).when(shardElectionTimeout).duration();
-
-            Executors.newSingleThreadExecutor().submit(() -> {
-                Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-                distributedDataStore.getWaitTillReadyCountDownLatch().countDown();
-            });
-
-            long start = System.currentTimeMillis();
-
-            distributedDataStore.waitTillReady();
-
-            long end = System.currentTimeMillis();
-
-            assertTrue("Expected to be released in 500 millis", end - start < 5000);
-        }
-    }
-}
index 73888de3909c9a919eac3b5c9d8c7c6a501164c5..b5fcc951aa87527387623b841e201faf7847de0f 100644 (file)
@@ -9,7 +9,6 @@ package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.runners.Parameterized.Parameters;
 import static org.opendaylight.controller.md.cluster.datastore.model.CarsModel.CAR_QNAME;
 
 import akka.actor.ActorSystem;
@@ -22,7 +21,6 @@ import com.google.common.util.concurrent.Uninterruptibles;
 import com.typesafe.config.ConfigFactory;
 import java.io.File;
 import java.io.IOException;
-import java.math.BigInteger;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Optional;
@@ -34,17 +32,21 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.opendaylight.controller.cluster.databroker.TestClientBackedDataStore;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.builder.CollectionNodeBuilder;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
 
 @RunWith(Parameterized.class)
 public class DistributedDataStoreWithSegmentedJournalIntegrationTest
@@ -53,7 +55,8 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
     @Parameters(name = "{0}")
     public static Collection<Object[]> data() {
         return Arrays.asList(new Object[][] {
-                { DistributedDataStore.class }});
+                { TestClientBackedDataStore.class }
+        });
     }
 
     @Before
@@ -91,10 +94,10 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
     @Test
     public void testManyWritesDeletes() throws Exception {
         final IntegrationTestKit testKit = new IntegrationTestKit(getSystem(), datastoreContextBuilder);
-        CollectionNodeBuilder<MapEntryNode, MapNode> carMapBuilder = ImmutableNodes.mapNodeBuilder(CAR_QNAME);
+        CollectionNodeBuilder<MapEntryNode, SystemMapNode> carMapBuilder = ImmutableNodes.mapNodeBuilder(CAR_QNAME);
 
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-                testParameter, "testManyWritesDeletes", "module-shards-cars-member-1.conf", true, "cars")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testManyWritesDeletes",
+            "module-shards-cars-member-1.conf", true, "cars")) {
 
             DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
 
@@ -108,7 +111,7 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
                 DOMStoreReadWriteTransaction rwTx = txChain.newReadWriteTransaction();
 
                 YangInstanceIdentifier path = CarsModel.newCarPath("car" + i);
-                MapEntryNode data = CarsModel.newCarEntry("car" + i, BigInteger.valueOf(20000));
+                MapEntryNode data = CarsModel.newCarEntry("car" + i, Uint64.valueOf(20000));
 
                 rwTx.merge(path, data);
                 carMapBuilder.withChild(data);
@@ -124,13 +127,13 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
                 }
             }
 
-            final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
+            final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
                     .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
             assertTrue("isPresent", optional.isPresent());
 
             MapNode cars = carMapBuilder.build();
 
-            assertEquals("cars not matching result", cars, optional.get());
+            assertEquals("cars not matching result", cars, optional.orElseThrow());
 
             txChain.close();
 
@@ -152,16 +155,15 @@ public class DistributedDataStoreWithSegmentedJournalIntegrationTest
         }
 
         // test restoration from journal and verify data matches
-        try (AbstractDataStore dataStore = testKit.setupAbstractDataStore(
-                testParameter, "testManyWritesDeletes", "module-shards-cars-member-1.conf", true, "cars")) {
+        try (var dataStore = testKit.setupDataStore(testParameter, "testManyWritesDeletes",
+            "module-shards-cars-member-1.conf", true, "cars")) {
 
             DOMStoreTransactionChain txChain = dataStore.createTransactionChain();
             MapNode cars = carMapBuilder.build();
 
-            final Optional<NormalizedNode<?, ?>> optional = txChain.newReadOnlyTransaction()
+            final Optional<NormalizedNode> optional = txChain.newReadOnlyTransaction()
                     .read(CarsModel.CAR_LIST_PATH).get(5, TimeUnit.SECONDS);
-            assertTrue("isPresent", optional.isPresent());
-            assertEquals("restored cars do not match snapshot", cars, optional.get());
+            assertEquals("restored cars do not match snapshot", Optional.of(cars), optional);
 
             txChain.close();
         }
index f9804c92e7299be349ca119c3b41efc3c7b43ff5..4c56c472e418589b371c039747d0ed0f1d4bed5b 100644 (file)
@@ -7,15 +7,15 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static org.junit.Assert.assertSame;
+import static org.mockito.Mockito.mock;
+
 import akka.actor.ActorRef;
-import java.util.Arrays;
-import java.util.Collection;
-import org.junit.Assert;
+import java.util.List;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public class ForwardingDataTreeChangeListenerTest extends AbstractActorTest {
 
@@ -24,12 +24,12 @@ public class ForwardingDataTreeChangeListenerTest extends AbstractActorTest {
         final ActorRef actorRef = getSystem().actorOf(MessageCollectorActor.props());
 
         ForwardingDataTreeChangeListener forwardingListener = new ForwardingDataTreeChangeListener(
-                getSystem().actorSelection(actorRef.path()));
+                getSystem().actorSelection(actorRef.path()), ActorRef.noSender());
 
-        Collection<DataTreeCandidate> expected = Arrays.asList(Mockito.mock(DataTreeCandidate.class));
+        List<DataTreeCandidate> expected = List.of(mock(DataTreeCandidate.class));
         forwardingListener.onDataTreeChanged(expected);
 
         DataTreeChanged actual = MessageCollectorActor.expectFirstMatching(actorRef, DataTreeChanged.class, 5000);
-        Assert.assertSame(expected, actual.getChanges());
+        assertSame(expected, actual.getChanges());
     }
 }
index b34f80dd6523519b6396e9d308b4ace99a0cdba1..7786f13948f010a53d9686a4f969046fcc54d0ff 100644 (file)
@@ -35,7 +35,7 @@ import org.opendaylight.controller.cluster.access.concepts.RequestEnvelope;
 import org.opendaylight.controller.cluster.access.concepts.RequestException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
 
 public class FrontendReadWriteTransactionTest {
 
@@ -133,7 +133,7 @@ public class FrontendReadWriteTransactionTest {
         assertNotNull(handleRequest(readyReq));
         verify(mockParent).finishTransaction(same(shardTransaction), eq(Optional.empty()));
 
-        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.EMPTY, true));
+        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.of(), true));
     }
 
     @Test(expected = IllegalStateException.class)
@@ -160,6 +160,6 @@ public class FrontendReadWriteTransactionTest {
         assertNull(handleRequest(abortReq));
         verify(mockParent).abortTransaction(same(shardTransaction), any(Runnable.class));
 
-        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.EMPTY, true));
+        handleRequest(new ReadTransactionRequest(TX_ID, 0, mock(ActorRef.class), YangInstanceIdentifier.of(), true));
     }
 }
index b483be873ff3da98e3da85e5ea320623c5adaa45..63e9ba72a74fb2606477d6a9caabe44ad7975abf 100644 (file)
@@ -9,8 +9,11 @@ package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
@@ -22,18 +25,14 @@ import com.google.common.base.Stopwatch;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.lang.reflect.Constructor;
 import java.util.Optional;
 import java.util.Set;
-import java.util.concurrent.Callable;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
-import org.mockito.Mockito;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConfigProvider;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.OnDemandShardState;
 import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
@@ -46,7 +45,7 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
@@ -76,75 +75,53 @@ public class IntegrationTestKit extends ShardTestKit {
         return datastoreContextBuilder;
     }
 
-    public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig,
-                                                          final boolean waitUntilLeader,
-                                                          final SchemaContext schemaContext) throws Exception {
-        return setupDistributedDataStore(typeName, moduleShardsConfig, "modules.conf", waitUntilLeader, schemaContext);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, "module-shards.conf", true, SchemaContextHelper.full(),
+            shardNames);
     }
 
-    public DistributedDataStore setupDistributedDataStore(final String typeName, final String moduleShardsConfig,
-                                                          final String modulesConfig,
-                                                          final boolean waitUntilLeader,
-                                                          final SchemaContext schemaContext,
-                                                          final String... shardNames) throws Exception {
-        return (DistributedDataStore) setupAbstractDataStore(DistributedDataStore.class, typeName, moduleShardsConfig,
-                modulesConfig, waitUntilLeader, schemaContext, shardNames);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final boolean waitUntilLeader, final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, "module-shards.conf", waitUntilLeader,
+            SchemaContextHelper.full(), shardNames);
     }
 
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final String... shardNames)
-            throws Exception {
-        return setupAbstractDataStore(implementation, typeName, "module-shards.conf", true,
-                SchemaContextHelper.full(), shardNames);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String moduleShardsConfig, final boolean waitUntilLeader,
+            final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, moduleShardsConfig, waitUntilLeader,
+            SchemaContextHelper.full(), shardNames);
     }
 
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final boolean waitUntilLeader,
-                                                    final String... shardNames) throws Exception {
-        return setupAbstractDataStore(implementation, typeName, "module-shards.conf", waitUntilLeader,
-                SchemaContextHelper.full(), shardNames);
+    public ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String moduleShardsConfig, final boolean waitUntilLeader,
+            final EffectiveModelContext schemaContext, final String... shardNames) throws Exception {
+        return setupDataStore(implementation, typeName, moduleShardsConfig, "modules.conf", waitUntilLeader,
+            schemaContext, shardNames);
     }
 
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final String moduleShardsConfig,
-                                                    final boolean waitUntilLeader, final String... shardNames)
-            throws Exception {
-        return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, waitUntilLeader,
-                SchemaContextHelper.full(), shardNames);
-    }
-
-    public AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                    final String typeName, final String moduleShardsConfig,
-                                                    final boolean waitUntilLeader,
-                                                    final SchemaContext schemaContext,
-                                                    final String... shardNames) throws Exception {
-        return setupAbstractDataStore(implementation, typeName, moduleShardsConfig, "modules.conf", waitUntilLeader,
-                schemaContext, shardNames);
-    }
-
-    private AbstractDataStore setupAbstractDataStore(final Class<? extends AbstractDataStore> implementation,
-                                                     final String typeName, final String moduleShardsConfig,
-                                                     final String modulesConfig, final boolean waitUntilLeader,
-                                                     final SchemaContext schemaContext, final String... shardNames)
-            throws Exception {
+    private ClientBackedDataStore setupDataStore(final Class<? extends ClientBackedDataStore> implementation,
+            final String typeName, final String moduleShardsConfig, final String modulesConfig,
+            final boolean waitUntilLeader, final EffectiveModelContext schemaContext, final String... shardNames)
+                throws Exception {
         final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
         final Configuration config = new ConfigurationImpl(moduleShardsConfig, modulesConfig);
 
         setDataStoreName(typeName);
 
         final DatastoreContext datastoreContext = datastoreContextBuilder.build();
-        final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
+        final DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
+        doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
+        doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(anyString());
 
-        final Constructor<? extends AbstractDataStore> constructor = implementation.getDeclaredConstructor(
-                ActorSystem.class, ClusterWrapper.class, Configuration.class,
-                DatastoreContextFactory.class, DatastoreSnapshot.class);
+        final var constructor = implementation.getDeclaredConstructor(ActorSystem.class, ClusterWrapper.class,
+            Configuration.class, DatastoreContextFactory.class, DatastoreSnapshot.class);
 
-        final AbstractDataStore dataStore = constructor.newInstance(getSystem(), cluster, config, mockContextFactory,
+        final var dataStore = constructor.newInstance(getSystem(), cluster, config, mockContextFactory,
             restoreFromSnapshot);
 
-        dataStore.onGlobalContextUpdated(schemaContext);
+        dataStore.onModelContextUpdated(schemaContext);
 
         if (waitUntilLeader) {
             waitUntilLeader(dataStore.getActorUtils(), shardNames);
@@ -164,52 +141,6 @@ public class IntegrationTestKit extends ShardTestKit {
         }
     }
 
-    public DistributedDataStore setupDistributedDataStoreWithoutConfig(final String typeName,
-                                                                       final SchemaContext schemaContext) {
-        final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
-        final ConfigurationImpl configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider());
-
-        setDataStoreName(typeName);
-
-        final DatastoreContext datastoreContext = getDatastoreContextBuilder().build();
-
-        final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
-
-        final DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
-                configuration, mockContextFactory, restoreFromSnapshot);
-
-        dataStore.onGlobalContextUpdated(schemaContext);
-
-        datastoreContextBuilder = DatastoreContext.newBuilderFrom(datastoreContext);
-        return dataStore;
-    }
-
-    public DistributedDataStore setupDistributedDataStoreWithoutConfig(final String typeName,
-                                                                       final SchemaContext schemaContext,
-                                                                       final LogicalDatastoreType storeType) {
-        final ClusterWrapper cluster = new ClusterWrapperImpl(getSystem());
-        final ConfigurationImpl configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider());
-
-        setDataStoreName(typeName);
-
-        final DatastoreContext datastoreContext =
-                getDatastoreContextBuilder().logicalStoreType(storeType).build();
-
-        final DatastoreContextFactory mockContextFactory = Mockito.mock(DatastoreContextFactory.class);
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
-
-        final DistributedDataStore dataStore = new DistributedDataStore(getSystem(), cluster,
-                configuration, mockContextFactory, restoreFromSnapshot);
-
-        dataStore.onGlobalContextUpdated(schemaContext);
-
-        datastoreContextBuilder = DatastoreContext.newBuilderFrom(datastoreContext);
-        return dataStore;
-    }
-
     public void waitUntilLeader(final ActorUtils actorUtils, final String... shardNames) {
         for (String shardName: shardNames) {
             ActorRef shard = findLocalShard(actorUtils, shardName);
@@ -248,22 +179,21 @@ public class IntegrationTestKit extends ShardTestKit {
     }
 
     public static ActorRef findLocalShard(final ActorUtils actorUtils, final String shardName) {
-        ActorRef shard = null;
-        for (int i = 0; i < 20 * 5 && shard == null; i++) {
+        for (int i = 0; i < 20 * 5; i++) {
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
-            com.google.common.base.Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
+            Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
             if (shardReply.isPresent()) {
-                shard = shardReply.get();
+                return shardReply.orElseThrow();
             }
         }
-        return shard;
+        return null;
     }
 
     public static void waitUntilShardIsDown(final ActorUtils actorUtils, final String shardName) {
         for (int i = 0; i < 20 * 5 ; i++) {
             LOG.debug("Waiting for shard down {}", shardName);
             Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
-            com.google.common.base.Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
+            Optional<ActorRef> shardReply = actorUtils.findLocalShard(shardName);
             if (!shardReply.isPresent()) {
                 return;
             }
@@ -272,7 +202,7 @@ public class IntegrationTestKit extends ShardTestKit {
         throw new IllegalStateException("Shard[" + shardName + " did not shutdown in time");
     }
 
-    public static void verifyShardStats(final AbstractDataStore datastore, final String shardName,
+    public static void verifyShardStats(final ClientBackedDataStore datastore, final String shardName,
             final ShardStatsVerifier verifier) throws Exception {
         ActorUtils actorUtils = datastore.getActorUtils();
 
@@ -297,7 +227,7 @@ public class IntegrationTestKit extends ShardTestKit {
         throw lastError;
     }
 
-    public static void verifyShardState(final AbstractDataStore datastore, final String shardName,
+    public static void verifyShardState(final ClientBackedDataStore datastore, final String shardName,
             final Consumer<OnDemandShardState> verifier) throws Exception {
         ActorUtils actorUtils = datastore.getActorUtils();
 
@@ -322,8 +252,8 @@ public class IntegrationTestKit extends ShardTestKit {
         throw lastError;
     }
 
-    void testWriteTransaction(final AbstractDataStore dataStore, final YangInstanceIdentifier nodePath,
-            final NormalizedNode<?, ?> nodeToWrite) throws Exception {
+    void testWriteTransaction(final ClientBackedDataStore dataStore, final YangInstanceIdentifier nodePath,
+            final NormalizedNode nodeToWrite) throws Exception {
 
         // 1. Create a write-only Tx
 
@@ -345,10 +275,7 @@ public class IntegrationTestKit extends ShardTestKit {
         // 5. Verify the data in the store
 
         DOMStoreReadTransaction readTx = dataStore.newReadOnlyTransaction();
-
-        Optional<NormalizedNode<?, ?>> optional = readTx.read(nodePath).get(5, TimeUnit.SECONDS);
-        assertTrue("isPresent", optional.isPresent());
-        assertEquals("Data node", nodeToWrite, optional.get());
+        assertEquals(Optional.of(nodeToWrite), readTx.read(nodePath).get(5, TimeUnit.SECONDS));
     }
 
     public void doCommit(final DOMStoreThreePhaseCommitCohort cohort) throws Exception {
@@ -366,32 +293,11 @@ public class IntegrationTestKit extends ShardTestKit {
         cohort.commit().get(5, TimeUnit.SECONDS);
     }
 
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    void assertExceptionOnCall(final Callable<Void> callable, final Class<? extends Exception> expType) {
-        try {
-            callable.call();
-            fail("Expected " + expType.getSimpleName());
-        } catch (Exception e) {
-            assertEquals("Exception type", expType, e.getClass());
-        }
-    }
-
     void assertExceptionOnTxChainCreates(final DOMStoreTransactionChain txChain,
             final Class<? extends Exception> expType) {
-        assertExceptionOnCall(() -> {
-            txChain.newWriteOnlyTransaction();
-            return null;
-        }, expType);
-
-        assertExceptionOnCall(() -> {
-            txChain.newReadWriteTransaction();
-            return null;
-        }, expType);
-
-        assertExceptionOnCall(() -> {
-            txChain.newReadOnlyTransaction();
-            return null;
-        }, expType);
+        assertThrows(expType, () -> txChain.newWriteOnlyTransaction());
+        assertThrows(expType, () -> txChain.newReadWriteTransaction());
+        assertThrows(expType, () -> txChain.newReadOnlyTransaction());
     }
 
     public interface ShardStatsVerifier {
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/JsonExportTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/JsonExportTest.java
new file mode 100644 (file)
index 0000000..1e51e9c
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.awaitility.Awaitility.await;
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
+import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.distributed.datastore.provider.rev231229.DataStoreProperties.ExportOnRecovery;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+
+public class JsonExportTest extends AbstractShardTest {
+    private static final String DUMMY_DATA = "Dummy data as snapshot sequence number is set to 0 in "
+            + "InMemorySnapshotStore and journal recovery seq number will start from 1";
+    private static final String EXPECTED_JOURNAL_FILE = "expectedJournalExport.json";
+    private static final String EXPECTED_SNAPSHOT_FILE = "expectedSnapshotExport.json";
+    private static String actualJournalFilePath;
+    private static String actualSnapshotFilePath;
+    private DatastoreContext datastoreContext;
+
+    @Rule
+    public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        final var exportTmpFolder = temporaryFolder.newFolder("persistence-export");
+        actualJournalFilePath = exportTmpFolder.getAbsolutePath() + "/journals/"
+            + "member-1-shard-inventory-config" + nextShardNum + "-journal.json";
+        actualSnapshotFilePath = exportTmpFolder.getAbsolutePath() + "/snapshots/"
+            + "member-1-shard-inventory-config" + nextShardNum + "-snapshot.json";
+        datastoreContext = DatastoreContext.newBuilder().shardJournalRecoveryLogBatchSize(1)
+            .shardSnapshotBatchCount(5000).shardHeartbeatIntervalInMillis(HEARTBEAT_MILLIS).persistent(true)
+            .exportOnRecovery(ExportOnRecovery.Json)
+            .recoveryExportBaseDir(exportTmpFolder.getAbsolutePath()).build();
+    }
+
+    @Override
+    protected DatastoreContext newDatastoreContext() {
+        return datastoreContext;
+    }
+
+    @Test
+    public void testJsonExport() throws Exception {
+        // Set up the InMemorySnapshotStore.
+        final var source = setupInMemorySnapshotStore();
+
+        final var writeMod = source.takeSnapshot().newModification();
+        writeMod.write(TestModel.OUTER_LIST_PATH, ImmutableNodes.newSystemMapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build());
+        writeMod.ready();
+        InMemoryJournal.addEntry(shardID.toString(), 0, DUMMY_DATA);
+
+        // Set up the InMemoryJournal.
+        InMemoryJournal.addEntry(shardID.toString(), 1, new SimpleReplicatedLogEntry(0, 1,
+                payloadForModification(source, writeMod, nextTransactionId())));
+
+        final int nListEntries = 16;
+        final Set<Integer> listEntryKeys = new HashSet<>();
+
+        // Add some ModificationPayload entries
+        for (int i = 1; i <= nListEntries; i++) {
+            final Integer value = i;
+            listEntryKeys.add(value);
+
+            final var path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+                    .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, value).build();
+
+            final var mod = source.takeSnapshot().newModification();
+            mod.merge(path, ImmutableNodes.newMapEntryBuilder()
+                .withNodeIdentifier(
+                    NodeIdentifierWithPredicates.of(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, value))
+                .withChild(ImmutableNodes.leafNode(TestModel.ID_QNAME, value))
+                .build());
+            mod.ready();
+
+            InMemoryJournal.addEntry(shardID.toString(), i + 1, new SimpleReplicatedLogEntry(i, 1,
+                    payloadForModification(source, mod, nextTransactionId())));
+        }
+
+        InMemoryJournal.addEntry(shardID.toString(), nListEntries + 2,
+                new ApplyJournalEntries(nListEntries));
+
+        testRecovery(listEntryKeys, false);
+
+        verifyJournalExport();
+        verifySnapshotExport();
+    }
+
+    private static void verifyJournalExport() throws IOException {
+        final String expectedJournalData = readExpectedFile(EXPECTED_JOURNAL_FILE);
+        final String actualJournalData = readActualFile(actualJournalFilePath);
+        assertEquals("Exported journal is not expected ", expectedJournalData, actualJournalData);
+    }
+
+    private static void verifySnapshotExport() throws IOException {
+        final String expectedSnapshotData = readExpectedFile(EXPECTED_SNAPSHOT_FILE);
+        final String actualSnapshotData = readActualFile(actualSnapshotFilePath);
+        assertEquals("Exported snapshot is not expected ", expectedSnapshotData, actualSnapshotData);
+    }
+
+    private static String readExpectedFile(final String filePath) throws IOException {
+        final File exportFile = new File(JsonExportTest.class.getClassLoader().getResource(filePath).getFile());
+        return new String(Files.readAllBytes(Path.of(exportFile.getPath())));
+    }
+
+    private static String readActualFile(final String filePath) throws IOException {
+        final File exportFile = new File(filePath);
+        await().atMost(10, TimeUnit.SECONDS).until(exportFile::exists);
+        return new String(Files.readAllBytes(Path.of(filePath)));
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalShardStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalShardStore.java
new file mode 100644 (file)
index 0000000..9d34902
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.controller.cluster.datastore.shardmanager.TestShardManager.GetLocalShardsReply;
+
+public interface LocalShardStore {
+
+    GetLocalShardsReply getLocalShards();
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContextTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/LocalTransactionContextTest.java
deleted file mode 100644 (file)
index 1fb2f8e..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorSelection;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.SettableFuture;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.messages.ReadData;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import scala.concurrent.Future;
-
-public class LocalTransactionContextTest {
-
-    @Mock
-    private OperationLimiter limiter;
-
-    @Mock
-    private DOMStoreReadWriteTransaction readWriteTransaction;
-
-    @Mock
-    private LocalTransactionReadySupport mockReadySupport;
-
-    private LocalTransactionContext localTransactionContext;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        localTransactionContext = new LocalTransactionContext(readWriteTransaction, limiter.getIdentifier(),
-                mockReadySupport) {
-            @Override
-            protected DOMStoreWriteTransaction getWriteDelegate() {
-                return readWriteTransaction;
-            }
-
-            @Override
-            protected DOMStoreReadTransaction getReadDelegate() {
-                return readWriteTransaction;
-            }
-        };
-    }
-
-    @Test
-    public void testWrite() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
-        localTransactionContext.executeModification(new WriteModification(yangInstanceIdentifier, normalizedNode),
-            null);
-        verify(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-    }
-
-    @Test
-    public void testMerge() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
-        localTransactionContext.executeModification(new MergeModification(yangInstanceIdentifier, normalizedNode),
-            null);
-        verify(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-    }
-
-    @Test
-    public void testDelete() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        localTransactionContext.executeModification(new DeleteModification(yangInstanceIdentifier), null);
-        verify(readWriteTransaction).delete(yangInstanceIdentifier);
-    }
-
-
-    @Test
-    public void testRead() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
-        doReturn(Futures.immediateCheckedFuture(Optional.of(normalizedNode))).when(readWriteTransaction)
-            .read(yangInstanceIdentifier);
-        localTransactionContext.executeRead(new ReadData(yangInstanceIdentifier, DataStoreVersions.CURRENT_VERSION),
-                SettableFuture.create(), null);
-        verify(readWriteTransaction).read(yangInstanceIdentifier);
-    }
-
-    @Test
-    public void testExists() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        doReturn(Futures.immediateCheckedFuture(true)).when(readWriteTransaction).exists(yangInstanceIdentifier);
-        localTransactionContext.executeRead(new DataExists(yangInstanceIdentifier, DataStoreVersions.CURRENT_VERSION),
-                SettableFuture.create(), null);
-        verify(readWriteTransaction).exists(yangInstanceIdentifier);
-    }
-
-    @Test
-    public void testReady() {
-        final LocalThreePhaseCommitCohort mockCohort = mock(LocalThreePhaseCommitCohort.class);
-        doReturn(akka.dispatch.Futures.successful(null)).when(mockCohort).initiateCoordinatedCommit(
-                java.util.Optional.empty());
-        doReturn(mockCohort).when(mockReadySupport).onTransactionReady(readWriteTransaction, null);
-
-        Future<ActorSelection> future = localTransactionContext.readyTransaction(null, java.util.Optional.empty());
-        assertTrue(future.isCompleted());
-
-        verify(mockReadySupport).onTransactionReady(readWriteTransaction, null);
-    }
-
-    @Test
-    public void testReadyWithWriteError() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
-        RuntimeException error = new RuntimeException("mock");
-        doThrow(error).when(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-
-        localTransactionContext.executeModification(new WriteModification(yangInstanceIdentifier, normalizedNode),
-            null);
-        localTransactionContext.executeModification(new WriteModification(yangInstanceIdentifier, normalizedNode),
-            null);
-
-        verify(readWriteTransaction).write(yangInstanceIdentifier, normalizedNode);
-
-        doReadyWithExpectedError(error);
-    }
-
-    @Test
-    public void testReadyWithMergeError() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        NormalizedNode<?, ?> normalizedNode = mock(NormalizedNode.class);
-        RuntimeException error = new RuntimeException("mock");
-        doThrow(error).when(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-
-        localTransactionContext.executeModification(new MergeModification(yangInstanceIdentifier, normalizedNode),
-            null);
-        localTransactionContext.executeModification(new MergeModification(yangInstanceIdentifier, normalizedNode),
-            null);
-
-        verify(readWriteTransaction).merge(yangInstanceIdentifier, normalizedNode);
-
-        doReadyWithExpectedError(error);
-    }
-
-    @Test
-    public void testReadyWithDeleteError() {
-        YangInstanceIdentifier yangInstanceIdentifier = YangInstanceIdentifier.EMPTY;
-        RuntimeException error = new RuntimeException("mock");
-        doThrow(error).when(readWriteTransaction).delete(yangInstanceIdentifier);
-
-        localTransactionContext.executeModification(new DeleteModification(yangInstanceIdentifier), null);
-        localTransactionContext.executeModification(new DeleteModification(yangInstanceIdentifier), null);
-
-        verify(readWriteTransaction).delete(yangInstanceIdentifier);
-
-        doReadyWithExpectedError(error);
-    }
-
-    private void doReadyWithExpectedError(final RuntimeException expError) {
-        LocalThreePhaseCommitCohort mockCohort = mock(LocalThreePhaseCommitCohort.class);
-        doReturn(akka.dispatch.Futures.successful(null)).when(mockCohort).initiateCoordinatedCommit(
-                java.util.Optional.empty());
-        doReturn(mockCohort).when(mockReadySupport).onTransactionReady(readWriteTransaction, expError);
-
-        localTransactionContext.readyTransaction(null, java.util.Optional.empty());
-    }
-}
index 60cc13b6c17a936de063a4327a3423ffbdd9dac3..4466493857fa9d1e85f5470b45f56260fe756270 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
@@ -17,23 +18,23 @@ import akka.cluster.Cluster;
 import akka.cluster.ClusterEvent.CurrentClusterState;
 import akka.cluster.Member;
 import akka.cluster.MemberStatus;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Uninterruptibles;
 import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
@@ -51,8 +52,8 @@ public class MemberNode {
     private static final String MEMBER_1_ADDRESS = "akka://cluster-test@127.0.0.1:2558";
 
     private IntegrationTestKit kit;
-    private AbstractDataStore configDataStore;
-    private AbstractDataStore operDataStore;
+    private ClientBackedDataStore configDataStore;
+    private ClientBackedDataStore operDataStore;
     private DatastoreContext.Builder datastoreContextBuilder;
     private boolean cleanedUp;
 
@@ -72,12 +73,12 @@ public class MemberNode {
     }
 
 
-    public AbstractDataStore configDataStore() {
+    public ClientBackedDataStore configDataStore() {
         return configDataStore;
     }
 
 
-    public AbstractDataStore operDataStore() {
+    public ClientBackedDataStore operDataStore() {
         return operDataStore;
     }
 
@@ -93,6 +94,7 @@ public class MemberNode {
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 10) {
             CurrentClusterState state = Cluster.get(kit.getSystem()).state();
+
             for (Member m : state.getUnreachable()) {
                 if (member.equals(m.getRoles().iterator().next())) {
                     return;
@@ -123,14 +125,14 @@ public class MemberNode {
             }
 
             try {
-                IntegrationTestKit.shutdownActorSystem(kit.getSystem(), Boolean.TRUE);
+                IntegrationTestKit.shutdownActorSystem(kit.getSystem(), true);
             } catch (RuntimeException e) {
                 LoggerFactory.getLogger(MemberNode.class).warn("Failed to shutdown actor system", e);
             }
         }
     }
 
-    public static void verifyRaftState(final AbstractDataStore datastore, final String shardName,
+    public static void verifyRaftState(final ClientBackedDataStore datastore, final String shardName,
             final RaftStateVerifier verifier) throws Exception {
         ActorUtils actorUtils = datastore.getActorUtils();
 
@@ -155,9 +157,9 @@ public class MemberNode {
         throw lastError;
     }
 
-    public static void verifyRaftPeersPresent(final AbstractDataStore datastore, final String shardName,
+    public static void verifyRaftPeersPresent(final ClientBackedDataStore datastore, final String shardName,
             final String... peerMemberNames) throws Exception {
-        final Set<String> peerIds = Sets.newHashSet();
+        final Set<String> peerIds = new HashSet<>();
         for (String p: peerMemberNames) {
             peerIds.add(ShardIdentifier.create(shardName, MemberName.forName(p),
                 datastore.getActorUtils().getDataStoreName()).toString());
@@ -167,7 +169,7 @@ public class MemberNode {
             raftState.getPeerAddresses().keySet()));
     }
 
-    public static void verifyNoShardPresent(final AbstractDataStore datastore, final String shardName) {
+    public static void verifyNoShardPresent(final ClientBackedDataStore datastore, final String shardName) {
         Stopwatch sw = Stopwatch.createStarted();
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
             Optional<ActorRef> shardReply = datastore.getActorUtils().findLocalShard(shardName);
@@ -188,7 +190,7 @@ public class MemberNode {
         private boolean useAkkaArtery = true;
         private String[] waitForshardLeader = new String[0];
         private String testName;
-        private SchemaContext schemaContext;
+        private EffectiveModelContext schemaContext;
         private boolean createOperDatastore = true;
         private DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder()
                 .shardHeartbeatIntervalInMillis(300).shardElectionTimeoutFactor(30);
@@ -203,7 +205,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder moduleShardsConfig(final String newModuleShardsConfig) {
-            this.moduleShardsConfig = newModuleShardsConfig;
+            moduleShardsConfig = newModuleShardsConfig;
             return this;
         }
 
@@ -213,7 +215,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder akkaConfig(final String newAkkaConfig) {
-            this.akkaConfig = newAkkaConfig;
+            akkaConfig = newAkkaConfig;
             return this;
         }
 
@@ -223,7 +225,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder useAkkaArtery(final boolean newUseAkkaArtery) {
-            this.useAkkaArtery = newUseAkkaArtery;
+            useAkkaArtery = newUseAkkaArtery;
             return this;
         }
 
@@ -233,7 +235,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder testName(final String newTestName) {
-            this.testName = newTestName;
+            testName = newTestName;
             return this;
         }
 
@@ -243,7 +245,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder waitForShardLeader(final String... shardNames) {
-            this.waitForshardLeader = shardNames;
+            waitForshardLeader = shardNames;
             return this;
         }
 
@@ -253,7 +255,7 @@ public class MemberNode {
          * @return this Builder
          */
         public Builder createOperDatastore(final boolean value) {
-            this.createOperDatastore = value;
+            createOperDatastore = value;
             return this;
         }
 
@@ -262,8 +264,8 @@ public class MemberNode {
          *
          * @return this Builder
          */
-        public Builder schemaContext(final SchemaContext newSchemaContext) {
-            this.schemaContext = newSchemaContext;
+        public Builder schemaContext(final EffectiveModelContext newSchemaContext) {
+            schemaContext = newSchemaContext;
             return this;
         }
 
@@ -278,9 +280,9 @@ public class MemberNode {
         }
 
         public MemberNode build() throws Exception {
-            Preconditions.checkNotNull(moduleShardsConfig, "moduleShardsConfig must be specified");
-            Preconditions.checkNotNull(akkaConfig, "akkaConfig must be specified");
-            Preconditions.checkNotNull(testName, "testName must be specified");
+            requireNonNull(moduleShardsConfig, "moduleShardsConfig must be specified");
+            requireNonNull(akkaConfig, "akkaConfig must be specified");
+            requireNonNull(testName, "testName must be specified");
 
             if (schemaContext == null) {
                 schemaContext = SchemaContextHelper.full();
@@ -306,12 +308,12 @@ public class MemberNode {
 
             String memberName = new ClusterWrapperImpl(system).getCurrentMemberName().getName();
             node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-config-" + memberName);
-            node.configDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class,
-                    "config_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader);
+            node.configDataStore = node.kit.setupDataStore(ClientBackedDataStore.class, "config_" + testName,
+                moduleShardsConfig, true, schemaContext, waitForshardLeader);
 
             if (createOperDatastore) {
                 node.kit.getDatastoreContextBuilder().shardManagerPersistenceId("shard-manager-oper-" + memberName);
-                node.operDataStore = node.kit.setupAbstractDataStore(DistributedDataStore.class,
+                node.operDataStore = node.kit.setupDataStore(ClientBackedDataStore.class,
                         "oper_" + testName, moduleShardsConfig, true, schemaContext, waitForshardLeader);
             }
 
@@ -5,38 +5,16 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import java.util.concurrent.atomic.AtomicReference;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 interface OperationCallback {
-    OperationCallback NO_OP_CALLBACK = new OperationCallback() {
-        @Override
-        public void run() {
-        }
-
-        @Override
-        public void success() {
-        }
-
-        @Override
-        public void failure() {
-        }
-
-        @Override
-        public void pause() {
-        }
-
-        @Override
-        public void resume() {
-        }
-    };
-
     class Reference extends AtomicReference<OperationCallback> {
         private static final long serialVersionUID = 1L;
 
-        Reference(OperationCallback initialValue) {
+        Reference(final OperationCallback initialValue) {
             super(initialValue);
         }
     }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RemoteTransactionContextTest.java
deleted file mode 100644 (file)
index ead6486..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-
-import akka.actor.ActorRef;
-import akka.actor.Status.Failure;
-import akka.dispatch.ExecutionContexts;
-import akka.dispatch.OnComplete;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.FrontendType;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.DataExists;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Test whether RemoteTransactionContext operates correctly.
- */
-public class RemoteTransactionContextTest extends AbstractActorTest {
-    private static final TransactionIdentifier TX_ID = new TransactionIdentifier(new LocalHistoryIdentifier(
-        ClientIdentifier.create(FrontendIdentifier.create(MemberName.forName("test"), FrontendType.forName("test")), 0),
-        0), 0);
-    private static final DeleteModification DELETE = new DeleteModification(DataStoreVersions.CURRENT_VERSION);
-
-    private OperationLimiter limiter;
-    private RemoteTransactionContext txContext;
-    private ActorUtils actorUtils;
-    private TestKit kit;
-
-    @Before
-    public void before() {
-        kit = new TestKit(getSystem());
-        actorUtils = Mockito.spy(new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
-            mock(Configuration.class)));
-        limiter = new OperationLimiter(TX_ID, 4, 0);
-        txContext = new RemoteTransactionContext(TX_ID, actorUtils.actorSelection(kit.getRef().path()), actorUtils,
-            DataStoreVersions.CURRENT_VERSION, limiter);
-        txContext.operationHandOffComplete();
-    }
-
-    /**
-     * OperationLimiter should be correctly released when a failure, like AskTimeoutException occurs. Future reads
-     * need to complete immediately with the failure and modifications should not be throttled and thrown away
-     * immediately.
-     */
-    @Test
-    public void testLimiterOnFailure() throws TimeoutException, InterruptedException {
-        txContext.executeModification(DELETE, null);
-        txContext.executeModification(DELETE, null);
-        assertEquals(2, limiter.availablePermits());
-
-        final Future<Object> sendFuture = txContext.sendBatchedModifications();
-        assertEquals(2, limiter.availablePermits());
-
-        BatchedModifications msg = kit.expectMsgClass(BatchedModifications.class);
-        assertEquals(2, msg.getModifications().size());
-        assertEquals(1, msg.getTotalMessagesSent());
-        sendReply(new Failure(new NullPointerException()));
-        assertFuture(sendFuture, new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object success) {
-                assertTrue(failure instanceof NullPointerException);
-                assertEquals(4, limiter.availablePermits());
-
-                // The transaction has failed, no throttling should occur
-                txContext.executeModification(DELETE, null);
-                assertEquals(4, limiter.availablePermits());
-
-                // Executing a read should result in immediate failure
-                final SettableFuture<Boolean> readFuture = SettableFuture.create();
-                txContext.executeRead(new DataExists(), readFuture, null);
-                assertTrue(readFuture.isDone());
-                try {
-                    readFuture.get();
-                    fail("Read future did not fail");
-                } catch (ExecutionException | InterruptedException e) {
-                    assertTrue(e.getCause() instanceof NullPointerException);
-                }
-            }
-        });
-
-        final Future<Object> commitFuture = txContext.directCommit(null);
-
-        msg = kit.expectMsgClass(BatchedModifications.class);
-        // Modification should have been thrown away by the dropped transmit induced by executeRead()
-        assertEquals(0, msg.getModifications().size());
-        assertTrue(msg.isDoCommitOnReady());
-        assertTrue(msg.isReady());
-        assertEquals(2, msg.getTotalMessagesSent());
-        sendReply(new Failure(new IllegalStateException()));
-        assertFuture(commitFuture, new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object success) {
-                assertTrue(failure instanceof IllegalStateException);
-            }
-        });
-
-        kit.expectNoMessage();
-    }
-
-    /**
-     * OperationLimiter gives up throttling at some point -- {@link RemoteTransactionContext} needs to deal with that
-     * case, too.
-     */
-    @Test
-    public void testLimiterOnOverflowFailure() throws TimeoutException, InterruptedException {
-        txContext.executeModification(DELETE, null);
-        txContext.executeModification(DELETE, null);
-        txContext.executeModification(DELETE, null);
-        txContext.executeModification(DELETE, null);
-        assertEquals(0, limiter.availablePermits());
-        txContext.executeModification(DELETE, null);
-        // Last acquire should have failed ...
-        assertEquals(0, limiter.availablePermits());
-
-        final Future<Object> future = txContext.sendBatchedModifications();
-        assertEquals(0, limiter.availablePermits());
-
-        BatchedModifications msg = kit.expectMsgClass(BatchedModifications.class);
-        // ... so we are sending 5 modifications ...
-        assertEquals(5, msg.getModifications().size());
-        assertEquals(1, msg.getTotalMessagesSent());
-        sendReply(new Failure(new NullPointerException()));
-
-        assertFuture(future, new OnComplete<Object>() {
-            @Override
-            public void onComplete(final Throwable failure, final Object success) {
-                assertTrue(failure instanceof NullPointerException);
-                // ... but they account for only 4 permits.
-                assertEquals(4, limiter.availablePermits());
-            }
-        });
-
-        kit.expectNoMessage();
-    }
-
-    private void sendReply(final Object message) {
-        final ActorRef askActor = kit.getLastSender();
-        kit.watch(askActor);
-        kit.reply(new Failure(new IllegalStateException()));
-        kit.expectTerminated(askActor);
-    }
-
-    private static void assertFuture(final Future<Object> future, final OnComplete<Object> complete)
-            throws TimeoutException, InterruptedException {
-        Await.ready(future, FiniteDuration.apply(3, TimeUnit.SECONDS));
-        future.onComplete(complete, ExecutionContexts.fromExecutor(MoreExecutors.directExecutor()));
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/RootDataTreeChangeListenerProxyTest.java
new file mode 100644 (file)
index 0000000..1b5cc25
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.timeout;
+import static org.mockito.Mockito.verify;
+
+import akka.actor.ActorSelection;
+import akka.testkit.javadsl.TestKit;
+import com.google.common.collect.ImmutableList;
+import java.time.Duration;
+import java.util.List;
+import java.util.Set;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
+import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
+import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeNotificationListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
+import org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
+
+public class RootDataTreeChangeListenerProxyTest extends AbstractActorTest {
+
+    @Test(timeout = 10000)
+    public void testSuccessfulRegistrationOnTwoShards() {
+        final TestKit kit = new TestKit(getSystem());
+        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+            mock(Configuration.class));
+
+        ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
+            ClusteredDOMDataTreeChangeListener.class);
+
+        final YangInstanceIdentifier path = YangInstanceIdentifier.of();
+        final RootDataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> rootListenerProxy =
+            new RootDataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener,
+            Set.of("shard-1", "shard-2"));
+
+        final Duration timeout = Duration.ofSeconds(5);
+        FindLocalShard findLocalShard1 = kit.expectMsgClass(FindLocalShard.class);
+        kit.reply(new LocalShardFound(kit.getRef()));
+        FindLocalShard findLocalShard2 = kit.expectMsgClass(FindLocalShard.class);
+        kit.reply(new LocalShardFound(kit.getRef()));
+        assertTrue(List.of(findLocalShard1.getShardName(), findLocalShard2.getShardName())
+            .containsAll(List.of("shard-2", "shard-1")));
+
+        RegisterDataTreeChangeListener registerForShard1 = kit.expectMsgClass(timeout,
+            RegisterDataTreeChangeListener.class);
+        assertEquals("getPath", path, registerForShard1.getPath());
+        assertTrue("isRegisterOnAllInstances", registerForShard1.isRegisterOnAllInstances());
+
+        kit.reply(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
+
+        RegisterDataTreeChangeListener registerForShard2 = kit.expectMsgClass(timeout,
+            RegisterDataTreeChangeListener.class);
+        assertEquals("getPath", path, registerForShard2.getPath());
+        assertTrue("isRegisterOnAllInstances", registerForShard2.isRegisterOnAllInstances());
+
+        kit.reply(new RegisterDataTreeNotificationListenerReply(kit.getRef()));
+
+        assertEquals(registerForShard1.getListenerActorPath(), registerForShard2.getListenerActorPath());
+
+        final TestKit kit2 = new TestKit(getSystem());
+        final ActorSelection rootListenerActor = getSystem().actorSelection(registerForShard1.getListenerActorPath());
+        rootListenerActor.tell(new EnableNotification(true, "test"), kit.getRef());
+        final DataTreeCandidate peopleCandidate = DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(),
+            PeopleModel.create());
+        rootListenerActor.tell(new DataTreeChanged(ImmutableList.of(peopleCandidate)), kit.getRef());
+        rootListenerActor.tell(new DataTreeChanged(ImmutableList.of(peopleCandidate)), kit2.getRef());
+        //verify the 2 candidates were processed into 1 initial candidate
+        verify(mockClusteredListener, timeout(100).times(1)).onDataTreeChanged(any());
+
+        rootListenerProxy.close();
+    }
+
+    @Test(timeout = 10000, expected = java.lang.AssertionError.class)
+    public void testNotAllShardsFound() {
+        final TestKit kit = new TestKit(getSystem());
+        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+            mock(Configuration.class));
+
+        ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
+            ClusteredDOMDataTreeChangeListener.class);
+
+        final RootDataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> rootListenerProxy =
+            new RootDataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener, Set.of("shard-1", "shard-2"));
+
+        Duration timeout = Duration.ofSeconds(5);
+        kit.expectMsgClass(FindLocalShard.class);
+        kit.reply(new LocalShardFound(kit.getRef()));
+        kit.expectMsgClass(FindLocalShard.class);
+        // don't send second reply
+        kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+
+        rootListenerProxy.close();
+    }
+
+    @Test(timeout = 10000, expected = java.lang.AssertionError.class)
+    public void testLocalShardNotInitialized() {
+        final TestKit kit = new TestKit(getSystem());
+        ActorUtils actorUtils = new ActorUtils(getSystem(), kit.getRef(), mock(ClusterWrapper.class),
+            mock(Configuration.class));
+
+        ClusteredDOMDataTreeChangeListener mockClusteredListener = mock(
+            ClusteredDOMDataTreeChangeListener.class);
+
+        final RootDataTreeChangeListenerProxy<ClusteredDOMDataTreeChangeListener> rootListenerProxy =
+            new RootDataTreeChangeListenerProxy<>(actorUtils, mockClusteredListener, Set.of("shard-1"));
+
+        Duration timeout = Duration.ofSeconds(5);
+        kit.expectMsgClass(FindLocalShard.class);
+        kit.reply(new NotInitializedException("not initialized"));
+        // don't send second reply
+        kit.expectMsgClass(timeout, RegisterDataTreeChangeListener.class);
+
+        rootListenerProxy.close();
+    }
+}
index bd9c0a48ff3f1872d7ffea017796ac58c306eccf..0b5295584eca4b0100630d641e5a15edbb8a1cc1 100644 (file)
@@ -49,6 +49,7 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardCommitCoordinationTest extends AbstractShardTest {
     private static final Logger LOG = LoggerFactory.getLogger(ShardCommitCoordinationTest.class);
 
@@ -553,9 +554,9 @@ public class ShardCommitCoordinationTest extends AbstractShardTest {
         LOG.info("{} ending", testName);
     }
 
-    static void verifyInnerListEntry(TestActorRef<Shard> shard, int outerID, String innerID) {
+    static void verifyInnerListEntry(final TestActorRef<Shard> shard, final int outerID, final String innerID) {
         final YangInstanceIdentifier path = innerEntryPath(outerID, innerID);
-        final NormalizedNode<?, ?> innerListEntry = readStore(shard, path);
+        final NormalizedNode innerListEntry = readStore(shard, path);
         assertNotNull(path + " not found", innerListEntry);
     }
 }
index 7de4835e374691b7131fe75b5d9be13492e06965..234c69e011bd05a978d13b6927b418a191fb8cf5 100644 (file)
@@ -23,7 +23,8 @@ import org.mockito.InOrder;
 import org.mockito.invocation.InvocationOnMock;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public final class ShardDataTreeMocking {
 
@@ -37,18 +38,18 @@ public final class ShardDataTreeMocking {
     }
 
     public static ShardDataTreeCohort immediateCanCommit(final ShardDataTreeCohort cohort) {
-        final FutureCallback<Void> callback = mockCallback();
-        doNothing().when(callback).onSuccess(null);
+        final FutureCallback<Empty> callback = mockCallback();
+        doNothing().when(callback).onSuccess(Empty.value());
         cohort.canCommit(callback);
 
-        verify(callback).onSuccess(null);
+        verify(callback).onSuccess(Empty.value());
         verifyNoMoreInteractions(callback);
         return cohort;
     }
 
-    public static FutureCallback<Void> coordinatedCanCommit(final ShardDataTreeCohort cohort) {
-        final FutureCallback<Void> callback = mockCallback();
-        doNothing().when(callback).onSuccess(null);
+    public static FutureCallback<Empty> coordinatedCanCommit(final ShardDataTreeCohort cohort) {
+        final FutureCallback<Empty> callback = mockCallback();
+        doNothing().when(callback).onSuccess(Empty.value());
         doNothing().when(callback).onFailure(any(Throwable.class));
         cohort.canCommit(callback);
         return callback;
@@ -102,11 +103,11 @@ public final class ShardDataTreeMocking {
         }).when(preCommitCallback).onSuccess(any(DataTreeCandidate.class));
         doNothing().when(preCommitCallback).onFailure(any(Throwable.class));
 
-        final FutureCallback<Void> canCommit = mockCallback();
+        final FutureCallback<Empty> canCommit = mockCallback();
         doAnswer(invocation -> {
             cohort.preCommit(preCommitCallback);
             return null;
-        }).when(canCommit).onSuccess(null);
+        }).when(canCommit).onSuccess(Empty.value());
         doNothing().when(canCommit).onFailure(any(Throwable.class));
 
         cohort.canCommit(canCommit);
index 814f90711d43387ee4219bc2c4811ab21ece497d..ef6b2448f72bc0aac5f3e157a56bc36204713abf 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertEquals;
@@ -34,12 +33,12 @@ import static org.opendaylight.controller.cluster.datastore.ShardDataTreeMocking
 import static org.opendaylight.controller.cluster.datastore.ShardDataTreeMocking.immediatePreCommit;
 
 import com.google.common.base.Ticker;
-import com.google.common.collect.Maps;
 import com.google.common.primitives.UnsignedLong;
 import com.google.common.util.concurrent.FutureCallback;
-import java.math.BigInteger;
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -49,21 +48,35 @@ import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload;
+import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.PayloadVersion;
+import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 public class ShardDataTreeTest extends AbstractTest {
@@ -71,12 +84,12 @@ public class ShardDataTreeTest extends AbstractTest {
 
     private final Shard mockShard = Mockito.mock(Shard.class);
     private ShardDataTree shardDataTree;
-    private SchemaContext fullSchema;
+    private EffectiveModelContext fullSchema;
 
     @Before
     public void setUp() {
         doReturn(Ticker.systemTicker()).when(mockShard).ticker();
-        doReturn(mock(ShardStats.class)).when(mockShard).getShardMBean();
+        doReturn(new ShardStats("shardName", "mxBeanType", mockShard)).when(mockShard).getShardMBean();
         doReturn(DATASTORE_CONTEXT).when(mockShard).getDatastoreContext();
 
         fullSchema = SchemaContextHelper.full();
@@ -125,11 +138,11 @@ public class ShardDataTreeTest extends AbstractTest {
 
         final DataTreeSnapshot snapshot1 = readOnlyShardDataTreeTransaction.getSnapshot();
 
-        final Optional<NormalizedNode<?, ?>> optional = snapshot1.readNode(CarsModel.BASE_PATH);
+        final Optional<NormalizedNode> optional = snapshot1.readNode(CarsModel.BASE_PATH);
 
         assertEquals(expectedCarsPresent, optional.isPresent());
 
-        final Optional<NormalizedNode<?, ?>> optional1 = snapshot1.readNode(PeopleModel.BASE_PATH);
+        final Optional<NormalizedNode> optional1 = snapshot1.readNode(PeopleModel.BASE_PATH);
 
         assertEquals(expectedPeoplePresent, optional1.isPresent());
     }
@@ -142,11 +155,11 @@ public class ShardDataTreeTest extends AbstractTest {
         candidates.add(addCar(shardDataTree));
         candidates.add(removeCar(shardDataTree));
 
-        final NormalizedNode<?, ?> expected = getCars(shardDataTree);
+        final NormalizedNode expected = getCars(shardDataTree);
 
         applyCandidates(shardDataTree, candidates);
 
-        final NormalizedNode<?, ?> actual = getCars(shardDataTree);
+        final NormalizedNode actual = getCars(shardDataTree);
 
         assertEquals(expected, actual);
     }
@@ -161,11 +174,11 @@ public class ShardDataTreeTest extends AbstractTest {
         candidates.add(addCar(shardDataTree));
         candidates.add(removeCar(shardDataTree));
 
-        final NormalizedNode<?, ?> expected = getCars(shardDataTree);
+        final NormalizedNode expected = getCars(shardDataTree);
 
         applyCandidates(shardDataTree, candidates);
 
-        final NormalizedNode<?, ?> actual = getCars(shardDataTree);
+        final NormalizedNode actual = getCars(shardDataTree);
 
         assertEquals(expected, actual);
     }
@@ -176,19 +189,19 @@ public class ShardDataTreeTest extends AbstractTest {
 
         DOMDataTreeChangeListener listener = mock(DOMDataTreeChangeListener.class);
         shardDataTree.registerTreeChangeListener(CarsModel.CAR_LIST_PATH.node(CarsModel.CAR_QNAME), listener,
-            com.google.common.base.Optional.absent(), noop -> { });
+            Optional.empty(), noop -> { });
 
         addCar(shardDataTree, "optima");
 
         verifyOnDataTreeChanged(listener, dtc -> {
-            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().getModificationType());
+            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().modificationType());
             assertEquals("getRootPath", CarsModel.newCarPath("optima"), dtc.getRootPath());
         });
 
         addCar(shardDataTree, "sportage");
 
         verifyOnDataTreeChanged(listener, dtc -> {
-            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().getModificationType());
+            assertEquals("getModificationType", ModificationType.WRITE, dtc.getRootNode().modificationType());
             assertEquals("getRootPath", CarsModel.newCarPath("sportage"), dtc.getRootPath());
         });
 
@@ -199,14 +212,14 @@ public class ShardDataTreeTest extends AbstractTest {
 
         shardDataTree.applySnapshot(newDataTree.takeStateSnapshot());
 
-        Map<YangInstanceIdentifier, ModificationType> expChanges = Maps.newHashMap();
+        Map<YangInstanceIdentifier, ModificationType> expChanges = new HashMap<>();
         expChanges.put(CarsModel.newCarPath("optima"), ModificationType.WRITE);
         expChanges.put(CarsModel.newCarPath("murano"), ModificationType.WRITE);
         expChanges.put(CarsModel.newCarPath("sportage"), ModificationType.DELETE);
         verifyOnDataTreeChanged(listener, dtc -> {
             ModificationType expType = expChanges.remove(dtc.getRootPath());
             assertNotNull("Got unexpected change for " + dtc.getRootPath(), expType);
-            assertEquals("getModificationType", expType, dtc.getRootNode().getModificationType());
+            assertEquals("getModificationType", expType, dtc.getRootNode().modificationType());
         });
 
         if (!expChanges.isEmpty()) {
@@ -222,73 +235,73 @@ public class ShardDataTreeTest extends AbstractTest {
         final ShardDataTreeCohort cohort2 = newShardDataTreeCohort(snapshot ->
             snapshot.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()));
 
-        NormalizedNode<?, ?> peopleNode = PeopleModel.create();
+        NormalizedNode peopleNode = PeopleModel.create();
         final ShardDataTreeCohort cohort3 = newShardDataTreeCohort(snapshot ->
             snapshot.write(PeopleModel.BASE_PATH, peopleNode));
 
         YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
-        MapEntryNode carNode = CarsModel.newCarEntry("optima", new BigInteger("100"));
+        MapEntryNode carNode = CarsModel.newCarEntry("optima", Uint64.valueOf(100));
         final ShardDataTreeCohort cohort4 = newShardDataTreeCohort(snapshot -> snapshot.write(carPath, carNode));
 
         immediateCanCommit(cohort1);
-        final FutureCallback<Void> canCommitCallback2 = coordinatedCanCommit(cohort2);
-        final FutureCallback<Void> canCommitCallback3 = coordinatedCanCommit(cohort3);
-        final FutureCallback<Void> canCommitCallback4 = coordinatedCanCommit(cohort4);
+        final FutureCallback<Empty> canCommitCallback2 = coordinatedCanCommit(cohort2);
+        final FutureCallback<Empty> canCommitCallback3 = coordinatedCanCommit(cohort3);
+        final FutureCallback<Empty> canCommitCallback4 = coordinatedCanCommit(cohort4);
 
         final FutureCallback<DataTreeCandidate> preCommitCallback1 = coordinatedPreCommit(cohort1);
         verify(preCommitCallback1).onSuccess(cohort1.getCandidate());
-        verify(canCommitCallback2).onSuccess(null);
+        verify(canCommitCallback2).onSuccess(Empty.value());
 
         final FutureCallback<DataTreeCandidate> preCommitCallback2 = coordinatedPreCommit(cohort2);
         verify(preCommitCallback2).onSuccess(cohort2.getCandidate());
-        verify(canCommitCallback3).onSuccess(null);
+        verify(canCommitCallback3).onSuccess(Empty.value());
 
         final FutureCallback<DataTreeCandidate> preCommitCallback3 = coordinatedPreCommit(cohort3);
         verify(preCommitCallback3).onSuccess(cohort3.getCandidate());
-        verify(canCommitCallback4).onSuccess(null);
+        verify(canCommitCallback4).onSuccess(Empty.value());
 
         final FutureCallback<DataTreeCandidate> preCommitCallback4 = coordinatedPreCommit(cohort4);
         verify(preCommitCallback4).onSuccess(cohort4.getCandidate());
 
         final FutureCallback<UnsignedLong> commitCallback2 = coordinatedCommit(cohort2);
-        verify(mockShard, never()).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        verify(mockShard, never()).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 anyBoolean());
         verifyNoMoreInteractions(commitCallback2);
 
         final FutureCallback<UnsignedLong> commitCallback4 = coordinatedCommit(cohort4);
-        verify(mockShard, never()).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+        verify(mockShard, never()).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
                 anyBoolean());
         verifyNoMoreInteractions(commitCallback4);
 
         final FutureCallback<UnsignedLong> commitCallback1 = coordinatedCommit(cohort1);
         InOrder inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort2.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort2.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
         verifyNoMoreInteractions(commitCallback1);
         verifyNoMoreInteractions(commitCallback2);
 
         final FutureCallback<UnsignedLong> commitCallback3 = coordinatedCommit(cohort3);
         inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
         verifyNoMoreInteractions(commitCallback3);
         verifyNoMoreInteractions(commitCallback4);
 
         final ShardDataTreeCohort cohort5 = newShardDataTreeCohort(snapshot ->
             snapshot.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer()));
-        final FutureCallback<Void> canCommitCallback5 = coordinatedCanCommit(cohort5);
+        final FutureCallback<Empty> canCommitCallback5 = coordinatedCanCommit(cohort5);
 
         // The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
         CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
                 cohort1.getCandidate());
-        shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort2.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort4.getIdentifier(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort2.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort4.transactionId(), mockPayload);
 
         inOrder = inOrder(commitCallback1, commitCallback2, commitCallback3, commitCallback4);
         inOrder.verify(commitCallback1).onSuccess(any(UnsignedLong.class));
@@ -296,17 +309,12 @@ public class ShardDataTreeTest extends AbstractTest {
         inOrder.verify(commitCallback3).onSuccess(any(UnsignedLong.class));
         inOrder.verify(commitCallback4).onSuccess(any(UnsignedLong.class));
 
-        verify(canCommitCallback5).onSuccess(null);
+        verify(canCommitCallback5).onSuccess(Empty.value());
 
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(carPath);
-        assertTrue("Car node present", optional.isPresent());
-        assertEquals("Car node", carNode, optional.get());
-
-        optional = snapshot.readNode(PeopleModel.BASE_PATH);
-        assertTrue("People node present", optional.isPresent());
-        assertEquals("People node", peopleNode, optional.get());
+        assertEquals("Car node", Optional.of(carNode), snapshot.readNode(carPath));
+        assertEquals("People node", Optional.of(peopleNode), snapshot.readNode(PeopleModel.BASE_PATH));
     }
 
     @Test
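Two simplifications recur throughout this file: car prices move from new BigInteger("100") to the yangtools unsigned type, and the presence check plus value check collapse into one Optional comparison. A minimal sketch of both, assuming JUnit 4 and Uint64 are available (helper names are illustrative):

    import static org.junit.Assert.assertEquals;

    import java.util.Optional;
    import org.opendaylight.yangtools.yang.common.Uint64;

    final class AssertionSketch {
        static final Uint64 PRICE = Uint64.valueOf(100); // replaces new BigInteger("100")

        static void assertPresentAndEqual(final Object expected, final Optional<?> actual) {
            // One assertion instead of assertTrue(actual.isPresent()) followed by assertEquals(expected, actual.get()).
            assertEquals(Optional.of(expected), actual);
        }
    }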
@@ -318,7 +326,7 @@ public class ShardDataTreeTest extends AbstractTest {
             snapshot.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()));
 
         YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
-        MapEntryNode carNode = CarsModel.newCarEntry("optima", new BigInteger("100"));
+        MapEntryNode carNode = CarsModel.newCarEntry("optima", Uint64.valueOf(100));
         final ShardDataTreeCohort cohort3 = newShardDataTreeCohort(snapshot -> snapshot.write(carPath, carNode));
 
         final FutureCallback<UnsignedLong> commitCallback2 = immediate3PhaseCommit(cohort2);
@@ -326,19 +334,19 @@ public class ShardDataTreeTest extends AbstractTest {
         final FutureCallback<UnsignedLong> commitCallback1 = immediate3PhaseCommit(cohort1);
 
         InOrder inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort2.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort2.transactionId()), any(CommitTransactionPayload.class),
                 eq(true));
-        inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
 
         // The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
         CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
                 cohort1.getCandidate());
-        shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort2.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort2.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
 
         inOrder = inOrder(commitCallback1, commitCallback2, commitCallback3);
         inOrder.verify(commitCallback1).onSuccess(any(UnsignedLong.class));
@@ -347,9 +355,7 @@ public class ShardDataTreeTest extends AbstractTest {
 
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(carPath);
-        assertTrue("Car node present", optional.isPresent());
-        assertEquals("Car node", carNode, optional.get());
+        assertEquals("Car node", Optional.of(carNode), snapshot.readNode(carPath));
     }
 
     @Test
@@ -363,7 +369,7 @@ public class ShardDataTreeTest extends AbstractTest {
             snapshot.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()));
 
         YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
-        MapEntryNode carNode = CarsModel.newCarEntry("optima", new BigInteger("100"));
+        MapEntryNode carNode = CarsModel.newCarEntry("optima", Uint64.valueOf(100));
         final ShardDataTreeCohort cohort3 = newShardDataTreeCohort(snapshot -> snapshot.write(carPath, carNode));
 
         final FutureCallback<UnsignedLong> commitCallback1 = immediate3PhaseCommit(cohort1);
@@ -376,7 +382,7 @@ public class ShardDataTreeTest extends AbstractTest {
         inOrder.verify(commitCallback3).onSuccess(any(UnsignedLong.class));
 
         final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(CarsModel.BASE_PATH);
+        Optional<NormalizedNode> optional = snapshot.readNode(CarsModel.BASE_PATH);
         assertTrue("Car node present", optional.isPresent());
     }
 
@@ -393,7 +399,7 @@ public class ShardDataTreeTest extends AbstractTest {
             snapshot.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()));
 
         YangInstanceIdentifier carPath = CarsModel.newCarPath("optima");
-        MapEntryNode carNode = CarsModel.newCarEntry("optima", new BigInteger("100"));
+        MapEntryNode carNode = CarsModel.newCarEntry("optima", Uint64.valueOf(100));
         final ShardDataTreeCohort cohort4 = newShardDataTreeCohort(snapshot -> snapshot.write(carPath, carNode));
 
         coordinatedCanCommit(cohort2);
@@ -405,10 +411,10 @@ public class ShardDataTreeTest extends AbstractTest {
         coordinatedPreCommit(cohort2);
         coordinatedPreCommit(cohort3);
 
-        FutureCallback<Void> mockAbortCallback = mock(FutureCallback.class);
-        doNothing().when(mockAbortCallback).onSuccess(null);
+        FutureCallback<Empty> mockAbortCallback = mock(FutureCallback.class);
+        doNothing().when(mockAbortCallback).onSuccess(Empty.value());
         cohort2.abort(mockAbortCallback);
-        verify(mockAbortCallback).onSuccess(null);
+        verify(mockAbortCallback).onSuccess(Empty.value());
 
         coordinatedPreCommit(cohort4);
         coordinatedCommit(cohort1);
@@ -416,25 +422,24 @@ public class ShardDataTreeTest extends AbstractTest {
         coordinatedCommit(cohort4);
 
         InOrder inOrder = inOrder(mockShard);
-        inOrder.verify(mockShard).persistPayload(eq(cohort1.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort1.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
-        inOrder.verify(mockShard).persistPayload(eq(cohort3.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort3.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
-        inOrder.verify(mockShard).persistPayload(eq(cohort4.getIdentifier()), any(CommitTransactionPayload.class),
+        inOrder.verify(mockShard).persistPayload(eq(cohort4.transactionId()), any(CommitTransactionPayload.class),
                 eq(false));
 
         // The payload instance doesn't matter - it just needs to be of type CommitTransactionPayload.
         CommitTransactionPayload mockPayload = CommitTransactionPayload.create(nextTransactionId(),
                 cohort1.getCandidate());
-        shardDataTree.applyReplicatedPayload(cohort1.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort3.getIdentifier(), mockPayload);
-        shardDataTree.applyReplicatedPayload(cohort4.getIdentifier(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort1.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort3.transactionId(), mockPayload);
+        shardDataTree.applyReplicatedPayload(cohort4.transactionId(), mockPayload);
 
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(carPath);
-        assertTrue("Car node present", optional.isPresent());
-        assertEquals("Car node", carNode, optional.get());
+        Optional<NormalizedNode> optional = snapshot.readNode(carPath);
+        assertEquals("Car node", Optional.of(carNode), optional);
     }
 
     @SuppressWarnings("unchecked")
@@ -448,20 +453,20 @@ public class ShardDataTreeTest extends AbstractTest {
         final ShardDataTreeCohort cohort2 = newShardDataTreeCohort(snapshot ->
             snapshot.write(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode()));
 
-        NormalizedNode<?, ?> peopleNode = PeopleModel.create();
+        NormalizedNode peopleNode = PeopleModel.create();
         final ShardDataTreeCohort cohort3 = newShardDataTreeCohort(snapshot ->
             snapshot.write(PeopleModel.BASE_PATH, peopleNode));
 
         immediateCanCommit(cohort1);
-        FutureCallback<Void> canCommitCallback2 = coordinatedCanCommit(cohort2);
+        FutureCallback<Empty> canCommitCallback2 = coordinatedCanCommit(cohort2);
 
         coordinatedPreCommit(cohort1);
-        verify(canCommitCallback2).onSuccess(null);
+        verify(canCommitCallback2).onSuccess(Empty.value());
 
-        FutureCallback<Void> mockAbortCallback = mock(FutureCallback.class);
-        doNothing().when(mockAbortCallback).onSuccess(null);
+        FutureCallback<Empty> mockAbortCallback = mock(FutureCallback.class);
+        doNothing().when(mockAbortCallback).onSuccess(Empty.value());
         cohort1.abort(mockAbortCallback);
-        verify(mockAbortCallback).onSuccess(null);
+        verify(mockAbortCallback).onSuccess(Empty.value());
 
         FutureCallback<DataTreeCandidate> preCommitCallback2 = coordinatedPreCommit(cohort2);
         verify(preCommitCallback2).onFailure(any(Throwable.class));
@@ -472,9 +477,117 @@ public class ShardDataTreeTest extends AbstractTest {
 
         final DataTreeSnapshot snapshot =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
-        Optional<NormalizedNode<?, ?>> optional = snapshot.readNode(PeopleModel.BASE_PATH);
-        assertTrue("People node present", optional.isPresent());
-        assertEquals("People node", peopleNode, optional.get());
+        Optional<NormalizedNode> optional = snapshot.readNode(PeopleModel.BASE_PATH);
+        assertEquals("People node", Optional.of(peopleNode), optional);
+    }
+
+    @Test
+    public void testUintCommitPayload() throws IOException {
+        shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(),
+            DataTreeCandidates.fromNormalizedNode(YangInstanceIdentifier.of(), bigIntegerRoot()),
+            PayloadVersion.POTASSIUM));
+
+        assertCarsUint64();
+    }
+
+    @Test
+    public void testUintSnapshot() throws IOException, DataValidationFailedException {
+        shardDataTree.applyRecoverySnapshot(new ShardSnapshotState(new MetadataShardDataTreeSnapshot(bigIntegerRoot()),
+            true));
+
+        assertCarsUint64();
+    }
+
+    @Test
+    public void testUintReplay() throws DataValidationFailedException, IOException {
+        // Commit two writes and one merge, saving the data tree candidate for each.
+        //        write(car "one", price 1)
+        //        write(car "two", price 2)
+        //        merge(car "three", price 10)
+        final DataTree dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
+            fullSchema);
+        DataTreeModification mod = dataTree.takeSnapshot().newModification();
+        mod.write(CarsModel.BASE_PATH, Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CarsModel.BASE_QNAME))
+                .withChild(Builders.mapBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
+                    .withChild(createCar("one", Uint64.ONE))
+                    .build())
+                .build());
+        mod.ready();
+        dataTree.validate(mod);
+        final DataTreeCandidate first = dataTree.prepare(mod);
+        dataTree.commit(first);
+
+        mod = dataTree.takeSnapshot().newModification();
+        mod.write(CarsModel.newCarPath("two"), createCar("two", Uint64.TWO));
+        mod.ready();
+        dataTree.validate(mod);
+        final DataTreeCandidate second = dataTree.prepare(mod);
+        dataTree.commit(second);
+
+        mod = dataTree.takeSnapshot().newModification();
+        mod.merge(CarsModel.CAR_LIST_PATH, Builders.mapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
+            .withChild(createCar("three", Uint64.TEN))
+            .build());
+        mod.ready();
+        dataTree.validate(mod);
+        final DataTreeCandidate third = dataTree.prepare(mod);
+        dataTree.commit(third);
+
+        // Apply first candidate as a snapshot
+        shardDataTree.applyRecoverySnapshot(new ShardSnapshotState(
+            new MetadataShardDataTreeSnapshot(first.getRootNode().getDataAfter()), true));
+        // Apply the other two snapshots as transactions
+        shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(), second,
+            PayloadVersion.POTASSIUM));
+        shardDataTree.applyRecoveryPayload(CommitTransactionPayload.create(nextTransactionId(), third,
+            PayloadVersion.POTASSIUM));
+
+        // Verify uint translation
+        final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
+
+        assertEquals(Builders.mapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
+            // Note: Uint64
+            .withChild(createCar("one", Uint64.ONE))
+            .withChild(createCar("two", Uint64.TWO))
+            .withChild(createCar("three", Uint64.TEN))
+            .build(), snapshot.readNode(CarsModel.CAR_LIST_PATH).orElseThrow());
+    }
+
+    private void assertCarsUint64() {
+        final DataTreeSnapshot snapshot = shardDataTree.newReadOnlyTransaction(nextTransactionId()).getSnapshot();
+        final NormalizedNode cars = snapshot.readNode(CarsModel.CAR_LIST_PATH).orElseThrow();
+
+        assertEquals(Builders.mapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
+            // Note: Uint64
+            .withChild(createCar("foo", Uint64.ONE))
+            .build(), cars);
+    }
+
+    private static ContainerNode bigIntegerRoot() {
+        return Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(SchemaContext.NAME))
+            .withChild(Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
+                .withChild(Builders.mapBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(CarsModel.CAR_QNAME))
+                    .withChild(createCar("foo", Uint64.ONE))
+                    .build())
+                .build())
+            .build();
+    }
+
+    private static MapEntryNode createCar(final String name, final Object value) {
+        return Builders.mapEntryBuilder()
+            .withNodeIdentifier(NodeIdentifierWithPredicates.of(CarsModel.CAR_QNAME, CarsModel.CAR_NAME_QNAME, name))
+            .withChild(ImmutableNodes.leafNode(CarsModel.CAR_NAME_QNAME, name))
+            // Note: old BigInteger
+            .withChild(ImmutableNodes.leafNode(CarsModel.CAR_PRICE_QNAME, value))
+            .build();
     }
 
     private ShardDataTreeCohort newShardDataTreeCohort(final DataTreeOperation operation) {
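testUintReplay above drives the yangtools in-memory data tree directly through its snapshot/modify/ready/validate/prepare/commit cycle before replaying the resulting candidates. A minimal sketch of that cycle on its own, assuming a suitable EffectiveModelContext is supplied by the caller:

    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
    import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
    import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
    import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;

    final class DataTreeCommitSketch {
        static DataTreeCandidate writeAndCommit(final EffectiveModelContext modelContext,
                final YangInstanceIdentifier path, final ContainerNode data) throws DataValidationFailedException {
            final DataTree dataTree = new InMemoryDataTreeFactory()
                .create(DataTreeConfiguration.DEFAULT_OPERATIONAL, modelContext);
            // Snapshot -> modification -> ready -> validate -> prepare -> commit, as in testUintReplay.
            final DataTreeModification mod = dataTree.takeSnapshot().newModification();
            mod.write(path, data);
            mod.ready();
            dataTree.validate(mod);
            final DataTreeCandidate candidate = dataTree.prepare(mod);
            dataTree.commit(candidate);
            return candidate;
        }
    }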
@@ -488,7 +601,7 @@ public class ShardDataTreeTest extends AbstractTest {
     @SuppressWarnings({ "rawtypes", "unchecked" })
     private static void verifyOnDataTreeChanged(final DOMDataTreeChangeListener listener,
             final Consumer<DataTreeCandidate> callback) {
-        ArgumentCaptor<Collection> changes = ArgumentCaptor.forClass(Collection.class);
+        ArgumentCaptor<List> changes = ArgumentCaptor.forClass(List.class);
         verify(listener, atLeastOnce()).onDataTreeChanged(changes.capture());
         for (Collection list : changes.getAllValues()) {
             for (Object dtc : list) {
@@ -499,16 +612,16 @@ public class ShardDataTreeTest extends AbstractTest {
         reset(listener);
     }
 
-    private static NormalizedNode<?, ?> getCars(final ShardDataTree shardDataTree) {
+    private static NormalizedNode getCars(final ShardDataTree shardDataTree) {
         final ReadOnlyShardDataTreeTransaction readOnlyShardDataTreeTransaction =
                 shardDataTree.newReadOnlyTransaction(nextTransactionId());
         final DataTreeSnapshot snapshot1 = readOnlyShardDataTreeTransaction.getSnapshot();
 
-        final Optional<NormalizedNode<?, ?>> optional = snapshot1.readNode(CarsModel.BASE_PATH);
+        final Optional<NormalizedNode> optional = snapshot1.readNode(CarsModel.BASE_PATH);
 
         assertTrue(optional.isPresent());
 
-        return optional.get();
+        return optional.orElseThrow();
     }
 
     private static DataTreeCandidate addCar(final ShardDataTree shardDataTree) {
@@ -519,7 +632,7 @@ public class ShardDataTreeTest extends AbstractTest {
         return doTransaction(shardDataTree, snapshot -> {
             snapshot.merge(CarsModel.BASE_PATH, CarsModel.emptyContainer());
             snapshot.merge(CarsModel.CAR_LIST_PATH, CarsModel.newCarMapNode());
-            snapshot.write(CarsModel.newCarPath(name), CarsModel.newCarEntry(name, new BigInteger("100")));
+            snapshot.write(CarsModel.newCarPath(name), CarsModel.newCarEntry(name, Uint64.valueOf(100)));
         });
     }
 
index 39285f9d67f90bfe6c51ec981debf5ff1b305833..996aa86636415c9843982709828f25ef222ceb00 100644 (file)
@@ -5,15 +5,14 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import com.google.common.base.Optional;
 import java.io.IOException;
+import java.util.Optional;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -25,15 +24,16 @@ import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeSnapshot;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,8 +41,8 @@ public class ShardRecoveryCoordinatorTest extends AbstractTest {
     private static final Logger FOO_LOGGER = LoggerFactory.getLogger("foo");
 
     private ShardDataTree peopleDataTree;
-    private SchemaContext peopleSchemaContext;
-    private SchemaContext carsSchemaContext;
+    private EffectiveModelContext peopleSchemaContext;
+    private EffectiveModelContext carsSchemaContext;
     private ShardRecoveryCoordinator coordinator;
 
     @Before
@@ -58,7 +58,8 @@ public class ShardRecoveryCoordinatorTest extends AbstractTest {
     }
 
     @Test
-    public void testAppendRecoveredLogEntryCommitTransactionPayload() throws IOException {
+    public void testAppendRecoveredLogEntryCommitTransactionPayload() throws IOException,
+            DataValidationFailedException {
         try {
             coordinator.appendRecoveredLogEntry(CommitTransactionPayload.create(nextTransactionId(), createCar()));
         } catch (final SchemaValidationFailedException e) {
@@ -69,7 +70,7 @@ public class ShardRecoveryCoordinatorTest extends AbstractTest {
     }
 
     @Test
-    public void testApplyRecoverySnapshot() {
+    public void testApplyRecoverySnapshot() throws DataValidationFailedException {
         coordinator.applyRecoverySnapshot(createSnapshot());
 
         assertFalse(readCars(peopleDataTree).isPresent());
@@ -86,7 +87,7 @@ public class ShardRecoveryCoordinatorTest extends AbstractTest {
         }
     }
 
-    private DataTreeCandidate createCar() {
+    private DataTreeCandidate createCar() throws DataValidationFailedException {
         final DataTree dataTree = new InMemoryDataTreeFactory().create(
             DataTreeConfiguration.DEFAULT_OPERATIONAL, carsSchemaContext);
 
@@ -99,23 +100,23 @@ public class ShardRecoveryCoordinatorTest extends AbstractTest {
         return dataTree.prepare(modification);
     }
 
-    private Optional<NormalizedNode<?,?>> readCars(final ShardDataTree shardDataTree) {
+    private Optional<NormalizedNode> readCars(final ShardDataTree shardDataTree) {
         final DataTree dataTree = shardDataTree.getDataTree();
         // FIXME: this should not be called here
-        dataTree.setSchemaContext(peopleSchemaContext);
+        dataTree.setEffectiveModelContext(peopleSchemaContext);
 
         return shardDataTree.readNode(CarsModel.BASE_PATH);
     }
 
-    private Optional<NormalizedNode<?,?>> readPeople(final ShardDataTree shardDataTree) {
+    private Optional<NormalizedNode> readPeople(final ShardDataTree shardDataTree) {
         final DataTree dataTree = shardDataTree.getDataTree();
         // FIXME: this should not be called here
-        dataTree.setSchemaContext(peopleSchemaContext);
+        dataTree.setEffectiveModelContext(peopleSchemaContext);
 
         return shardDataTree.readNode(PeopleModel.BASE_PATH);
     }
 
-    private static ShardSnapshotState createSnapshot() {
+    private static ShardSnapshotState createSnapshot() throws DataValidationFailedException {
         final DataTree dataTree = new InMemoryDataTreeFactory().create(
             DataTreeConfiguration.DEFAULT_OPERATIONAL, SchemaContextHelper.select(SchemaContextHelper.CARS_YANG,
                 SchemaContextHelper.PEOPLE_YANG));
@@ -130,6 +131,6 @@ public class ShardRecoveryCoordinatorTest extends AbstractTest {
         dataTree.commit(dataTree.prepare(modification));
 
         return new ShardSnapshotState(new MetadataShardDataTreeSnapshot(dataTree.takeSnapshot().readNode(
-                YangInstanceIdentifier.EMPTY).get()));
+                YangInstanceIdentifier.of()).orElseThrow()));
     }
 }
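createSnapshot() above rebuilds a ShardSnapshotState from whatever the data tree currently holds at the root, now addressed via YangInstanceIdentifier.of() and orElseThrow(). A minimal sketch of just that capture step, assuming the data tree already contains committed data:

    import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
    import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTree;

    final class SnapshotSketch {
        static ShardSnapshotState capture(final DataTree dataTree) {
            // Read the current root and wrap it the same way createSnapshot() does.
            return new ShardSnapshotState(new MetadataShardDataTreeSnapshot(
                dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow()));
        }
    }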
@@ -5,7 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+package org.opendaylight.controller.cluster.datastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 
 import java.lang.management.ManagementFactory;
 import java.text.SimpleDateFormat;
@@ -13,7 +16,6 @@ import java.util.Date;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
@@ -25,7 +27,6 @@ public class ShardStatsTest {
 
     @Before
     public void setUp() throws Exception {
-
         shardStats = new ShardStats("shard-1", "DataStore", null);
         shardStats.registerMBean();
         mbeanServer = ManagementFactory.getPlatformMBeanServer();
@@ -41,10 +42,7 @@ public class ShardStatsTest {
 
     @Test
     public void testGetShardName() throws Exception {
-
-        Object attribute = mbeanServer.getAttribute(testMBeanName, "ShardName");
-        Assert.assertEquals(attribute, "shard-1");
-
+        assertEquals("shard-1", mbeanServer.getAttribute(testMBeanName, "ShardName"));
     }
 
     @Test
@@ -55,28 +53,21 @@ public class ShardStatsTest {
         shardStats.incrementCommittedTransactionCount();
 
         //now let us get from MBeanServer what is the transaction count.
-        Object attribute = mbeanServer.getAttribute(testMBeanName,
-            "CommittedTransactionsCount");
-        Assert.assertEquals(attribute, 3L);
-
-
+        assertEquals(3L, mbeanServer.getAttribute(testMBeanName, "CommittedTransactionsCount"));
     }
 
     @Test
     public void testGetLastCommittedTransactionTime() throws Exception {
         SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
-        Assert.assertEquals(shardStats.getLastCommittedTransactionTime(),
-            sdf.format(new Date(0L)));
+        assertEquals(sdf.format(new Date(0L)), shardStats.getLastCommittedTransactionTime());
         long millis = System.currentTimeMillis();
         shardStats.setLastCommittedTransactionTime(millis);
 
         //now let us get from MBeanServer what is the transaction count.
         Object attribute = mbeanServer.getAttribute(testMBeanName,
             "LastCommittedTransactionTime");
-        Assert.assertEquals(attribute, sdf.format(new Date(millis)));
-        Assert.assertNotEquals(attribute,
-            sdf.format(new Date(millis - 1)));
-
+        assertEquals(sdf.format(new Date(millis)), attribute);
+        assertNotEquals(attribute, sdf.format(new Date(millis - 1)));
     }
 
     @Test
@@ -85,11 +76,8 @@ public class ShardStatsTest {
         shardStats.incrementFailedTransactionsCount();
         shardStats.incrementFailedTransactionsCount();
 
-
         //now let us get from MBeanServer what is the transaction count.
-        Object attribute =
-            mbeanServer.getAttribute(testMBeanName, "FailedTransactionsCount");
-        Assert.assertEquals(attribute, 2L);
+        assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "FailedTransactionsCount"));
     }
 
     @Test
@@ -98,11 +86,8 @@ public class ShardStatsTest {
         shardStats.incrementAbortTransactionsCount();
         shardStats.incrementAbortTransactionsCount();
 
-
         //now let us get from MBeanServer what is the transaction count.
-        Object attribute =
-            mbeanServer.getAttribute(testMBeanName, "AbortTransactionsCount");
-        Assert.assertEquals(attribute, 2L);
+        assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "AbortTransactionsCount"));
     }
 
     @Test
@@ -111,49 +96,32 @@ public class ShardStatsTest {
         shardStats.incrementFailedReadTransactionsCount();
         shardStats.incrementFailedReadTransactionsCount();
 
-
         //now let us get from MBeanServer what is the transaction count.
-        Object attribute =
-            mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
-        Assert.assertEquals(attribute, 2L);
+        assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount"));
     }
 
     @Test
     public void testResetTransactionCounters() throws Exception {
-
         //let us increment committed transactions count and then check
         shardStats.incrementCommittedTransactionCount();
         shardStats.incrementCommittedTransactionCount();
         shardStats.incrementCommittedTransactionCount();
 
         //now let us get from MBeanServer what is the transaction count.
-        Object attribute = mbeanServer.getAttribute(testMBeanName,
-            "CommittedTransactionsCount");
-        Assert.assertEquals(attribute, 3L);
+        assertEquals(3L, mbeanServer.getAttribute(testMBeanName, "CommittedTransactionsCount"));
 
         //let us increment FailedReadTransactions count and then check
         shardStats.incrementFailedReadTransactionsCount();
         shardStats.incrementFailedReadTransactionsCount();
 
-
         //now let us get from MBeanServer what is the transaction count.
-        attribute =
-            mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
-        Assert.assertEquals(attribute, 2L);
-
+        assertEquals(2L, mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount"));
 
         //here we will reset the counters and check the above ones are 0 after reset
         mbeanServer.invoke(testMBeanName, "resetTransactionCounters", null, null);
 
         //now let us get from MBeanServer what is the transaction count.
-        attribute = mbeanServer.getAttribute(testMBeanName,
-            "CommittedTransactionsCount");
-        Assert.assertEquals(attribute, 0L);
-
-        attribute =
-            mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount");
-        Assert.assertEquals(attribute, 0L);
-
-
+        assertEquals(0L, mbeanServer.getAttribute(testMBeanName, "CommittedTransactionsCount"));
+        assertEquals(0L, mbeanServer.getAttribute(testMBeanName, "FailedReadTransactionsCount"));
     }
 }
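The ShardStats assertions above all reduce to a platform-MBeanServer attribute read compared against an expected primitive. A minimal sketch of that read path, assuming the MBean named by objectName has already been registered:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    final class MBeanReadSketch {
        static Object readCommittedTransactionsCount(final ObjectName objectName) throws Exception {
            final MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
            // JMX returns the long counter boxed as an Object attribute value.
            return mbeanServer.getAttribute(objectName, "CommittedTransactionsCount");
        }
    }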
index b3e8de2d9b70af550bfa0017ed3a0d873125da83..e0db8543f6b6a96beb697f1968f4bf9cd44acfd7 100644 (file)
@@ -7,9 +7,13 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -52,7 +56,6 @@ import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
@@ -91,30 +94,31 @@ import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
 import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
 import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
+import org.opendaylight.controller.cluster.raft.messages.Payload;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
 import org.opendaylight.controller.cluster.raft.persisted.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.persisted.SimpleReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.concepts.Identifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
@@ -169,7 +173,7 @@ public class ShardTest extends AbstractShardTest {
     public void testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration() throws Exception {
         final CountDownLatch onFirstElectionTimeout = new CountDownLatch(1);
         final CountDownLatch onChangeListenerRegistered = new CountDownLatch(1);
-        final Creator<Shard> creator = new Creator<Shard>() {
+        final Creator<Shard> creator = new Creator<>() {
             boolean firstElectionTimeout = true;
 
             @Override
@@ -202,8 +206,8 @@ public class ShardTest extends AbstractShardTest {
         final ActorRef dclActor = actorFactory.createActor(DataTreeChangeListenerActor.props(listener, path),
                 "testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration-DataChangeListener");
 
-        final TestActorRef<Shard> shard = actorFactory.createTestActor(
-                Props.create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
+        final TestActorRef<Shard> shard = actorFactory.createTestActor(Props.create(Shard.class,
+                new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
                 "testDataTreeChangeListenerNotifiedWhenNotTheLeaderOnRegistration");
 
         final ShardTestKit testKit = new ShardTestKit(getSystem());
@@ -240,8 +244,7 @@ public class ShardTest extends AbstractShardTest {
             CreateTransactionReply.class);
 
         final String path = reply.getTransactionPath().toString();
-        assertTrue("Unexpected transaction path " + path, path.startsWith(String.format(
-            "akka://test/user/testCreateTransaction/shard-%s-%s:ShardTransactionTest@0:",
+        assertThat(path, containsString(String.format("/user/testCreateTransaction/shard-%s-%s:ShardTransactionTest@0:",
             shardID.getShardName(), shardID.getMemberName().getName())));
     }
 
@@ -259,8 +262,8 @@ public class ShardTest extends AbstractShardTest {
             CreateTransactionReply.class);
 
         final String path = reply.getTransactionPath().toString();
-        assertTrue("Unexpected transaction path " + path, path.startsWith(String.format(
-            "akka://test/user/testCreateTransactionOnChain/shard-%s-%s:ShardTransactionTest@0:",
+        assertThat(path, containsString(String.format(
+            "/user/testCreateTransactionOnChain/shard-%s-%s:ShardTransactionTest@0:",
             shardID.getShardName(), shardID.getMemberName().getName())));
     }
 
@@ -292,15 +295,17 @@ public class ShardTest extends AbstractShardTest {
         final DataTree store = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_OPERATIONAL,
             SCHEMA_CONTEXT);
 
-        final ContainerNode container = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                    .withChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).addChild(
-                        ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)).build()).build();
+        final ContainerNode container = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
+                .addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1))
+                .build())
+            .build();
 
         writeToStore(store, TestModel.TEST_PATH, container);
 
-        final YangInstanceIdentifier root = YangInstanceIdentifier.EMPTY;
-        final NormalizedNode<?,?> expected = readStore(store, root);
+        final YangInstanceIdentifier root = YangInstanceIdentifier.of();
+        final NormalizedNode expected = readStore(store, root);
 
         final Snapshot snapshot = Snapshot.create(new ShardSnapshotState(new MetadataShardDataTreeSnapshot(expected)),
                 Collections.emptyList(), 1, 2, 3, 4, -1, null, null);
@@ -344,7 +349,7 @@ public class ShardTest extends AbstractShardTest {
         while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
             Uninterruptibles.sleepUninterruptibly(75, TimeUnit.MILLISECONDS);
 
-            final NormalizedNode<?,?> actual = readStore(shard, TestModel.TEST_PATH);
+            final NormalizedNode actual = readStore(shard, TestModel.TEST_PATH);
             if (actual != null) {
                 assertEquals("Applied state", node, actual);
                 return;
@@ -373,7 +378,7 @@ public class ShardTest extends AbstractShardTest {
 
         // Add some ModificationPayload entries
         for (int i = 1; i <= nListEntries; i++) {
-            listEntryKeys.add(Integer.valueOf(i));
+            listEntryKeys.add(i);
 
             final YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
                     .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
@@ -389,7 +394,7 @@ public class ShardTest extends AbstractShardTest {
         InMemoryJournal.addEntry(shardID.toString(), nListEntries + 2,
             new ApplyJournalEntries(nListEntries));
 
-        testRecovery(listEntryKeys);
+        testRecovery(listEntryKeys, true);
     }
 
     @Test
@@ -479,7 +484,9 @@ public class ShardTest extends AbstractShardTest {
             ImmutableNodes.containerNode(TestModel.TEST_QNAME), false), testKit.getRef());
         final ReadyTransactionReply readyReply = ReadyTransactionReply
                 .fromSerializable(testKit.expectMsgClass(duration, ReadyTransactionReply.class));
-        assertEquals("Cohort path", shard.path().toString(), readyReply.getCohortPath());
+
+        String pathSuffix = shard.path().toString().replaceFirst("akka://test", "");
+        assertThat(readyReply.getCohortPath(), endsWith(pathSuffix));
         // Send the CanCommitTransaction message for the first Tx.
 
         shard.tell(new CanCommitTransaction(transactionID1, CURRENT_VERSION).toSerializable(), testKit.getRef());
@@ -551,7 +558,7 @@ public class ShardTest extends AbstractShardTest {
 
         verifyOuterListEntry(shard, 1);
 
-        verifyLastApplied(shard, 5);
+        verifyLastApplied(shard, 3);
     }
 
     @Test
@@ -640,6 +647,7 @@ public class ShardTest extends AbstractShardTest {
         verifyOuterListEntry(shard, 1);
     }
 
+    @Deprecated(since = "9.0.0", forRemoval = true)
     @Test(expected = IllegalStateException.class)
     public void testBatchedModificationsReadyWithIncorrectTotalMessageCount() throws Exception {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
@@ -666,6 +674,7 @@ public class ShardTest extends AbstractShardTest {
     }
 
     @Test
+    @Deprecated(since = "9.0.0", forRemoval = true)
     public void testBatchedModificationsWithOperationFailure() {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
         final TestActorRef<Shard> shard = actorFactory.createTestActor(
@@ -680,9 +689,10 @@ public class ShardTest extends AbstractShardTest {
 
         final TransactionIdentifier transactionID = nextTransactionId();
 
-        final ContainerNode invalidData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk")).build();
+        final ContainerNode invalidData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.JUNK_QNAME, "junk"))
+            .build();
 
         BatchedModifications batched = new BatchedModifications(transactionID, CURRENT_VERSION);
         batched.addModification(new MergeModification(TestModel.TEST_PATH, invalidData));
@@ -749,15 +759,16 @@ public class ShardTest extends AbstractShardTest {
 
         // Verify data in the data store.
 
-        final NormalizedNode<?, ?> actualNode = readStore(shard, path);
+        final NormalizedNode actualNode = readStore(shard, path);
         assertEquals("Stored node", containerNode, actualNode);
     }
 
     @Test
+    @Deprecated(since = "9.0.0", forRemoval = true)
     public void testOnBatchedModificationsWhenNotLeader() {
         final AtomicBoolean overrideLeaderCalls = new AtomicBoolean();
         final ShardTestKit testKit = new ShardTestKit(getSystem());
-        final Creator<Shard> creator = new Creator<Shard>() {
+        final Creator<Shard> creator = new Creator<>() {
             private static final long serialVersionUID = 1L;
 
             @Override
@@ -777,8 +788,8 @@ public class ShardTest extends AbstractShardTest {
             }
         };
 
-        final TestActorRef<Shard> shard = actorFactory.createTestActor(Props
-            .create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
+        final TestActorRef<Shard> shard = actorFactory.createTestActor(Props.create(Shard.class,
+            new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
             "testOnBatchedModificationsWhenNotLeader");
 
         ShardTestKit.waitUntilLeader(shard);
@@ -794,6 +805,7 @@ public class ShardTest extends AbstractShardTest {
     }
 
     @Test
+    @Deprecated(since = "9.0.0", forRemoval = true)
     public void testTransactionMessagesWithNoLeader() {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
         dataStoreContextBuilder.customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName())
@@ -839,7 +851,7 @@ public class ShardTest extends AbstractShardTest {
         ShardTestKit.waitUntilLeader(shard);
 
         final TransactionIdentifier transactionID = nextTransactionId();
-        final NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+        final NormalizedNode containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
         if (readWrite) {
             shard.tell(prepareForwardedReadyTransaction(shard, transactionID, TestModel.TEST_PATH, containerNode, true),
                 testKit.getRef());
@@ -850,7 +862,7 @@ public class ShardTest extends AbstractShardTest {
 
         testKit.expectMsgClass(Duration.ofSeconds(5), CommitTransactionReply.class);
 
-        final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+        final NormalizedNode actualNode = readStore(shard, TestModel.TEST_PATH);
         assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
     }
 
@@ -883,7 +895,7 @@ public class ShardTest extends AbstractShardTest {
 
         testKit.expectMsgClass(CommitTransactionReply.class);
 
-        final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
+        final NormalizedNode actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
         assertEquals(TestModel.OUTER_LIST_QNAME.getLocalName(), mergeData, actualNode);
     }
 
@@ -928,7 +940,7 @@ public class ShardTest extends AbstractShardTest {
         shard.tell(new CommitTransaction(txId, CURRENT_VERSION).toSerializable(), testKit.getRef());
         testKit.expectMsgClass(CommitTransactionReply.class);
 
-        final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
+        final NormalizedNode actualNode = readStore(shard, TestModel.OUTER_LIST_PATH);
         assertEquals(TestModel.OUTER_LIST_QNAME.getLocalName(), mergeData, actualNode);
     }
 
@@ -947,7 +959,7 @@ public class ShardTest extends AbstractShardTest {
         final Duration duration = Duration.ofSeconds(5);
 
         final TransactionIdentifier transactionID = nextTransactionId();
-        final NormalizedNode<?, ?> containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+        final NormalizedNode containerNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
         shard.tell(prepareBatchedModifications(transactionID, TestModel.TEST_PATH, containerNode, false),
             testKit.getRef());
         testKit.expectMsgClass(duration, ReadyTransactionReply.class);
@@ -964,7 +976,7 @@ public class ShardTest extends AbstractShardTest {
         shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), testKit.getRef());
         testKit.expectMsgClass(duration, CommitTransactionReply.class);
 
-        final NormalizedNode<?, ?> actualNode = readStore(shard, TestModel.TEST_PATH);
+        final NormalizedNode actualNode = readStore(shard, TestModel.TEST_PATH);
         assertEquals(TestModel.TEST_QNAME.getLocalName(), containerNode, actualNode);
     }
 
@@ -1026,8 +1038,7 @@ public class ShardTest extends AbstractShardTest {
         // Committed transaction count should increase as usual
         assertEquals(1, shardStats.getCommittedTransactionsCount());
 
-        // Commit index should advance as we do not have an empty
-        // modification
+        // Commit index should advance by 1 to account for disabling metadata
         assertEquals(1, shardStats.getCommitIndex());
     }
 
@@ -1085,7 +1096,7 @@ public class ShardTest extends AbstractShardTest {
         // Wait for the 2nd Tx to complete the canCommit phase.
 
         final CountDownLatch latch = new CountDownLatch(1);
-        canCommitFuture.onComplete(new OnComplete<Object>() {
+        canCommitFuture.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object resp) {
                 latch.countDown();
@@ -1155,7 +1166,7 @@ public class ShardTest extends AbstractShardTest {
         // Wait for the 2nd Tx to complete the canCommit phase.
 
         final CountDownLatch latch = new CountDownLatch(1);
-        canCommitFuture.onComplete(new OnComplete<Object>() {
+        canCommitFuture.onComplete(new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object resp) {
                 latch.countDown();
@@ -1183,7 +1194,7 @@ public class ShardTest extends AbstractShardTest {
         final Duration duration = Duration.ofSeconds(5);
         final TransactionIdentifier transactionID1 = nextTransactionId();
 
-        doThrow(new DataValidationFailedException(YangInstanceIdentifier.EMPTY, "mock canCommit failure"))
+        doThrow(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock canCommit failure"))
         .doNothing().when(dataTree).validate(any(DataTreeModification.class));
 
         shard.tell(newBatchedModifications(transactionID1, TestModel.TEST_PATH,
@@ -1224,7 +1235,7 @@ public class ShardTest extends AbstractShardTest {
 
         ShardTestKit.waitUntilLeader(shard);
 
-        doThrow(new DataValidationFailedException(YangInstanceIdentifier.EMPTY, "mock canCommit failure"))
+        doThrow(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock canCommit failure"))
         .doNothing().when(dataTree).validate(any(DataTreeModification.class));
 
         final Duration duration = Duration.ofSeconds(5);
@@ -1261,8 +1272,7 @@ public class ShardTest extends AbstractShardTest {
         final ShardTestKit testKit = new ShardTestKit(getSystem());
         final Creator<Shard> creator = () -> new Shard(newShardBuilder()) {
             @Override
-            void persistPayload(final Identifier id, final Payload payload,
-                    final boolean batchHint) {
+            void persistPayload(final Identifier id, final Payload payload, final boolean batchHint) {
                 // Simulate an AbortTransaction message occurring during
                 // replication, after
                 // persisting and before finishing the commit to the
@@ -1273,8 +1283,8 @@ public class ShardTest extends AbstractShardTest {
             }
         };
 
-        final TestActorRef<Shard> shard = actorFactory.createTestActor(Props
-            .create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
+        final TestActorRef<Shard> shard = actorFactory.createTestActor(Props.create(Shard.class,
+            new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
             "testAbortWithCommitPending");
 
         ShardTestKit.waitUntilLeader(shard);
@@ -1293,7 +1303,7 @@ public class ShardTest extends AbstractShardTest {
         shard.tell(new CommitTransaction(transactionID, CURRENT_VERSION).toSerializable(), testKit.getRef());
         testKit.expectMsgClass(duration, CommitTransactionReply.class);
 
-        final NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST_PATH);
+        final NormalizedNode node = readStore(shard, TestModel.TEST_PATH);
 
         // Since we're simulating an abort occurring during replication
         // and before finish commit,
@@ -1361,7 +1371,7 @@ public class ShardTest extends AbstractShardTest {
         shard.tell(new CommitTransaction(transactionID2, CURRENT_VERSION).toSerializable(), testKit.getRef());
         testKit.expectMsgClass(duration, CommitTransactionReply.class);
 
-        final NormalizedNode<?, ?> node = readStore(shard, listNodePath);
+        final NormalizedNode node = readStore(shard, listNodePath);
         assertNotNull(listNodePath + " not found", node);
     }
 
@@ -1521,7 +1531,7 @@ public class ShardTest extends AbstractShardTest {
 
         testKit.expectMsgClass(duration, CommitTransactionReply.class);
 
-        final NormalizedNode<?, ?> node = readStore(shard, TestModel.TEST2_PATH);
+        final NormalizedNode node = readStore(shard, TestModel.TEST2_PATH);
         assertNotNull(TestModel.TEST2_PATH + " not found", node);
     }
 
@@ -1706,9 +1716,9 @@ public class ShardTest extends AbstractShardTest {
 
         dataStoreContextBuilder.persistent(persistent);
 
-        class TestShard extends Shard {
+        final class TestShard extends Shard {
 
-            protected TestShard(final AbstractBuilder<?, ?> builder) {
+            TestShard(final AbstractBuilder<?, ?> builder) {
                 super(builder);
                 setPersistence(new TestPersistentDataProvider(super.persistence()));
             }
@@ -1731,14 +1741,13 @@ public class ShardTest extends AbstractShardTest {
 
         final Creator<Shard> creator = () -> new TestShard(newShardBuilder());
 
-        final TestActorRef<Shard> shard = actorFactory.createTestActor(Props
-            .create(new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()),
-            shardActorName);
+        final TestActorRef<Shard> shard = actorFactory.createTestActor(Props.create(Shard.class,
+            new DelegatingShardCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()), shardActorName);
 
         ShardTestKit.waitUntilLeader(shard);
         writeToStore(shard, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
 
-        final NormalizedNode<?, ?> expectedRoot = readStore(shard, YangInstanceIdentifier.EMPTY);
+        final NormalizedNode expectedRoot = readStore(shard, YangInstanceIdentifier.of());
 
         // Trigger creation of a snapshot by ensuring
         final RaftActorContext raftActorContext = ((TestShard) shard.underlyingActor()).getRaftActorContext();
@@ -1750,7 +1759,7 @@ public class ShardTest extends AbstractShardTest {
     }
 
     private static void awaitAndValidateSnapshot(final AtomicReference<CountDownLatch> latch,
-            final AtomicReference<Object> savedSnapshot, final NormalizedNode<?, ?> expectedRoot)
+            final AtomicReference<Object> savedSnapshot, final NormalizedNode expectedRoot)
                     throws InterruptedException {
         assertTrue("Snapshot saved", latch.get().await(5, TimeUnit.SECONDS));
 
@@ -1762,9 +1771,9 @@ public class ShardTest extends AbstractShardTest {
         savedSnapshot.set(null);
     }
 
-    private static void verifySnapshot(final Snapshot snapshot, final NormalizedNode<?, ?> expectedRoot) {
-        final NormalizedNode<?, ?> actual = ((ShardSnapshotState)snapshot.getState()).getSnapshot().getRootNode().get();
-        assertEquals("Root node", expectedRoot, actual);
+    private static void verifySnapshot(final Snapshot snapshot, final NormalizedNode expectedRoot) {
+        assertEquals("Root node", expectedRoot,
+            ((ShardSnapshotState) snapshot.getState()).getSnapshot().getRootNode().orElseThrow());
     }
 
     /**
@@ -1781,16 +1790,16 @@ public class ShardTest extends AbstractShardTest {
         commitTransaction(store, putTransaction);
 
 
-        final NormalizedNode<?, ?> expected = readStore(store, YangInstanceIdentifier.EMPTY);
+        final NormalizedNode expected = readStore(store, YangInstanceIdentifier.of());
 
         final DataTreeModification writeTransaction = store.takeSnapshot().newModification();
 
-        writeTransaction.delete(YangInstanceIdentifier.EMPTY);
-        writeTransaction.write(YangInstanceIdentifier.EMPTY, expected);
+        writeTransaction.delete(YangInstanceIdentifier.of());
+        writeTransaction.write(YangInstanceIdentifier.of(), expected);
 
         commitTransaction(store, writeTransaction);
 
-        final NormalizedNode<?, ?> actual = readStore(store, YangInstanceIdentifier.EMPTY);
+        final NormalizedNode actual = readStore(store, YangInstanceIdentifier.of());
 
         assertEquals(expected, actual);
     }
@@ -1855,9 +1864,9 @@ public class ShardTest extends AbstractShardTest {
 
         ShardLeaderStateChanged leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener,
             ShardLeaderStateChanged.class);
-        assertTrue("getLocalShardDataTree present", leaderStateChanged.getLocalShardDataTree().isPresent());
-        assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(),
-            leaderStateChanged.getLocalShardDataTree().get());
+        final var dataTree = leaderStateChanged.localShardDataTree();
+        assertNotNull("getLocalShardDataTree present", dataTree);
+        assertSame("getLocalShardDataTree", shard.underlyingActor().getDataStore().getDataTree(), dataTree);
 
         MessageCollectorActor.clearMessages(listener);
 
@@ -1866,7 +1875,7 @@ public class ShardTest extends AbstractShardTest {
         shard.tell(new RequestVote(10000, "member2", 50, 50), testKit.getRef());
 
         leaderStateChanged = MessageCollectorActor.expectFirstMatching(listener, ShardLeaderStateChanged.class);
-        assertFalse("getLocalShardDataTree present", leaderStateChanged.getLocalShardDataTree().isPresent());
+        assertNull("getLocalShardDataTree present", leaderStateChanged.localShardDataTree());
     }
 
     @Test
index b22ca277e507479ac86846c912f18e9f5d0615e9..515c0b5b92bd47af4df09dfcd6e60cb2e6093f38 100644 (file)
@@ -17,7 +17,6 @@ import akka.testkit.javadsl.EventFilter;
 import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
@@ -48,10 +47,9 @@ public class ShardTestKit extends TestKit {
         for (int i = 0; i < 20 * 5; i++) {
             Future<Object> future = Patterns.ask(shard, FindLeader.INSTANCE, new Timeout(duration));
             try {
-                final Optional<String> maybeLeader = ((FindLeaderReply) Await.result(future, duration))
-                        .getLeaderActor();
+                final var maybeLeader = ((FindLeaderReply) Await.result(future, duration)).getLeaderActor();
                 if (maybeLeader.isPresent()) {
-                    return maybeLeader.get();
+                    return maybeLeader.orElseThrow();
                 }
             } catch (TimeoutException e) {
                 LOG.trace("FindLeader timed out", e);
@@ -73,13 +71,12 @@ public class ShardTestKit extends TestKit {
         for (int i = 0; i < 20 * 5; i++) {
             Future<Object> future = Patterns.ask(shard, FindLeader.INSTANCE, new Timeout(duration));
             try {
-                final Optional<String> maybeLeader = ((FindLeaderReply) Await.result(future, duration))
-                        .getLeaderActor();
+                final var maybeLeader = ((FindLeaderReply) Await.result(future, duration)).getLeaderActor();
                 if (!maybeLeader.isPresent()) {
                     return;
                 }
 
-                lastResponse = maybeLeader.get();
+                lastResponse = maybeLeader.orElseThrow();
             } catch (TimeoutException e) {
                 lastResponse = e;
             } catch (Exception e) {
index 65ca16a0f536d1b43eb8da1c9a15db2b26a741ab..7a96f263c66a12de3c45179657667834bdb8d69c 100644 (file)
@@ -7,22 +7,24 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.testkit.TestActorRef;
 import java.util.concurrent.TimeUnit;
+import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.DataExists;
 import org.opendaylight.controller.cluster.datastore.messages.ReadData;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.common.api.ReadFailedException;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
@@ -32,12 +34,13 @@ import scala.concurrent.duration.FiniteDuration;
  *
  * @author Basheeruddin Ahmed
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardTransactionFailureTest extends AbstractActorTest {
-    private static final SchemaContext TEST_SCHEMA_CONTEXT = TestModel.createTestContext();
+    private static final EffectiveModelContext TEST_SCHEMA_CONTEXT = TestModel.createTestContext();
     private static final TransactionType RO = TransactionType.READ_ONLY;
     private static final TransactionType RW = TransactionType.READ_WRITE;
 
-    private static final Shard MOCK_SHARD = Mockito.mock(Shard.class);
+    private static final Shard MOCK_SHARD = mock(Shard.class);
 
     private static final ShardDataTree STORE = new ShardDataTree(MOCK_SHARD, TEST_SCHEMA_CONTEXT, TreeType.OPERATIONAL);
 
@@ -55,6 +58,11 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
         return shard;
     }
 
+    @Before
+    public void setup() {
+        doReturn(new ShardStats("inventory", "mxBeanType", MOCK_SHARD)).when(MOCK_SHARD).getShardMBean();
+    }
+
     @Test(expected = ReadFailedException.class)
     public void testNegativeReadWithReadOnlyTransactionClosed() throws Exception {
 
@@ -66,12 +74,12 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
                 "testNegativeReadWithReadOnlyTransactionClosed");
 
         Future<Object> future = akka.pattern.Patterns.ask(subject,
-                new ReadData(YangInstanceIdentifier.EMPTY, DataStoreVersions.CURRENT_VERSION), 3000);
+                new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
 
         subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
 
-        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.EMPTY,
+        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.of(),
                 DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
     }
@@ -88,12 +96,12 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
                 "testNegativeReadWithReadWriteTransactionClosed");
 
         Future<Object> future = akka.pattern.Patterns.ask(subject,
-                new ReadData(YangInstanceIdentifier.EMPTY, DataStoreVersions.CURRENT_VERSION), 3000);
+                new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
 
         subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
 
-        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.EMPTY,
+        future = akka.pattern.Patterns.ask(subject, new ReadData(YangInstanceIdentifier.of(),
                 DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
     }
@@ -109,13 +117,13 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
                 "testNegativeExistsWithReadWriteTransactionClosed");
 
         Future<Object> future = akka.pattern.Patterns.ask(subject,
-                new DataExists(YangInstanceIdentifier.EMPTY, DataStoreVersions.CURRENT_VERSION), 3000);
+                new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
 
         subject.underlyingActor().getDOMStoreTransaction().abortFromTransactionActor();
 
         future = akka.pattern.Patterns.ask(subject,
-                new DataExists(YangInstanceIdentifier.EMPTY, DataStoreVersions.CURRENT_VERSION), 3000);
+                new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION), 3000);
         Await.result(future, FiniteDuration.create(3, TimeUnit.SECONDS));
     }
 }
index d67d5b4763eb281d5e3f266ace0f610f8fba8c59..d80a5d1fc04b260f6907467ed365d4bb13b94e22 100644 (file)
@@ -47,13 +47,15 @@ import org.opendaylight.controller.cluster.datastore.modification.WriteModificat
 import org.opendaylight.controller.cluster.raft.TestActorFactory;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ShardTransactionTest extends AbstractActorTest {
 
     private static final TransactionType RO = TransactionType.READ_ONLY;
@@ -62,7 +64,7 @@ public class ShardTransactionTest extends AbstractActorTest {
 
     private static final ShardIdentifier SHARD_IDENTIFIER =
         ShardIdentifier.create("inventory", MEMBER_NAME, "config");
-    private static final SchemaContext TEST_MODEL = TestModel.createTestContext();
+    private static final EffectiveModelContext TEST_MODEL = TestModel.createTestContext();
 
     private DatastoreContext datastoreContext = DatastoreContext.newBuilder().persistent(false).build();
 
@@ -104,7 +106,7 @@ public class ShardTransactionTest extends AbstractActorTest {
     }
 
     private void testOnReceiveReadData(final ActorRef transaction) {
-        transaction.tell(new ReadData(YangInstanceIdentifier.EMPTY, DataStoreVersions.CURRENT_VERSION),
+        transaction.tell(new ReadData(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION),
             testKit.getRef());
 
         ReadDataReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), ReadDataReply.class);
@@ -135,7 +137,7 @@ public class ShardTransactionTest extends AbstractActorTest {
     }
 
     private void testOnReceiveDataExistsPositive(final ActorRef transaction) {
-        transaction.tell(new DataExists(YangInstanceIdentifier.EMPTY, DataStoreVersions.CURRENT_VERSION),
+        transaction.tell(new DataExists(YangInstanceIdentifier.of(), DataStoreVersions.CURRENT_VERSION),
             testKit.getRef());
 
         DataExistsReply reply = testKit.expectMsgClass(Duration.ofSeconds(5), DataExistsReply.class);
@@ -166,14 +168,15 @@ public class ShardTransactionTest extends AbstractActorTest {
         final ActorRef transaction = newTransactionActor(RW, mockWriteTx, "testOnReceiveBatchedModifications");
 
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        NormalizedNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
-        NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME))
-                .build();
+        NormalizedNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
 
         YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
 
@@ -204,9 +207,10 @@ public class ShardTransactionTest extends AbstractActorTest {
         watcher.watch(transaction);
 
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        NormalizedNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         final TransactionIdentifier tx1 = nextTransactionId();
         BatchedModifications batched = new BatchedModifications(tx1, DataStoreVersions.CURRENT_VERSION);
@@ -235,9 +239,10 @@ public class ShardTransactionTest extends AbstractActorTest {
         watcher.watch(transaction);
 
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        NormalizedNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         BatchedModifications batched = new BatchedModifications(nextTransactionId(),
             DataStoreVersions.CURRENT_VERSION);
index 7a4cb9b5efecc3233826c92f6ac7331bce478e4a..b8390f9fcdbb9f19f2c9ddf91d780ca7db65bf6d 100644 (file)
@@ -23,20 +23,23 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.Future;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ConflictingModificationAppliedException;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateTip;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
 
 /**
  * Unit tests for SimpleShardDataTreeCohort.
  *
  * @author Thomas Pantelis
  */
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class SimpleShardDataTreeCohortTest extends AbstractTest {
     @Mock
     private ShardDataTree mockShardDataTree;
@@ -54,8 +57,6 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
-
         doReturn(Optional.empty()).when(mockUserCohorts).commit();
         doReturn(Optional.empty()).when(mockUserCohorts).abort();
 
@@ -75,10 +76,10 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
         }).when(mockShardDataTree).startCanCommit(cohort);
 
         @SuppressWarnings("unchecked")
-        final FutureCallback<Void> callback = mock(FutureCallback.class);
+        final FutureCallback<Empty> callback = mock(FutureCallback.class);
         cohort.canCommit(callback);
 
-        verify(callback).onSuccess(null);
+        verify(callback).onSuccess(Empty.value());
         verifyNoMoreInteractions(callback);
     }
 
@@ -89,7 +90,7 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
         }).when(mockShardDataTree).startCanCommit(cohort);
 
         @SuppressWarnings("unchecked")
-        final FutureCallback<Void> callback = mock(FutureCallback.class);
+        final FutureCallback<Empty> callback = mock(FutureCallback.class);
         cohort.canCommit(callback);
 
         verify(callback).onFailure(cause);
@@ -98,12 +99,12 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
 
     @Test
     public void testCanCommitWithConflictingModEx() {
-        testValidatationPropagates(new ConflictingModificationAppliedException(YangInstanceIdentifier.EMPTY, "mock"));
+        testValidatationPropagates(new ConflictingModificationAppliedException(YangInstanceIdentifier.of(), "mock"));
     }
 
     @Test
     public void testCanCommitWithDataValidationEx() {
-        testValidatationPropagates(new DataValidationFailedException(YangInstanceIdentifier.EMPTY, "mock"));
+        testValidatationPropagates(new DataValidationFailedException(YangInstanceIdentifier.of(), "mock"));
     }
 
     @Test
@@ -209,11 +210,11 @@ public class SimpleShardDataTreeCohortTest extends AbstractTest {
     }
 
     private static Future<?> abort(final ShardDataTreeCohort cohort) {
-        final CompletableFuture<Void> f = new CompletableFuture<>();
-        cohort.abort(new FutureCallback<Void>() {
+        final CompletableFuture<Empty> f = new CompletableFuture<>();
+        cohort.abort(new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void result) {
-                f.complete(null);
+            public void onSuccess(final Empty result) {
+                f.complete(result);
             }
 
             @Override
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TestShard.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TestShard.java
new file mode 100644 (file)
index 0000000..9eb20c0
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Predicate;
+import org.opendaylight.controller.cluster.datastore.persisted.FrontendShardDataTreeSnapshotMetadata;
+
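+/**
+ * {@link Shard} specialization for tests. In addition to normal shard behavior it understands
+ * {@link StartDropMessages}/{@link StopDropMessages}, which install and remove per-class predicates used to
+ * silently drop matching commands, and {@link RequestFrontendMetadata}, which replies with a snapshot of the
+ * shard's frontend metadata.
+ */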
+public class TestShard extends Shard {
+    public static class Builder extends Shard.Builder {
+        Builder() {
+            super(TestShard.class);
+        }
+    }
+
+    // Message to request FrontendMetadata
+    public static final class RequestFrontendMetadata {
+
+    }
+
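+    // Common base for the drop-control messages below, recording which message class is affected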
+    private abstract static class DropMessages<T> {
+        private final Class<T> msgClass;
+
+        DropMessages(final Class<T> msgClass) {
+            this.msgClass = requireNonNull(msgClass);
+        }
+
+        final Class<T> getMsgClass() {
+            return msgClass;
+        }
+    }
+
+    public static class StartDropMessages<T> extends DropMessages<T> {
+        public StartDropMessages(final Class<T> msgClass) {
+            super(msgClass);
+        }
+    }
+
+    public static class StopDropMessages<T> extends DropMessages<T> {
+        public StopDropMessages(final Class<T> msgClass) {
+            super(msgClass);
+        }
+    }
+
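+    // Message classes currently being dropped, each mapped to the predicate selecting which instances to drop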
+    private final Map<Class<?>, Predicate<?>> dropMessages = new ConcurrentHashMap<>();
+
+    protected TestShard(AbstractBuilder<?, ?> builder) {
+        super(builder);
+    }
+
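+    // Reply to RequestFrontendMetadata with a snapshot of the frontend metadata, defer everything else to Shard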
+    @Override
+    protected void handleNonRaftCommand(Object message) {
+        if (message instanceof RequestFrontendMetadata) {
+            FrontendShardDataTreeSnapshotMetadata metadataSnapshot = frontendMetadata.toSnapshot();
+            sender().tell(metadataSnapshot, self());
+        } else {
+            super.handleNonRaftCommand(message);
+        }
+    }
+
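+    // Intercept the drop-control messages; all other commands are either dropped or handled by dropOrHandle()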
+    @Override
+    protected void handleCommand(Object message) {
+        if (message instanceof StartDropMessages) {
+            startDropMessages(((StartDropMessages<?>) message).getMsgClass());
+        } else if (message instanceof StopDropMessages) {
+            stopDropMessages(((StopDropMessages<?>) message).getMsgClass());
+        } else {
+            dropOrHandle(message);
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    private <T> void dropOrHandle(T message) {
+        Predicate<T> drop = (Predicate<T>) dropMessages.get(message.getClass());
+        if (drop == null || !drop.test(message)) {
+            super.handleCommand(message);
+        }
+    }
+
+    private void startDropMessages(final Class<?> msgClass) {
+        dropMessages.put(msgClass, msg -> true);
+    }
+
+    <T> void startDropMessages(final Class<T> msgClass, final Predicate<T> filter) {
+        dropMessages.put(msgClass, filter);
+    }
+
+    public void stopDropMessages(final Class<?> msgClass) {
+        dropMessages.remove(msgClass);
+    }
+
+    public static TestShard.Builder builder() {
+        return new TestShard.Builder();
+    }
+}
@@ -12,8 +12,6 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorSelection;
 import akka.dispatch.OnComplete;
-import com.google.common.base.Supplier;
-import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -23,6 +21,8 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Supplier;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
@@ -31,6 +31,9 @@ import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransacti
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.common.Empty;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -38,9 +41,11 @@ import scala.concurrent.Future;
 /**
  * ThreePhaseCommitCohortProxy represents a set of remote cohort proxies.
  */
-public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<ActorSelection> {
-
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCohort {
     private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
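+    // Pre-completed future returned from preCommit(), which is a no-op because the shard performs
+    // preCommit atomically with the commit phase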
+    private static final @NonNull ListenableFuture<Empty> IMMEDIATE_EMPTY_SUCCESS =
+        Futures.immediateFuture(Empty.value());
 
     private static final MessageSupplier COMMIT_MESSAGE_SUPPLIER = new MessageSupplier() {
         @Override
@@ -66,24 +71,47 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }
     };
 
+    private static final OperationCallback NO_OP_CALLBACK = new OperationCallback() {
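+        // All callbacks intentionally do nothing: used where no operation tracking is required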
+        @Override
+        public void run() {
+        }
+
+        @Override
+        public void success() {
+        }
+
+        @Override
+        public void failure() {
+        }
+
+        @Override
+        public void pause() {
+        }
+
+        @Override
+        public void resume() {
+        }
+    };
+
     private final ActorUtils actorUtils;
     private final List<CohortInfo> cohorts;
-    private final SettableFuture<Void> cohortsResolvedFuture = SettableFuture.create();
+    private final SettableFuture<Empty> cohortsResolvedFuture = SettableFuture.create();
     private final TransactionIdentifier transactionId;
     private volatile OperationCallback commitOperationCallback;
 
-    public ThreePhaseCommitCohortProxy(final ActorUtils actorUtils, final List<CohortInfo> cohorts,
+    ThreePhaseCommitCohortProxy(final ActorUtils actorUtils, final List<CohortInfo> cohorts,
             final TransactionIdentifier transactionId) {
         this.actorUtils = actorUtils;
         this.cohorts = cohorts;
         this.transactionId = requireNonNull(transactionId);
 
         if (cohorts.isEmpty()) {
-            cohortsResolvedFuture.set(null);
+            cohortsResolvedFuture.set(Empty.value());
         }
     }
 
-    private ListenableFuture<Void> resolveCohorts() {
+    private ListenableFuture<Empty> resolveCohorts() {
         if (cohortsResolvedFuture.isDone()) {
             return cohortsResolvedFuture;
         }
@@ -105,7 +133,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                             info.setResolvedActor(actor);
                             if (done) {
                                 LOG.debug("Tx {}: successfully resolved all cohort actors", transactionId);
-                                cohortsResolvedFuture.set(null);
+                                cohortsResolvedFuture.set(Empty.value());
                             }
                         }
                     }
@@ -128,9 +156,9 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         // extracted from ReadyTransactionReply messages by the Futures that were obtained earlier
         // and passed to us from upstream processing. If any one fails then  we'll fail canCommit.
 
-        Futures.addCallback(resolveCohorts(), new FutureCallback<Void>() {
+        Futures.addCallback(resolveCohorts(), new FutureCallback<>() {
             @Override
-            public void onSuccess(final Void notUsed) {
+            public void onSuccess(final Empty result) {
                 finishCanCommit(returnFuture);
             }
 
@@ -158,7 +186,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
 
         final Iterator<CohortInfo> iterator = cohorts.iterator();
 
-        final OnComplete<Object> onComplete = new OnComplete<Object>() {
+        final OnComplete<Object> onComplete = new OnComplete<>() {
             @Override
             public void onComplete(final Throwable failure, final Object response) {
                 if (failure != null) {
@@ -193,7 +221,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                     sendCanCommitTransaction(iterator.next(), this);
                 } else {
                     LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
-                    returnFuture.set(Boolean.valueOf(result));
+                    returnFuture.set(result);
                 }
 
             }
@@ -213,7 +241,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
     }
 
     private Future<Iterable<Object>> invokeCohorts(final MessageSupplier messageSupplier) {
-        List<Future<Object>> futureList = Lists.newArrayListWithCapacity(cohorts.size());
+        List<Future<Object>> futureList = new ArrayList<>(cohorts.size());
         for (CohortInfo cohort : cohorts) {
             Object message = messageSupplier.newMessage(transactionId, cohort.getActorVersion());
 
@@ -227,35 +255,34 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
     }
 
     @Override
-    public ListenableFuture<Void> preCommit() {
-        // We don't need to do anything here - preCommit is done atomically with the commit phase
-        // by the shard.
-        return IMMEDIATE_VOID_SUCCESS;
+    public ListenableFuture<Empty> preCommit() {
+        // We don't need to do anything here - preCommit is done atomically with the commit phase by the shard.
+        return IMMEDIATE_EMPTY_SUCCESS;
     }
 
     @Override
-    public ListenableFuture<Void> abort() {
+    public ListenableFuture<Empty> abort() {
         // Note - we pass false for propagateException. In the front-end data broker, this method
         // is called when one of the 3 phases fails with an exception. We'd rather have that
         // original exception propagated to the client. If our abort fails and we propagate the
         // exception then that exception will supersede and suppress the original exception. But
         // it's the original exception that is the root cause and of more interest to the client.
 
-        return voidOperation("abort", ABORT_MESSAGE_SUPPLIER,
-                AbortTransactionReply.class, false, OperationCallback.NO_OP_CALLBACK);
+        return operation("abort", Empty.value(), ABORT_MESSAGE_SUPPLIER, AbortTransactionReply.class, false,
+            NO_OP_CALLBACK);
     }
 
     @Override
-    public ListenableFuture<Void> commit() {
+    public ListenableFuture<? extends CommitInfo> commit() {
         OperationCallback operationCallback = commitOperationCallback != null ? commitOperationCallback :
-            OperationCallback.NO_OP_CALLBACK;
+            NO_OP_CALLBACK;
 
-        return voidOperation("commit", COMMIT_MESSAGE_SUPPLIER,
-                CommitTransactionReply.class, true, operationCallback);
+        return operation("commit", CommitInfo.empty(), COMMIT_MESSAGE_SUPPLIER, CommitTransactionReply.class, true,
+            operationCallback);
     }
 
     @SuppressWarnings("checkstyle:IllegalCatch")
-    private static boolean successfulFuture(final ListenableFuture<Void> future) {
+    private static boolean successfulFuture(final ListenableFuture<?> future) {
         if (!future.isDone()) {
             return false;
         }
@@ -268,26 +295,26 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }
     }
 
-    private ListenableFuture<Void> voidOperation(final String operationName,
+    private <T> ListenableFuture<T> operation(final String operationName, final T futureValue,
             final MessageSupplier messageSupplier, final Class<?> expectedResponseClass,
             final boolean propagateException, final OperationCallback callback) {
         LOG.debug("Tx {} {}", transactionId, operationName);
 
-        final SettableFuture<Void> returnFuture = SettableFuture.create();
+        final SettableFuture<T> returnFuture = SettableFuture.create();
 
         // The cohort actor list should already be built at this point by the canCommit phase but,
         // if not for some reason, we'll try to build it here.
 
-        ListenableFuture<Void> future = resolveCohorts();
+        ListenableFuture<Empty> future = resolveCohorts();
         if (successfulFuture(future)) {
-            finishVoidOperation(operationName, messageSupplier, expectedResponseClass, propagateException,
-                    returnFuture, callback);
+            finishOperation(operationName, messageSupplier, expectedResponseClass, propagateException, returnFuture,
+                futureValue, callback);
         } else {
-            Futures.addCallback(future, new FutureCallback<Void>() {
+            Futures.addCallback(future, new FutureCallback<>() {
                 @Override
-                public void onSuccess(final Void notUsed) {
-                    finishVoidOperation(operationName, messageSupplier, expectedResponseClass,
-                            propagateException, returnFuture, callback);
+                public void onSuccess(final Empty result) {
+                    finishOperation(operationName, messageSupplier, expectedResponseClass, propagateException,
+                        returnFuture, futureValue, callback);
                 }
 
                 @Override
@@ -297,7 +324,7 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                     if (propagateException) {
                         returnFuture.setException(failure);
                     } else {
-                        returnFuture.set(null);
+                        returnFuture.set(futureValue);
                     }
                 }
             }, MoreExecutors.directExecutor());
@@ -306,9 +333,10 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         return returnFuture;
     }
 
-    private void finishVoidOperation(final String operationName, final MessageSupplier messageSupplier,
+    private <T> void finishOperation(final String operationName, final MessageSupplier messageSupplier,
                                      final Class<?> expectedResponseClass, final boolean propagateException,
-                                     final SettableFuture<Void> returnFuture, final OperationCallback callback) {
+                                     final SettableFuture<T> returnFuture, final T futureValue,
+                                     final OperationCallback callback) {
         LOG.debug("Tx {} finish {}", transactionId, operationName);
 
         callback.resume();
@@ -339,14 +367,14 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
                         // Since the caller doesn't want us to propagate the exception we'll also
                         // not log it normally. But it's usually not good to totally silence
                         // exceptions so we'll log it to debug level.
-                        returnFuture.set(null);
+                        returnFuture.set(futureValue);
                     }
 
                     callback.failure();
                 } else {
                     LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
 
-                    returnFuture.set(null);
+                    returnFuture.set(futureValue);
 
                     callback.success();
                 }
@@ -354,21 +382,12 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }, actorUtils.getClientDispatcher());
     }
 
-    @Override
-    List<Future<ActorSelection>> getCohortFutures() {
-        List<Future<ActorSelection>> cohortFutures = new ArrayList<>(cohorts.size());
-        for (CohortInfo info: cohorts) {
-            cohortFutures.add(info.getActorFuture());
-        }
-
-        return cohortFutures;
-    }
-
     static class CohortInfo {
         private final Future<ActorSelection> actorFuture;
-        private volatile ActorSelection resolvedActor;
         private final Supplier<Short> actorVersionSupplier;
 
+        private volatile ActorSelection resolvedActor;
+
         CohortInfo(final Future<ActorSelection> actorFuture, final Supplier<Short> actorVersionSupplier) {
             this.actorFuture = actorFuture;
             this.actorVersionSupplier = actorVersionSupplier;
index b7a303e8d2eec6729e8952065566923f1fc152bd..e2b3872d864bc9b1f9b542efe9a5596dfbb8e4dd 100644 (file)
@@ -5,13 +5,15 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
+import static java.util.Objects.requireNonNull;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.lenient;
 import static org.opendaylight.controller.cluster.datastore.DataStoreVersions.CURRENT_VERSION;
 
 import akka.actor.ActorSelection;
@@ -22,20 +24,17 @@ import akka.dispatch.Futures;
 import akka.testkit.TestActorRef;
 import com.codahale.metrics.Snapshot;
 import com.codahale.metrics.Timer;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.ThreePhaseCommitCohortProxy.CohortInfo;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
@@ -52,20 +51,20 @@ import org.opendaylight.controller.cluster.datastore.utils.PrimaryShardInfoFutur
 import org.opendaylight.controller.cluster.raft.TestActorFactory;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
-
-    @SuppressWarnings("serial")
     static class TestException extends RuntimeException {
+        private static final long serialVersionUID = 1L;
+    }
 
     private ActorUtils actorUtils;
 
     @Mock
     private Timer commitTimer;
-
     @Mock
     private Timer.Context commitTimerContext;
-
     @Mock
     private Snapshot commitSnapshot;
 
@@ -73,11 +72,8 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
     private final List<TestActorRef<CohortActor>> cohortActors = new ArrayList<>();
     private final TransactionIdentifier tx = nextTransactionId();
 
-
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
         actorUtils = new ActorUtils(getSystem(), actorFactory.createActor(Props.create(DoNothingActor.class)),
                 new MockClusterWrapper(), new MockConfiguration(), DatastoreContext.newBuilder().build(),
                 new PrimaryShardInfoFutureCache()) {
@@ -92,20 +88,20 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
             }
         };
 
-        doReturn(commitTimerContext).when(commitTimer).time();
-        doReturn(commitSnapshot).when(commitTimer).getSnapshot();
+        lenient().doReturn(commitTimerContext).when(commitTimer).time();
+        lenient().doReturn(commitSnapshot).when(commitTimer).getSnapshot();
         for (int i = 1; i < 11; i++) {
             // Keep on increasing the amount of time it takes to complete transaction for each tenth of a
             // percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
-            doReturn(TimeUnit.MILLISECONDS.toNanos(i) * 1D).when(commitSnapshot).getValue(i * 0.1);
+            lenient().doReturn(TimeUnit.MILLISECONDS.toNanos(i) * 1D).when(commitSnapshot).getValue(i * 0.1);
         }
     }
 
     @Test
     public void testCanCommitYesWithOneCohort() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
-                        CanCommitTransactionReply.yes(CURRENT_VERSION)))), tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION)))),
+            tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifyCohortActors();
@@ -113,9 +109,9 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testCanCommitNoWithOneCohort() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
-                        CanCommitTransactionReply.no(CURRENT_VERSION)))), tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.no(CURRENT_VERSION)))),
+            tx);
 
         verifyCanCommit(proxy.canCommit(), false);
         verifyCohortActors();
@@ -123,12 +119,10 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testCanCommitYesWithTwoCohorts() throws Exception {
-        List<CohortInfo> cohorts = Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
-                        CanCommitTransactionReply.yes(CURRENT_VERSION))),
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
-                        CanCommitTransactionReply.yes(CURRENT_VERSION))));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))),
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION)))),
+            tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifyCohortActors();
@@ -136,55 +130,49 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testCanCommitNoWithThreeCohorts() throws Exception {
-        List<CohortInfo> cohorts = Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
-                        CanCommitTransactionReply.yes(CURRENT_VERSION))),
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(
-                        CanCommitTransactionReply.no(CURRENT_VERSION))),
-                newCohortInfo(new CohortActor.Builder(tx)));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))),
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.no(CURRENT_VERSION))),
+            newCohortInfo(new CohortActor.Builder(tx))), tx);
 
         verifyCanCommit(proxy.canCommit(), false);
         verifyCohortActors();
     }
 
-    @Test(expected = TestException.class)
-    public void testCanCommitWithExceptionFailure() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(new TestException()))), tx);
+    @Test
+    public void testCanCommitWithExceptionFailure() {
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+            List.of(newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(new TestException()))), tx);
 
-        propagateExecutionExceptionCause(proxy.canCommit());
+        propagateExecutionExceptionCause(proxy.canCommit(), TestException.class);
     }
 
-    @Test(expected = IllegalArgumentException.class)
-    public void testCanCommitWithInvalidResponseType() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectCanCommit("invalid"))), tx);
+    @Test
+    public void testCanCommitWithInvalidResponseType() {
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+            List.of(newCohortInfo(new CohortActor.Builder(tx).expectCanCommit("invalid"))), tx);
 
-        propagateExecutionExceptionCause(proxy.canCommit());
+        assertEquals("Unexpected response type class java.lang.String",
+            propagateExecutionExceptionCause(proxy.canCommit(), IllegalArgumentException.class));
     }
 
-    @Test(expected = TestException.class)
+    @Test
     public void testCanCommitWithFailedCohortFuture() throws Exception {
-        List<CohortInfo> cohorts = Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx)),
-                newCohortInfoWithFailedFuture(new TestException()),
-                newCohortInfo(new CohortActor.Builder(tx)));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx)),
+            newCohortInfoWithFailedFuture(new TestException()),
+            newCohortInfo(new CohortActor.Builder(tx))), tx);
 
-        propagateExecutionExceptionCause(proxy.canCommit());
+        propagateExecutionExceptionCause(proxy.canCommit(), TestException.class);
     }
 
     @Test
     public void testAllThreePhasesSuccessful() throws Exception {
-        List<CohortInfo> cohorts = Arrays.asList(
-                newCohortInfo(
-                        new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
-                                .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
-                newCohortInfo(
-                        new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
-                                .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+                .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+                .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION)))), tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifySuccessfulFuture(proxy.preCommit());
@@ -192,38 +180,38 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
         verifyCohortActors();
     }
 
-    @Test(expected = TestException.class)
+    @Test
     public void testCommitWithExceptionFailure() throws Exception {
-        List<CohortInfo> cohorts = Arrays.asList(
-                newCohortInfo(
-                        new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
-                                .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
-                newCohortInfo(
-                        new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
-                                .expectCommit(new TestException())));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+                .expectCommit(CommitTransactionReply.instance(CURRENT_VERSION))),
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+                .expectCommit(new TestException()))), tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifySuccessfulFuture(proxy.preCommit());
-        propagateExecutionExceptionCause(proxy.commit());
+        propagateExecutionExceptionCause(proxy.commit(), TestException.class);
     }
 
-    @Test(expected = IllegalArgumentException.class)
+    @Test
     public void testCommitWithInvalidResponseType() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
-                Arrays.asList(newCohortInfo(new CohortActor.Builder(tx)
-                        .expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION)).expectCommit("invalid"))), tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfo(new CohortActor.Builder(tx).expectCanCommit(CanCommitTransactionReply.yes(CURRENT_VERSION))
+                .expectCommit("invalid"))),
+            tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifySuccessfulFuture(proxy.preCommit());
-        propagateExecutionExceptionCause(proxy.commit());
+        assertEquals("Unexpected response type class java.lang.String",
+            propagateExecutionExceptionCause(proxy.commit(), IllegalArgumentException.class));
     }
 
     @Test
     public void testAbort() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectAbort(
-                        AbortTransactionReply.instance(CURRENT_VERSION)))), tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+            List.of(newCohortInfo(new CohortActor.Builder(tx).expectAbort(
+                AbortTransactionReply.instance(CURRENT_VERSION)))),
+            tx);
 
         verifySuccessfulFuture(proxy.abort());
         verifyCohortActors();
@@ -231,8 +219,8 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testAbortWithFailure() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, Arrays.asList(
-                newCohortInfo(new CohortActor.Builder(tx).expectAbort(new RuntimeException("mock")))), tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
+            List.of(newCohortInfo(new CohortActor.Builder(tx).expectAbort(new RuntimeException("mock")))), tx);
 
         // The exception should not get propagated.
         verifySuccessfulFuture(proxy.abort());
@@ -241,9 +229,8 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testAbortWithFailedCohortFuture() throws Exception {
-        List<CohortInfo> cohorts = Arrays.asList(
-                newCohortInfoWithFailedFuture(new TestException()), newCohortInfo(new CohortActor.Builder(tx)));
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, cohorts, tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(
+            newCohortInfoWithFailedFuture(new TestException()), newCohortInfo(new CohortActor.Builder(tx))), tx);
 
         verifySuccessfulFuture(proxy.abort());
         verifyCohortActors();
@@ -251,8 +238,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
 
     @Test
     public void testWithNoCohorts() throws Exception {
-        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils,
-                Collections.<CohortInfo>emptyList(), tx);
+        ThreePhaseCommitCohortProxy proxy = new ThreePhaseCommitCohortProxy(actorUtils, List.of(), tx);
 
         verifyCanCommit(proxy.canCommit(), true);
         verifySuccessfulFuture(proxy.preCommit());
@@ -260,16 +246,12 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
         verifyCohortActors();
     }
 
-    @SuppressWarnings("checkstyle:avoidHidingCauseException")
-    private void propagateExecutionExceptionCause(final ListenableFuture<?> future) throws Exception {
-        try {
-            future.get(5, TimeUnit.SECONDS);
-            fail("Expected ExecutionException");
-        } catch (ExecutionException e) {
-            verifyCohortActors();
-            Throwables.propagateIfPossible(e.getCause(), Exception.class);
-            throw new RuntimeException(e.getCause());
-        }
+    private String propagateExecutionExceptionCause(final ListenableFuture<?> future,
+            final Class<? extends Exception> expected) {
+        final var ex = assertThrows(ExecutionException.class, () -> future.get(5, TimeUnit.SECONDS)).getCause();
+        verifyCohortActors();
+        assertThat(ex, instanceOf(expected));
+        return ex.getMessage();
     }
 
     private CohortInfo newCohortInfo(final CohortActor.Builder builder, final short version) {
@@ -383,12 +365,12 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
             private final TransactionIdentifier transactionId;
 
             Builder(final TransactionIdentifier transactionId) {
-                this.transactionId = Preconditions.checkNotNull(transactionId);
+                this.transactionId = requireNonNull(transactionId);
             }
 
             Builder expectCanCommit(final Class<?> newExpCanCommitType, final Object newCanCommitReply) {
-                this.expCanCommitType = newExpCanCommitType;
-                this.canCommitReply = newCanCommitReply;
+                expCanCommitType = newExpCanCommitType;
+                canCommitReply = newCanCommitReply;
                 return this;
             }
 
@@ -397,8 +379,8 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
             }
 
             Builder expectCommit(final Class<?> newExpCommitType, final Object newCommitReply) {
-                this.expCommitType = newExpCommitType;
-                this.commitReply = newCommitReply;
+                expCommitType = newExpCommitType;
+                commitReply = newCommitReply;
                 return this;
             }
 
@@ -407,8 +389,8 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
             }
 
             Builder expectAbort(final Class<?> newExpAbortType, final Object newAbortReply) {
-                this.expAbortType = newExpAbortType;
-                this.abortReply = newAbortReply;
+                expAbortType = newExpAbortType;
+                abortReply = newAbortReply;
                 return this;
             }
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxyTest.java
deleted file mode 100644 (file)
index e4ad8fd..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_WRITE;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.WRITE_ONLY;
-
-import akka.actor.ActorRef;
-import akka.util.Timeout;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Function;
-import org.junit.Assert;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Promise;
-
-public class TransactionChainProxyTest extends AbstractTransactionProxyTest {
-    private LocalHistoryIdentifier historyId;
-
-    @Override
-    public void setUp() {
-        super.setUp();
-        historyId = MockIdentifiers.historyIdentifier(TransactionChainProxyTest.class, memberName);
-    }
-
-    @SuppressWarnings("resource")
-    @Test
-    public void testNewReadOnlyTransaction() {
-
-        DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newReadOnlyTransaction();
-        Assert.assertTrue(dst instanceof DOMStoreReadTransaction);
-
-    }
-
-    @SuppressWarnings("resource")
-    @Test
-    public void testNewReadWriteTransaction() {
-        DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newReadWriteTransaction();
-        Assert.assertTrue(dst instanceof DOMStoreReadWriteTransaction);
-
-    }
-
-    @SuppressWarnings("resource")
-    @Test
-    public void testNewWriteOnlyTransaction() {
-        DOMStoreTransaction dst = new TransactionChainProxy(mockComponentFactory, historyId).newWriteOnlyTransaction();
-        Assert.assertTrue(dst instanceof DOMStoreWriteTransaction);
-
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testClose() {
-        new TransactionChainProxy(mockComponentFactory, historyId).close();
-
-        verify(mockActorContext, times(1)).broadcast(any(Function.class), any(Class.class));
-    }
-
-    @Test
-    public void testRateLimitingUsedInReadWriteTxCreation() {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            txChainProxy.newReadWriteTransaction();
-
-            verify(mockActorContext, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingUsedInWriteOnlyTxCreation() {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            txChainProxy.newWriteOnlyTransaction();
-
-            verify(mockActorContext, times(1)).acquireTxCreationPermit();
-        }
-    }
-
-    @Test
-    public void testRateLimitingNotUsedInReadOnlyTxCreation() {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            txChainProxy.newReadOnlyTransaction();
-
-            verify(mockActorContext, times(0)).acquireTxCreationPermit();
-        }
-    }
-
-    /**
-     * Tests 2 successive chained write-only transactions and verifies the second transaction isn't
-     * initiated until the first one completes its read future.
-     */
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testChainedWriteOnlyTransactions() throws Exception {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            ActorRef txActorRef1 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-
-            Promise<Object> batchedReplyPromise1 = akka.dispatch.Futures.promise();
-            doReturn(batchedReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(txActorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
-            DOMStoreWriteTransaction writeTx1 = txChainProxy.newWriteOnlyTransaction();
-
-            NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
-            writeTx1.ready();
-
-            verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-            verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
-
-            ActorRef txActorRef2 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-
-            expectBatchedModifications(txActorRef2, 1);
-
-            final NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-            final DOMStoreWriteTransaction writeTx2 = txChainProxy.newWriteOnlyTransaction();
-
-            final AtomicReference<Exception> caughtEx = new AtomicReference<>();
-            final CountDownLatch write2Complete = new CountDownLatch(1);
-            new Thread(() -> {
-                try {
-                    writeTx2.write(TestModel.OUTER_LIST_PATH, writeNode2);
-                } catch (Exception e) {
-                    caughtEx.set(e);
-                } finally {
-                    write2Complete.countDown();
-                }
-            }).start();
-
-            assertTrue("Tx 2 write should've completed", write2Complete.await(5, TimeUnit.SECONDS));
-
-            if (caughtEx.get() != null) {
-                throw caughtEx.get();
-            }
-
-            try {
-                verify(mockActorContext, times(1)).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-            } catch (AssertionError e) {
-                fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
-            }
-
-            batchedReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
-
-            // Tx 2 should've proceeded to find the primary shard.
-            verify(mockActorContext, timeout(5000).times(2)).findPrimaryShardAsync(
-                    eq(DefaultShardStrategy.DEFAULT_SHARD));
-        }
-    }
-
-    /**
-     * Tests 2 successive chained read-write transactions and verifies the second transaction isn't
-     * initiated until the first one completes its read future.
-     */
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testChainedReadWriteTransactions() throws Exception {
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            ActorRef txActorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-            expectBatchedModifications(txActorRef1, 1);
-
-            Promise<Object> readyReplyPromise1 = akka.dispatch.Futures.promise();
-            doReturn(readyReplyPromise1.future()).when(mockActorContext).executeOperationAsync(
-                    eq(actorSelection(txActorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
-            DOMStoreWriteTransaction writeTx1 = txChainProxy.newReadWriteTransaction();
-
-            NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
-            writeTx1.ready();
-
-            verifyOneBatchedModification(txActorRef1, new WriteModification(TestModel.TEST_PATH, writeNode1), true);
-
-            String tx2MemberName = "mock-member";
-            ActorRef shardActorRef2 = setupActorContextWithoutInitialCreateTransaction(getSystem());
-            ActorRef txActorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE,
-                    DataStoreVersions.CURRENT_VERSION, tx2MemberName, shardActorRef2);
-
-            expectBatchedModifications(txActorRef2, 1);
-
-            final NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-            final DOMStoreWriteTransaction writeTx2 = txChainProxy.newReadWriteTransaction();
-
-            final AtomicReference<Exception> caughtEx = new AtomicReference<>();
-            final CountDownLatch write2Complete = new CountDownLatch(1);
-            new Thread(() -> {
-                try {
-                    writeTx2.write(TestModel.OUTER_LIST_PATH, writeNode2);
-                } catch (Exception e) {
-                    caughtEx.set(e);
-                } finally {
-                    write2Complete.countDown();
-                }
-            }).start();
-
-            assertTrue("Tx 2 write should've completed", write2Complete.await(5, TimeUnit.SECONDS));
-
-            if (caughtEx.get() != null) {
-                throw caughtEx.get();
-            }
-
-            try {
-                verify(mockActorContext, never()).executeOperationAsync(
-                        eq(getSystem().actorSelection(shardActorRef2.path())),
-                        eqCreateTransaction(tx2MemberName, READ_WRITE));
-            } catch (AssertionError e) {
-                fail("Tx 2 should not have initiated until the Tx 1's ready future completed");
-            }
-
-            readyReplyPromise1.success(readyTxReply(txActorRef1.path().toString()).value().get().get());
-
-            verify(mockActorContext, timeout(5000)).executeOperationAsync(
-                    eq(getSystem().actorSelection(shardActorRef2.path())),
-                    eqCreateTransaction(tx2MemberName, READ_WRITE), any(Timeout.class));
-        }
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testChainedWriteTransactionsWithPreviousTxNotReady() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        expectBatchedModifications(actorRef, 1);
-
-        try (TransactionChainProxy txChainProxy = new TransactionChainProxy(mockComponentFactory, historyId)) {
-
-            DOMStoreWriteTransaction writeTx1 = txChainProxy.newWriteOnlyTransaction();
-
-            NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            writeTx1.write(TestModel.TEST_PATH, writeNode1);
-
-            txChainProxy.newWriteOnlyTransaction();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapperTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionContextWrapperTest.java
deleted file mode 100644 (file)
index e899ad0..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-
-public class TransactionContextWrapperTest {
-    @Mock
-    private ActorUtils actorUtils;
-
-    @Mock
-    private TransactionContext transactionContext;
-
-    private TransactionContextWrapper transactionContextWrapper;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        doReturn(DatastoreContext.newBuilder().build()).when(actorUtils).getDatastoreContext();
-        transactionContextWrapper = new TransactionContextWrapper(MockIdentifiers.transactionIdentifier(
-            TransactionContextWrapperTest.class, "mock"), actorUtils, "mock");
-    }
-
-    @Test
-    public void testExecutePriorTransactionOperations() {
-        for (int i = 0; i < 100; i++) {
-            transactionContextWrapper.maybeExecuteTransactionOperation(mock(TransactionOperation.class));
-        }
-        assertEquals(901, transactionContextWrapper.getLimiter().availablePermits());
-
-        transactionContextWrapper.executePriorTransactionOperations(transactionContext);
-
-        assertEquals(1001, transactionContextWrapper.getLimiter().availablePermits());
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java
deleted file mode 100644 (file)
index 5fa0dda..0000000
+++ /dev/null
@@ -1,1530 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyString;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.ArgumentMatchers.isA;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_ONLY;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.READ_WRITE;
-import static org.opendaylight.controller.cluster.datastore.TransactionType.WRITE_ONLY;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSelection;
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.dispatch.Futures;
-import akka.util.Timeout;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableSortedSet;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Collection;
-import java.util.List;
-import java.util.Optional;
-import java.util.SortedSet;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.InOrder;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
-import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
-import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
-import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
-import org.opendaylight.controller.cluster.datastore.messages.ReadyLocalTransaction;
-import org.opendaylight.controller.cluster.datastore.modification.DeleteModification;
-import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
-import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.DefaultShardStrategy;
-import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregatorTest;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.Promise;
-
-@SuppressWarnings({"resource", "checkstyle:IllegalThrows", "checkstyle:AvoidHidingCauseException"})
-public class TransactionProxyTest extends AbstractTransactionProxyTest {
-
-    @SuppressWarnings("serial")
-    static class TestException extends RuntimeException {
-    }
-
-    interface Invoker {
-        FluentFuture<?> invoke(TransactionProxy proxy);
-    }
-
-    @Test
-    public void testRead() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
-                TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
-        assertFalse("NormalizedNode isPresent", readOptional.isPresent());
-
-        NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        readOptional = transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-
-        assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testReadWithInvalidReplyMessageType() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        try {
-            transactionProxy.read(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
-    }
-
-    @Test(expected = TestException.class)
-    public void testReadWithAsyncRemoteOperatonFailure() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.failed(new TestException())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
-    }
-
-    private void testExceptionOnInitialCreateTransaction(final Exception exToThrow, final Invoker invoker)
-            throws Throwable {
-        ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        if (exToThrow instanceof PrimaryNotFoundException) {
-            doReturn(Futures.failed(exToThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
-        } else {
-            doReturn(primaryShardInfoReply(getSystem(), actorRef)).when(mockActorContext)
-                    .findPrimaryShardAsync(anyString());
-        }
-
-        doReturn(Futures.failed(exToThrow)).when(mockActorContext).executeOperationAsync(
-                any(ActorSelection.class), any(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(invoker.invoke(transactionProxy));
-    }
-
-    private void testReadWithExceptionOnInitialCreateTransaction(final Exception exToThrow) throws Throwable {
-        testExceptionOnInitialCreateTransaction(exToThrow, proxy -> proxy.read(TestModel.TEST_PATH));
-    }
-
-    @Test(expected = PrimaryNotFoundException.class)
-    public void testReadWhenAPrimaryNotFoundExceptionIsThrown() throws Throwable {
-        testReadWithExceptionOnInitialCreateTransaction(new PrimaryNotFoundException("test"));
-    }
-
-    @Test(expected = TestException.class)
-    public void testReadWhenATimeoutExceptionIsThrown() throws Throwable {
-        testReadWithExceptionOnInitialCreateTransaction(new TimeoutException("test",
-                new TestException()));
-    }
-
-    @Test(expected = TestException.class)
-    public void testReadWhenAnyOtherExceptionIsThrown() throws Throwable {
-        testReadWithExceptionOnInitialCreateTransaction(new TestException());
-    }
-
-    @Test
-    public void testReadWithPriorRecordingOperationSuccessful() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        NormalizedNode<?, ?> expectedNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.write(TestModel.TEST_PATH, expectedNode);
-
-        Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
-                TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-        assertEquals("Response NormalizedNode", expectedNode, readOptional.get());
-
-        InOrder inOrder = Mockito.inOrder(mockActorContext);
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testReadPreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-        transactionProxy.read(TestModel.TEST_PATH);
-    }
-
-    @Test(expected = IllegalArgumentException.class)
-    public void testInvalidCreateTransactionReply() throws Throwable {
-        ActorRef actorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(actorRef.path())).when(mockActorContext)
-                .actorSelection(actorRef.path().toString());
-
-        doReturn(primaryShardInfoReply(getSystem(), actorRef)).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
-            eq(getSystem().actorSelection(actorRef.path())), eqCreateTransaction(memberName, READ_ONLY),
-            any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
-    }
-
-    @Test
-    public void testExists() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        doReturn(dataExistsReply(false)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
-        assertEquals("Exists response", Boolean.FALSE, exists);
-
-        doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
-        assertEquals("Exists response", Boolean.TRUE, exists);
-    }
-
-    @Test(expected = PrimaryNotFoundException.class)
-    public void testExistsWhenAPrimaryNotFoundExceptionIsThrown() throws Throwable {
-        testExceptionOnInitialCreateTransaction(new PrimaryNotFoundException("test"),
-            proxy -> proxy.exists(TestModel.TEST_PATH));
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testExistsWithInvalidReplyMessageType() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        try {
-            transactionProxy.exists(TestModel.TEST_PATH).get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
-    }
-
-    @Test(expected = TestException.class)
-    public void testExistsWithAsyncRemoteOperatonFailure() throws Throwable {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-
-        doReturn(Futures.failed(new TestException())).when(mockActorContext)
-                .executeOperationAsync(eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
-    }
-
-    @Test
-    public void testExistsWithPriorRecordingOperationSuccessful() throws Exception {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-
-        assertEquals("Exists response", Boolean.TRUE, exists);
-
-        InOrder inOrder = Mockito.inOrder(mockActorContext);
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testExistsPreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-        transactionProxy.exists(TestModel.TEST_PATH);
-    }
-
-    @Test
-    public void testWrite() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), false);
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testWriteAfterAsyncRead() throws Exception {
-        ActorRef actorRef = setupActorContextWithoutInitialCreateTransaction(getSystem(),
-                DefaultShardStrategy.DEFAULT_SHARD);
-
-        Promise<Object> createTxPromise = akka.dispatch.Futures.promise();
-        doReturn(createTxPromise).when(mockActorContext).executeOperationAsync(
-                eq(getSystem().actorSelection(actorRef.path())),
-                eqCreateTransaction(memberName, READ_WRITE), any(Timeout.class));
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef);
-
-        final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        final TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        final CountDownLatch readComplete = new CountDownLatch(1);
-        final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
-        com.google.common.util.concurrent.Futures.addCallback(transactionProxy.read(TestModel.TEST_PATH),
-                new  FutureCallback<Optional<NormalizedNode<?, ?>>>() {
-                    @Override
-                    public void onSuccess(final Optional<NormalizedNode<?, ?>> result) {
-                        try {
-                            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-                        } catch (Exception e) {
-                            caughtEx.set(e);
-                        } finally {
-                            readComplete.countDown();
-                        }
-                    }
-
-                    @Override
-                    public void onFailure(final Throwable failure) {
-                        caughtEx.set(failure);
-                        readComplete.countDown();
-                    }
-                }, MoreExecutors.directExecutor());
-
-        createTxPromise.success(createTransactionReply(actorRef, DataStoreVersions.CURRENT_VERSION));
-
-        Uninterruptibles.awaitUninterruptibly(readComplete, 5, TimeUnit.SECONDS);
-
-        final Throwable t = caughtEx.get();
-        if (t != null) {
-            Throwables.propagateIfPossible(t, Exception.class);
-            throw new RuntimeException(t);
-        }
-
-        // This sends the batched modification.
-        transactionProxy.ready();
-
-        verifyOneBatchedModification(actorRef, new WriteModification(TestModel.TEST_PATH, nodeToWrite), true);
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testWritePreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testWriteAfterReadyPreConditionCheck() {
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.ready();
-
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-    }
-
-    @Test
-    public void testMerge() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        verifyOneBatchedModification(actorRef, new MergeModification(TestModel.TEST_PATH, nodeToWrite), false);
-    }
-
-    @Test
-    public void testDelete() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.delete(TestModel.TEST_PATH);
-
-        verifyOneBatchedModification(actorRef, new DeleteModification(TestModel.TEST_PATH), false);
-    }
-
-    @Test
-    public void testReadWrite() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModifications(actorRef, 1);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-    }
-
-    @Test
-    public void testReadyWithReadWrite() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), true, true,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
-        assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
-    }
-
-    @Test
-    public void testReadyWithNoModifications() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), true, true);
-    }
-
-    @Test
-    public void testReadyWithMultipleShardWrites() {
-        ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY,
-                TestModel.JUNK_QNAME.getLocalName());
-
-        expectBatchedModificationsReady(actorRef1);
-        expectBatchedModificationsReady(actorRef2);
-
-        ActorRef actorRef3 = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(actorRef3.path())).when(mockActorContext)
-                .actorSelection(actorRef3.path().toString());
-
-        doReturn(Futures.successful(newPrimaryShardInfo(actorRef3, createDataTree()))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(CarsModel.BASE_QNAME.getLocalName()));
-
-        expectReadyLocalTransaction(actorRef3, false);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-        transactionProxy.write(CarsModel.BASE_PATH, ImmutableNodes.containerNode(CarsModel.BASE_QNAME));
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
-        verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef1),
-                actorSelection(actorRef2), actorSelection(actorRef3));
-
-        SortedSet<String> expShardNames =
-                ImmutableSortedSet.of(DefaultShardStrategy.DEFAULT_SHARD,
-                        TestModel.JUNK_QNAME.getLocalName(), CarsModel.BASE_QNAME.getLocalName());
-
-        ArgumentCaptor<BatchedModifications> batchedMods = ArgumentCaptor.forClass(BatchedModifications.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef1)), batchedMods.capture(), any(Timeout.class));
-        assertTrue("Participating shards present", batchedMods.getValue().getParticipatingShardNames().isPresent());
-        assertEquals("Participating shards", expShardNames, batchedMods.getValue().getParticipatingShardNames().get());
-
-        batchedMods = ArgumentCaptor.forClass(BatchedModifications.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef2)), batchedMods.capture(), any(Timeout.class));
-        assertTrue("Participating shards present", batchedMods.getValue().getParticipatingShardNames().isPresent());
-        assertEquals("Participating shards", expShardNames, batchedMods.getValue().getParticipatingShardNames().get());
-
-        ArgumentCaptor<ReadyLocalTransaction> readyLocalTx = ArgumentCaptor.forClass(ReadyLocalTransaction.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef3)), readyLocalTx.capture(), any(Timeout.class));
-        assertTrue("Participating shards present", readyLocalTx.getValue().getParticipatingShardNames().isPresent());
-        assertEquals("Participating shards", expShardNames, readyLocalTx.getValue().getParticipatingShardNames().get());
-    }
-
-    @Test
-    public void testReadyWithWriteOnlyAndLastBatchPending() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), true, true,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-    }
-
-    @Test
-    public void testReadyWithWriteOnlyAndLastBatchEmpty() {
-        dataStoreContextBuilder.shardBatchedModificationCount(1).writeOnlyTransactionOptimizationsEnabled(true);
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 2, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false,
-                new WriteModification(TestModel.TEST_PATH, nodeToWrite));
-
-        verifyBatchedModifications(batchedModifications.get(1), true, true);
-    }
-
-    @Test
-    public void testReadyWithReplyFailure() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectFailedBatchedModifications(actorRef);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, TestException.class);
-    }
-
-    @Test
-    public void testReadyWithDebugContextEnabled() {
-        dataStoreContextBuilder.transactionDebugContextEnabled(true);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        expectBatchedModificationsReady(actorRef, true);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.merge(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof DebugThreePhaseCommitCohort);
-
-        verifyCohortFutures((DebugThreePhaseCommitCohort)ready, new CommitTransactionReply().toSerializable());
-    }
-
-    @Test
-    public void testReadyWithLocalTransaction() {
-        ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, createDataTree()))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        expectReadyLocalTransaction(shardActorRef, true);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-        verifyCohortFutures((SingleCommitCohortProxy)ready, new CommitTransactionReply().toSerializable());
-
-        ArgumentCaptor<ReadyLocalTransaction> readyLocalTx = ArgumentCaptor.forClass(ReadyLocalTransaction.class);
-        verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(shardActorRef)), readyLocalTx.capture(), any(Timeout.class));
-        assertFalse("Participating shards present", readyLocalTx.getValue().getParticipatingShardNames().isPresent());
-    }
-
-    @Test
-    public void testReadyWithLocalTransactionWithFailure() {
-        ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        DataTree mockDataTree = createDataTree();
-        DataTreeModification mockModification = mockDataTree.takeSnapshot().newModification();
-        doThrow(new RuntimeException("mock")).when(mockModification).ready();
-
-        doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, mockDataTree))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        expectReadyLocalTransaction(shardActorRef, true);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-        verifyCohortFutures((SingleCommitCohortProxy)ready, RuntimeException.class);
-    }
-
-    private void testWriteOnlyTxWithFindPrimaryShardFailure(final Exception toThrow) {
-        doReturn(Futures.failed(toThrow)).when(mockActorContext).findPrimaryShardAsync(anyString());
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.delete(TestModel.TEST_PATH);
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof SingleCommitCohortProxy);
-
-        verifyCohortFutures((SingleCommitCohortProxy)ready, toThrow.getClass());
-    }
-
-    @Test
-    public void testWriteOnlyTxWithPrimaryNotFoundException() {
-        testWriteOnlyTxWithFindPrimaryShardFailure(new PrimaryNotFoundException("mock"));
-    }
-
-    @Test
-    public void testWriteOnlyTxWithNotInitializedException() {
-        testWriteOnlyTxWithFindPrimaryShardFailure(new NotInitializedException("mock"));
-    }
-
-    @Test
-    public void testWriteOnlyTxWithNoShardLeaderException() {
-        testWriteOnlyTxWithFindPrimaryShardFailure(new NoShardLeaderException("mock"));
-    }
-
-    @Test
-    public void testReadyWithInvalidReplyMessageType() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-        ActorRef actorRef1 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY);
-
-        ActorRef actorRef2 = setupActorContextWithInitialCreateTransaction(getSystem(), WRITE_ONLY,
-                TestModel.JUNK_QNAME.getLocalName());
-
-        doReturn(Futures.successful(new Object())).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef1)), isA(BatchedModifications.class), any(Timeout.class));
-
-        expectBatchedModificationsReady(actorRef2);
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, WRITE_ONLY);
-
-        transactionProxy.write(TestModel.JUNK_PATH, ImmutableNodes.containerNode(TestModel.JUNK_QNAME));
-        transactionProxy.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();
-
-        assertTrue(ready instanceof ThreePhaseCommitCohortProxy);
-
-        verifyCohortFutures((ThreePhaseCommitCohortProxy)ready, actorSelection(actorRef2),
-                IllegalArgumentException.class);
-    }
-
-    @Test
-    public void testGetIdentifier() {
-        setupActorContextWithInitialCreateTransaction(getSystem(), READ_ONLY);
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        Object id = transactionProxy.getIdentifier();
-        assertNotNull("getIdentifier returned null", id);
-        assertTrue("Invalid identifier: " + id, id.toString().contains(memberName));
-    }
-
-    @Test
-    public void testClose() {
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.read(TestModel.TEST_PATH);
-
-        transactionProxy.close();
-
-        verify(mockActorContext).sendOperationAsync(
-                eq(actorSelection(actorRef)), isA(CloseTransaction.class));
-    }
-
-    private interface TransactionProxyOperation {
-        void run(TransactionProxy transactionProxy);
-    }
-
-    private PrimaryShardInfo newPrimaryShardInfo(final ActorRef actorRef) {
-        return new PrimaryShardInfo(getSystem().actorSelection(actorRef.path()), DataStoreVersions.CURRENT_VERSION);
-    }
-
-    private PrimaryShardInfo newPrimaryShardInfo(final ActorRef actorRef, final DataTree dataTree) {
-        return new PrimaryShardInfo(getSystem().actorSelection(actorRef.path()), DataStoreVersions.CURRENT_VERSION,
-                dataTree);
-    }
-
-    private void throttleOperation(final TransactionProxyOperation operation) {
-        throttleOperation(operation, 1, true);
-    }
-
-    private void throttleOperation(final TransactionProxyOperation operation, final int outstandingOpsLimit,
-            final boolean shardFound) {
-        throttleOperation(operation, outstandingOpsLimit, shardFound, TimeUnit.MILLISECONDS.toNanos(
-                mockActorContext.getDatastoreContext().getOperationTimeoutInMillis()));
-    }
-
-    private void throttleOperation(final TransactionProxyOperation operation, final int outstandingOpsLimit,
-            final boolean shardFound, final long expectedCompletionTime) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        // Note that we setting batchedModificationCount to one less than what we need because in TransactionProxy
-        // we now allow one extra permit to be allowed for ready
-        doReturn(dataStoreContextBuilder.operationTimeoutInSeconds(2)
-                .shardBatchedModificationCount(outstandingOpsLimit - 1).build()).when(mockActorContext)
-                        .getDatastoreContext();
-
-        doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        if (shardFound) {
-            doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-            doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq("cars"));
-
-        } else {
-            doReturn(Futures.failed(new Exception("not found")))
-                    .when(mockActorContext).findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-        }
-
-        doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                eq(actorSystem.actorSelection(shardActorRef.path())), eqCreateTransaction(memberName, READ_WRITE),
-                any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        long start = System.nanoTime();
-
-        operation.run(transactionProxy);
-
-        long end = System.nanoTime();
-
-        Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
-                expectedCompletionTime, end - start),
-                end - start > expectedCompletionTime && end - start < expectedCompletionTime * 2);
-
-    }
-
-    private void completeOperation(final TransactionProxyOperation operation) {
-        completeOperation(operation, true);
-    }
-
-    private void completeOperation(final TransactionProxyOperation operation, final boolean shardFound) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        if (shardFound) {
-            doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-        } else {
-            doReturn(Futures.failed(new PrimaryNotFoundException("test"))).when(mockActorContext)
-                    .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-        }
-
-        ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-        String actorPath = txActorRef.path().toString();
-        CreateTransactionReply createTransactionReply = new CreateTransactionReply(actorPath, nextTransactionId(),
-                DataStoreVersions.CURRENT_VERSION);
-
-        doReturn(actorSystem.actorSelection(actorPath)).when(mockActorContext).actorSelection(actorPath);
-
-        doReturn(Futures.successful(createTransactionReply)).when(mockActorContext).executeOperationAsync(
-                eq(actorSystem.actorSelection(shardActorRef.path())), eqCreateTransaction(memberName, READ_WRITE),
-                any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        long start = System.nanoTime();
-
-        operation.run(transactionProxy);
-
-        long end = System.nanoTime();
-
-        long expected = TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
-                .getOperationTimeoutInMillis());
-        Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s",
-                expected, end - start), end - start <= expected);
-    }
-
-    private void completeOperationLocal(final TransactionProxyOperation operation, final DataTree dataTree) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(actorSystem.actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        doReturn(Futures.successful(newPrimaryShardInfo(shardActorRef, dataTree))).when(mockActorContext)
-                .findPrimaryShardAsync(eq(DefaultShardStrategy.DEFAULT_SHARD));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        long start = System.nanoTime();
-
-        operation.run(transactionProxy);
-
-        long end = System.nanoTime();
-
-        long expected = TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
-                .getOperationTimeoutInMillis());
-        Assert.assertTrue(String.format("Expected elapsed time: %s. Actual: %s", expected, end - start),
-                end - start <= expected);
-    }
-
-    private static DataTree createDataTree() {
-        DataTree dataTree = mock(DataTree.class);
-        DataTreeSnapshot dataTreeSnapshot = mock(DataTreeSnapshot.class);
-        DataTreeModification dataTreeModification = mock(DataTreeModification.class);
-
-        doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
-        doReturn(dataTreeModification).when(dataTreeSnapshot).newModification();
-
-        return dataTree;
-    }
-
-    private static DataTree createDataTree(final NormalizedNode<?, ?> readResponse) {
-        DataTree dataTree = mock(DataTree.class);
-        DataTreeSnapshot dataTreeSnapshot = mock(DataTreeSnapshot.class);
-        DataTreeModification dataTreeModification = mock(DataTreeModification.class);
-
-        doReturn(dataTreeSnapshot).when(dataTree).takeSnapshot();
-        doReturn(dataTreeModification).when(dataTreeSnapshot).newModification();
-        doReturn(Optional.of(readResponse)).when(dataTreeModification).readNode(any(YangInstanceIdentifier.class));
-
-        return dataTree;
-    }
-
-
-    @Test
-    public void testWriteCompletionForLocalShard() {
-        completeOperationLocal(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        }, createDataTree());
-    }
-
-    @Test
-    public void testWriteThrottlingWhenShardFound() {
-        throttleOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectIncompleteBatchedModifications();
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-        });
-    }
-
-    @Test
-    public void testWriteThrottlingWhenShardNotFound() {
-        // Confirm that there is no throttling when the Shard is not found
-        completeOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-        }, false);
-
-    }
-
-
-    @Test
-    public void testWriteCompletion() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-        });
-    }
-
-    @Test
-    public void testMergeThrottlingWhenShardFound() {
-        throttleOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectIncompleteBatchedModifications();
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-        });
-    }
-
-    @Test
-    public void testMergeThrottlingWhenShardNotFound() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-        }, false);
-    }
-
-    @Test
-    public void testMergeCompletion() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToMerge = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToMerge);
-        });
-
-    }
-
-    @Test
-    public void testMergeCompletionForLocalShard() {
-        completeOperationLocal(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.merge(TestModel.TEST_PATH, nodeToWrite);
-
-        }, createDataTree());
-    }
-
-
-    @Test
-    public void testDeleteThrottlingWhenShardFound() {
-
-        throttleOperation(transactionProxy -> {
-            expectIncompleteBatchedModifications();
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        });
-    }
-
-
-    @Test
-    public void testDeleteThrottlingWhenShardNotFound() {
-
-        completeOperation(transactionProxy -> {
-            expectBatchedModifications(2);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        }, false);
-    }
-
-    @Test
-    public void testDeleteCompletionForLocalShard() {
-        completeOperationLocal(transactionProxy -> {
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        }, createDataTree());
-
-    }
-
-    @Test
-    public void testDeleteCompletion() {
-        completeOperation(transactionProxy -> {
-            expectBatchedModifications(2);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-
-            transactionProxy.delete(TestModel.TEST_PATH);
-        });
-
-    }
-
-    @Test
-    public void testReadThrottlingWhenShardFound() {
-
-        throttleOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqReadData());
-
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        });
-    }
-
-    @Test
-    public void testReadThrottlingWhenShardNotFound() {
-
-        completeOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqReadData());
-
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        }, false);
-    }
-
-
-    @Test
-    public void testReadCompletion() {
-        completeOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            doReturn(readDataReply(nodeToRead)).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqReadData(), any(Timeout.class));
-
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        });
-
-    }
-
-    @Test
-    public void testReadCompletionForLocalShard() {
-        final NormalizedNode<?, ?> nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        }, createDataTree(nodeToRead));
-
-    }
-
-    @Test
-    public void testReadCompletionForLocalShardWhenExceptionOccurs() {
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.read(TestModel.TEST_PATH);
-
-            transactionProxy.read(TestModel.TEST_PATH);
-        }, createDataTree());
-
-    }
-
-    @Test
-    public void testExistsThrottlingWhenShardFound() {
-
-        throttleOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqDataExists());
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        });
-    }
-
-    @Test
-    public void testExistsThrottlingWhenShardNotFound() {
-
-        completeOperation(transactionProxy -> {
-            doReturn(incompleteFuture()).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqDataExists());
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        }, false);
-    }
-
-
-    @Test
-    public void testExistsCompletion() {
-        completeOperation(transactionProxy -> {
-            doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                    any(ActorSelection.class), eqDataExists(), any(Timeout.class));
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        });
-
-    }
-
-    @Test
-    public void testExistsCompletionForLocalShard() {
-        final NormalizedNode<?, ?> nodeToRead = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        }, createDataTree(nodeToRead));
-
-    }
-
-    @Test
-    public void testExistsCompletionForLocalShardWhenExceptionOccurs() {
-        completeOperationLocal(transactionProxy -> {
-            transactionProxy.exists(TestModel.TEST_PATH);
-
-            transactionProxy.exists(TestModel.TEST_PATH);
-        }, createDataTree());
-
-    }
-
-    @Test
-    public void testReadyThrottling() {
-
-        throttleOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-            expectBatchedModifications(1);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            transactionProxy.ready();
-        });
-    }
-
-    @Test
-    public void testReadyThrottlingWithTwoTransactionContexts() {
-        throttleOperation(transactionProxy -> {
-            NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-            NormalizedNode<?, ?> carsNode = ImmutableNodes.containerNode(CarsModel.BASE_QNAME);
-
-            expectBatchedModifications(2);
-
-            transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-            // Trying to write to Cars will cause another transaction context to get created
-            transactionProxy.write(CarsModel.BASE_PATH, carsNode);
-
-            // Now ready should block for both transaction contexts
-            transactionProxy.ready();
-        }, 1, true, TimeUnit.MILLISECONDS.toNanos(mockActorContext.getDatastoreContext()
-                .getOperationTimeoutInMillis()) * 2);
-    }
-
-    private void testModificationOperationBatching(final TransactionType type) {
-        int shardBatchedModificationCount = 3;
-        dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), type);
-
-        expectBatchedModifications(actorRef, shardBatchedModificationCount);
-
-        YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
-        NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-        YangInstanceIdentifier writePath3 = TestModel.INNER_LIST_PATH;
-        NormalizedNode<?, ?> writeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
-        YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier mergePath2 = TestModel.OUTER_LIST_PATH;
-        NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-        YangInstanceIdentifier mergePath3 = TestModel.INNER_LIST_PATH;
-        NormalizedNode<?, ?> mergeNode3 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
-        YangInstanceIdentifier deletePath1 = TestModel.TEST_PATH;
-        YangInstanceIdentifier deletePath2 = TestModel.OUTER_LIST_PATH;
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, type);
-
-        transactionProxy.write(writePath1, writeNode1);
-        transactionProxy.write(writePath2, writeNode2);
-        transactionProxy.delete(deletePath1);
-        transactionProxy.merge(mergePath1, mergeNode1);
-        transactionProxy.merge(mergePath2, mergeNode2);
-        transactionProxy.write(writePath3, writeNode3);
-        transactionProxy.merge(mergePath3, mergeNode3);
-        transactionProxy.delete(deletePath2);
-
-        // This sends the last batch.
-        transactionProxy.ready();
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
-                new WriteModification(writePath2, writeNode2), new DeleteModification(deletePath1));
-
-        verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
-                new MergeModification(mergePath2, mergeNode2), new WriteModification(writePath3, writeNode3));
-
-        verifyBatchedModifications(batchedModifications.get(2), true, true,
-                new MergeModification(mergePath3, mergeNode3), new DeleteModification(deletePath2));
-
-        assertEquals("getTotalMessageCount", 3, batchedModifications.get(2).getTotalMessagesSent());
-    }
-
-    @Test
-    public void testReadWriteModificationOperationBatching() {
-        testModificationOperationBatching(READ_WRITE);
-    }
-
-    @Test
-    public void testWriteOnlyModificationOperationBatching() {
-        testModificationOperationBatching(WRITE_ONLY);
-    }
-
-    @Test
-    public void testOptimizedWriteOnlyModificationOperationBatching() {
-        dataStoreContextBuilder.writeOnlyTransactionOptimizationsEnabled(true);
-        testModificationOperationBatching(WRITE_ONLY);
-    }
-
-    @Test
-    public void testModificationOperationBatchingWithInterleavedReads() throws Exception {
-
-        int shardBatchedModificationCount = 10;
-        dataStoreContextBuilder.shardBatchedModificationCount(shardBatchedModificationCount);
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        expectBatchedModifications(actorRef, shardBatchedModificationCount);
-
-        final YangInstanceIdentifier writePath1 = TestModel.TEST_PATH;
-        final NormalizedNode<?, ?> writeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier writePath2 = TestModel.OUTER_LIST_PATH;
-        NormalizedNode<?, ?> writeNode2 = ImmutableNodes.containerNode(TestModel.OUTER_LIST_QNAME);
-
-        final YangInstanceIdentifier mergePath1 = TestModel.TEST_PATH;
-        final NormalizedNode<?, ?> mergeNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        YangInstanceIdentifier mergePath2 = TestModel.INNER_LIST_PATH;
-        NormalizedNode<?, ?> mergeNode2 = ImmutableNodes.containerNode(TestModel.INNER_LIST_QNAME);
-
-        final YangInstanceIdentifier deletePath = TestModel.OUTER_LIST_PATH;
-
-        doReturn(readDataReply(writeNode2)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(writePath2), any(Timeout.class));
-
-        doReturn(readDataReply(mergeNode2)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(mergePath2), any(Timeout.class));
-
-        doReturn(dataExistsReply(true)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);
-
-        transactionProxy.write(writePath1, writeNode1);
-        transactionProxy.write(writePath2, writeNode2);
-
-        Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(writePath2).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-        assertEquals("Response NormalizedNode", writeNode2, readOptional.get());
-
-        transactionProxy.merge(mergePath1, mergeNode1);
-        transactionProxy.merge(mergePath2, mergeNode2);
-
-        readOptional = transactionProxy.read(mergePath2).get(5, TimeUnit.SECONDS);
-
-        transactionProxy.delete(deletePath);
-
-        Boolean exists = transactionProxy.exists(TestModel.TEST_PATH).get();
-        assertEquals("Exists response", Boolean.TRUE, exists);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-        assertEquals("Response NormalizedNode", mergeNode2, readOptional.get());
-
-        List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
-        assertEquals("Captured BatchedModifications count", 3, batchedModifications.size());
-
-        verifyBatchedModifications(batchedModifications.get(0), false, new WriteModification(writePath1, writeNode1),
-                new WriteModification(writePath2, writeNode2));
-
-        verifyBatchedModifications(batchedModifications.get(1), false, new MergeModification(mergePath1, mergeNode1),
-                new MergeModification(mergePath2, mergeNode2));
-
-        verifyBatchedModifications(batchedModifications.get(2), false, new DeleteModification(deletePath));
-
-        InOrder inOrder = Mockito.inOrder(mockActorContext);
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(writePath2), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqReadData(mergePath2), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), isA(BatchedModifications.class), any(Timeout.class));
-
-        inOrder.verify(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqDataExists(), any(Timeout.class));
-    }
-
-    @Test
-    public void testReadRoot() throws InterruptedException, ExecutionException,
-            java.util.concurrent.TimeoutException {
-        SchemaContext schemaContext = SchemaContextHelper.full();
-        Configuration configuration = mock(Configuration.class);
-        doReturn(configuration).when(mockActorContext).getConfiguration();
-        doReturn(schemaContext).when(mockActorContext).getSchemaContext();
-        doReturn(Sets.newHashSet("test", "cars")).when(configuration).getAllShardNames();
-
-        NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
-
-        setUpReadData("test", NormalizedNodeAggregatorTest.getRootNode(expectedNode1, schemaContext));
-        setUpReadData("cars", NormalizedNodeAggregatorTest.getRootNode(expectedNode2, schemaContext));
-
-        doReturn(MemberName.forName(memberName)).when(mockActorContext).getCurrentMemberName();
-
-        doReturn(getSystem().dispatchers().defaultGlobalDispatcher()).when(mockActorContext).getClientDispatcher();
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_ONLY);
-
-        Optional<NormalizedNode<?, ?>> readOptional = transactionProxy.read(
-                YangInstanceIdentifier.EMPTY).get(5, TimeUnit.SECONDS);
-
-        assertTrue("NormalizedNode isPresent", readOptional.isPresent());
-
-        NormalizedNode<?, ?> normalizedNode = readOptional.get();
-
-        assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
-
-        @SuppressWarnings("unchecked")
-        Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
-
-        for (NormalizedNode<?,?> node : collection) {
-            assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
-        }
-
-        assertTrue("Child with QName = " + TestModel.TEST_QNAME + " not found",
-                NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME) != null);
-
-        assertEquals(expectedNode1, NormalizedNodeAggregatorTest.findChildWithQName(collection, TestModel.TEST_QNAME));
-
-        assertTrue("Child with QName = " + CarsModel.BASE_QNAME + " not found",
-                NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME) != null);
-
-        assertEquals(expectedNode2, NormalizedNodeAggregatorTest.findChildWithQName(collection, CarsModel.BASE_QNAME));
-    }
-
-
-    private void setUpReadData(final String shardName, final NormalizedNode<?, ?> expectedNode) {
-        ActorSystem actorSystem = getSystem();
-        ActorRef shardActorRef = getSystem().actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(getSystem().actorSelection(shardActorRef.path())).when(mockActorContext)
-                .actorSelection(shardActorRef.path().toString());
-
-        doReturn(primaryShardInfoReply(getSystem(), shardActorRef)).when(mockActorContext)
-                .findPrimaryShardAsync(eq(shardName));
-
-        ActorRef txActorRef = actorSystem.actorOf(Props.create(DoNothingActor.class));
-
-        doReturn(actorSystem.actorSelection(txActorRef.path())).when(mockActorContext)
-                .actorSelection(txActorRef.path().toString());
-
-        doReturn(Futures.successful(createTransactionReply(txActorRef, DataStoreVersions.CURRENT_VERSION)))
-                .when(mockActorContext).executeOperationAsync(eq(actorSystem.actorSelection(shardActorRef.path())),
-                        eqCreateTransaction(memberName, TransactionType.READ_ONLY), any(Timeout.class));
-
-        doReturn(readDataReply(expectedNode)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(txActorRef)), eqReadData(YangInstanceIdentifier.EMPTY), any(Timeout.class));
-    }
-}
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore;
 
 import com.codahale.metrics.Timer;
@@ -19,7 +18,8 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
  * TransactionRateLimitingCallback computes the new transaction rate limit on the successful completion of a
  * transaction.
  */
-public class TransactionRateLimitingCallback implements OperationCallback {
+@Deprecated(since = "9.0.0", forRemoval = true)
+final class TransactionRateLimitingCallback implements OperationCallback {
     private static Ticker TICKER = Ticker.systemTicker();
 
     private enum State {
@@ -33,7 +33,7 @@ public class TransactionRateLimitingCallback implements OperationCallback {
     private long elapsedTime;
     private volatile State state = State.STOPPED;
 
-    TransactionRateLimitingCallback(ActorUtils actorUtils) {
+    TransactionRateLimitingCallback(final ActorUtils actorUtils) {
         commitTimer = actorUtils.getOperationTimer(ActorUtils.COMMIT);
     }
 
@@ -75,7 +75,7 @@ public class TransactionRateLimitingCallback implements OperationCallback {
     }
 
     @VisibleForTesting
-    static void setTicker(Ticker ticker) {
+    static void setTicker(final Ticker ticker) {
         TICKER = ticker;
     }
 }
index de3a78073f26cf5d72ee87998ed28a74229a28a0..b37dfb545064936ea4936671c8c07d93bf9f1122 100644 (file)
@@ -7,7 +7,7 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import static org.junit.Assert.fail;
+import static org.junit.Assert.assertThrows;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doReturn;
@@ -19,8 +19,9 @@ import com.google.common.base.Ticker;
 import java.util.concurrent.TimeUnit;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 
 /**
@@ -28,14 +29,13 @@ import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class TransactionRateLimitingCallbackTest {
-
     @Mock
     ActorUtils mockContext;
-
     @Mock
     Timer mockTimer;
-
     @Mock
     Ticker mockTicker;
 
@@ -43,7 +43,6 @@ public class TransactionRateLimitingCallbackTest {
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         doReturn(mockTimer).when(mockContext).getOperationTimer(ActorUtils.COMMIT);
         callback = new TransactionRateLimitingCallback(mockContext);
         TransactionRateLimitingCallback.setTicker(mockTicker);
@@ -85,12 +84,7 @@ public class TransactionRateLimitingCallbackTest {
 
     @Test
     public void testSuccessWithoutRun() {
-        try {
-            callback.success();
-            fail("Expected IllegalStateException");
-        } catch (IllegalStateException e) {
-            // expected
-        }
+        final var ex = assertThrows(IllegalStateException.class, callback::success);
 
         verify(mockTimer, never()).update(anyLong(), any(TimeUnit.class));
     }
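
For reference, a minimal sketch of the JUnit 4.13 assertThrows idiom this hunk migrates to, assuming the same static imports from org.junit.Assert and the callback field set up earlier in the test class; assertThrows also returns the thrown exception in case follow-up checks on it are needed:

    @Test
    public void successWithoutRunFails() {
        // Replaces the old try { callback.success(); fail(...); } catch (IllegalStateException expected) { } idiom;
        // assertThrows fails the test if no exception, or an exception of a different type, is thrown.
        assertThrows(IllegalStateException.class, callback::success);
    }
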
index 00ad155425633a9a1e6409657c30a4a73750f5c7..0b45d595c0f19a7d8fbc4c4ad0011b5aee22d9a8 100644 (file)
@@ -16,13 +16,15 @@ import akka.testkit.javadsl.TestKit;
 import java.time.Duration;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeNotificationListenerRegistrationReply;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
 public class DataTreeNotificationListenerRegistrationActorTest extends AbstractActorTest {
     @Mock
     private ListenerRegistration<?> mockListenerReg;
@@ -34,7 +36,6 @@ public class DataTreeNotificationListenerRegistrationActorTest extends AbstractA
 
     @Before
     public void setup() {
-        MockitoAnnotations.initMocks(this);
         DataTreeNotificationListenerRegistrationActor.killDelay = 100;
         kit = new TestKit(getSystem());
     }
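
As a side note, a hedged sketch of the runner-based mock initialization these test classes switch to; ExampleTest is a hypothetical class and Runnable is used only as a convenient mockable type, neither is part of this change:

    import static org.mockito.Mockito.verify;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.mockito.Mock;
    import org.mockito.junit.MockitoJUnitRunner;

    // The runner instantiates every @Mock field before each test and enforces strict
    // stubbing, so the explicit MockitoAnnotations.initMocks(this) call in setUp() goes away.
    @RunWith(MockitoJUnitRunner.StrictStubs.class)
    public class ExampleTest {
        @Mock
        private Runnable collaborator;

        @Test
        public void mockIsInitialized() {
            collaborator.run();          // the field is already a Mockito mock here
            verify(collaborator).run();
        }
    }
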
index 5878675fdf84fe1e122cd858d0469ad604ce1769..0a5c40d29d3ae6b96f5765bfd549e10aff177a70 100644 (file)
@@ -9,11 +9,10 @@ package org.opendaylight.controller.cluster.datastore.actors;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
 
 import akka.actor.ActorRef;
 import akka.testkit.javadsl.TestKit;
-import java.io.ByteArrayInputStream;
+import com.google.common.io.ByteSource;
 import java.io.ByteArrayOutputStream;
 import java.io.ObjectInputStream;
 import java.time.Duration;
@@ -23,21 +22,24 @@ import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
 import org.opendaylight.controller.cluster.datastore.persisted.MetadataShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
 import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.io.InputOutputStreamFactory;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 
 public class ShardSnapshotActorTest extends AbstractActorTest {
-    private static final NormalizedNode<?, ?> DATA = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
+    private static final InputOutputStreamFactory STREAM_FACTORY = InputOutputStreamFactory.simple();
+
+    private static final NormalizedNode DATA = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
 
     private static void testSerializeSnapshot(final String testName, final ShardDataTreeSnapshot snapshot,
             final boolean withInstallSnapshot) throws Exception {
         final TestKit kit = new TestKit(getSystem());
-        final ActorRef snapshotActor = getSystem().actorOf(ShardSnapshotActor.props(), testName);
+        final ActorRef snapshotActor = getSystem().actorOf(ShardSnapshotActor.props(STREAM_FACTORY), testName);
         kit.watch(snapshotActor);
 
-        final NormalizedNode<?, ?> expectedRoot = snapshot.getRootNode().get();
+        final NormalizedNode expectedRoot = snapshot.getRootNode().orElseThrow();
 
         ByteArrayOutputStream installSnapshotStream = withInstallSnapshot ? new ByteArrayOutputStream() : null;
         ShardSnapshotActor.requestSnapshot(snapshotActor, snapshot,
@@ -50,16 +52,13 @@ public class ShardSnapshotActorTest extends AbstractActorTest {
 
         if (installSnapshotStream != null) {
             final ShardDataTreeSnapshot deserialized;
-            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(
-                installSnapshotStream.toByteArray()))) {
-                deserialized = ShardDataTreeSnapshot.deserialize(in);
+            try (ObjectInputStream in = new ObjectInputStream(STREAM_FACTORY.createInputStream(
+                    ByteSource.wrap(installSnapshotStream.toByteArray())))) {
+                deserialized = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
             }
 
             assertEquals("Deserialized snapshot type", snapshot.getClass(), deserialized.getClass());
-
-            final Optional<NormalizedNode<?, ?>> maybeNode = deserialized.getRootNode();
-            assertTrue("isPresent", maybeNode.isPresent());
-            assertEquals("Root node", expectedRoot, maybeNode.get());
+            assertEquals("Root node", Optional.of(expectedRoot), deserialized.getRootNode());
         }
     }
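
As an aside, a minimal sketch of the read path exercised by this hunk, limited to the calls visible above (InputOutputStreamFactory.simple(), createInputStream(ByteSource), ShardDataTreeSnapshot.deserialize); bytes is assumed to hold a previously serialized snapshot and the enclosing method is assumed to declare throws Exception:

    final InputOutputStreamFactory factory = InputOutputStreamFactory.simple();
    try (ObjectInputStream in = new ObjectInputStream(
            factory.createInputStream(ByteSource.wrap(bytes)))) {
        // deserialize() yields a versioned wrapper; getSnapshot() unwraps the actual snapshot
        final ShardDataTreeSnapshot snapshot = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
        // snapshot.getRootNode() then exposes the stored NormalizedNode, matching the assertions above
    }
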
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplBaseTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplBaseTest.java
new file mode 100644 (file)
index 0000000..c51fea8
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.config;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import com.google.common.collect.ImmutableSortedSet;
+import com.google.common.collect.Sets;
+import java.util.Collection;
+import java.util.Set;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
+import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
+
+public abstract class ConfigurationImplBaseTest {
+    private static final MemberName MEMBER_1 = MemberName.forName("member-1");
+    private static final MemberName MEMBER_2 = MemberName.forName("member-2");
+    private static final MemberName MEMBER_3 = MemberName.forName("member-3");
+    private static final MemberName MEMBER_4 = MemberName.forName("member-4");
+    private static final MemberName MEMBER_5 = MemberName.forName("member-5");
+    private static final MemberName MEMBER_100 = MemberName.forName("member-100");
+
+    private ConfigurationImpl configuration;
+
+    @Before
+    public void setup() {
+        this.configuration = createConfiguration();
+    }
+
+    public abstract ConfigurationImpl createConfiguration();
+
+    @Test
+    public void testConstructor() {
+        Assert.assertNotNull(configuration);
+    }
+
+    @Test
+    public void testGetMemberShardNames() {
+        Collection<String> memberShardNames = configuration.getMemberShardNames(MEMBER_1);
+        assertEquals("getMemberShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default"),
+                ImmutableSortedSet.copyOf(memberShardNames));
+
+        memberShardNames = configuration.getMemberShardNames(MEMBER_2);
+        assertEquals("getMemberShardNames", ImmutableSortedSet.of("default"),
+                ImmutableSortedSet.copyOf(memberShardNames));
+
+        memberShardNames = configuration.getMemberShardNames(MEMBER_100);
+        assertEquals("getMemberShardNames size", 0, memberShardNames.size());
+    }
+
+    @Test
+    public void testGetMembersFromShardName() {
+        Collection<MemberName> members = configuration.getMembersFromShardName("default");
+        assertEquals("getMembersFromShardName", ImmutableSortedSet.of(MEMBER_1, MEMBER_2, MEMBER_3),
+                ImmutableSortedSet.copyOf(members));
+
+        members = configuration.getMembersFromShardName("cars-1");
+        assertEquals("getMembersFromShardName", ImmutableSortedSet.of(MEMBER_1),
+                ImmutableSortedSet.copyOf(members));
+
+        // Try to find a shard which is not present
+
+        members = configuration.getMembersFromShardName("foobar");
+        assertEquals("getMembersFromShardName size", 0, members.size());
+    }
+
+    @Test
+    public void testGetAllShardNames() {
+        Set<String> allShardNames = configuration.getAllShardNames();
+        assertEquals("getAllShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default"),
+                ImmutableSortedSet.copyOf(allShardNames));
+    }
+
+    @Test
+    public void testGetModuleNameFromNameSpace() {
+        String moduleName = configuration.getModuleNameFromNameSpace(
+                "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test:cars");
+        assertEquals("getModuleNameFromNameSpace", "cars", moduleName);
+
+        moduleName = configuration.getModuleNameFromNameSpace(
+                "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test");
+        assertEquals("getModuleNameFromNameSpace", "test", moduleName);
+
+        moduleName = configuration.getModuleNameFromNameSpace("non-existent");
+        assertNull("getModuleNameFromNameSpace - expected null", moduleName);
+    }
+
+    @Test
+    public void testGetStrategyForModule() {
+        ShardStrategy strategy = configuration.getStrategyForModule("cars");
+        assertNotNull("getStrategyForModule null", strategy);
+        assertEquals("getStrategyForModule type", ModuleShardStrategy.class, strategy.getClass());
+
+        strategy = configuration.getStrategyForModule("people");
+        assertNotNull("getStrategyForModule null", strategy);
+        assertEquals("getStrategyForModule type", ModuleShardStrategy.class, strategy.getClass());
+
+        strategy = configuration.getStrategyForModule("default");
+        assertNull("getStrategyForModule - expected null", strategy);
+
+        strategy = configuration.getStrategyForModule("non-existent");
+        assertNull("getStrategyForModule - expected null", strategy);
+    }
+
+    @Test
+    public void testGetShardNameForModule() {
+        String shardName = configuration.getShardNameForModule("cars");
+        assertEquals("getShardNameForModule", "cars-1", shardName);
+
+        shardName = configuration.getShardNameForModule("people");
+        assertEquals("getShardNameForModule", "people-1", shardName);
+
+        shardName = configuration.getShardNameForModule("non-existent");
+        assertNull("getShardNameForModule - expected null", shardName);
+    }
+
+    @Test
+    public void testAddModuleShardConfiguration() throws Exception {
+        XMLNamespace namespace = XMLNamespace.of("urn:opendaylight:test:oven");
+        String moduleName = "oven";
+        String shardName = "oven-shard";
+        String shardStrategyName = ModuleShardStrategy.NAME;
+        Collection<MemberName> shardMemberNames = ImmutableSortedSet.of(MEMBER_1, MEMBER_4, MEMBER_5);
+
+        configuration.addModuleShardConfiguration(new ModuleShardConfiguration(namespace, moduleName, shardName,
+                shardStrategyName, shardMemberNames));
+
+        assertEquals("getMemberShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default", shardName),
+                ImmutableSortedSet.copyOf(configuration.getMemberShardNames(MEMBER_1)));
+        assertEquals("getMemberShardNames", ImmutableSortedSet.of(shardName),
+                ImmutableSortedSet.copyOf(configuration.getMemberShardNames(MEMBER_4)));
+        assertEquals("getMemberShardNames", ImmutableSortedSet.of(shardName),
+                ImmutableSortedSet.copyOf(configuration.getMemberShardNames(MEMBER_5)));
+        assertEquals("getMembersFromShardName", shardMemberNames,
+                ImmutableSortedSet.copyOf(configuration.getMembersFromShardName(shardName)));
+        assertEquals("getShardNameForModule", shardName, configuration.getShardNameForModule(moduleName));
+        assertEquals("getModuleNameFromNameSpace", moduleName,
+                configuration.getModuleNameFromNameSpace(namespace.toString()));
+        assertEquals("getAllShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default", shardName),
+                ImmutableSortedSet.copyOf(configuration.getAllShardNames()));
+
+        ShardStrategy strategy = configuration.getStrategyForModule("cars");
+        assertNotNull("getStrategyForModule null", strategy);
+        assertEquals("getStrategyForModule type", ModuleShardStrategy.class, strategy.getClass());
+    }
+
+    @Test
+    public void testGetUniqueMemberNamesForAllShards() {
+        assertEquals("getUniqueMemberNamesForAllShards", Sets.newHashSet(MEMBER_1, MEMBER_2, MEMBER_3),
+                configuration.getUniqueMemberNamesForAllShards());
+    }
+
+    @Test
+    public void testAddMemberReplicaForShard() {
+        configuration.addMemberReplicaForShard("people-1", MEMBER_2);
+        String shardName = configuration.getShardNameForModule("people");
+        assertEquals("ModuleShardName", "people-1", shardName);
+        ShardStrategy shardStrategy = configuration.getStrategyForModule("people");
+        assertEquals("ModuleStrategy", ModuleShardStrategy.class, shardStrategy.getClass());
+        Collection<MemberName> members = configuration.getMembersFromShardName("people-1");
+        assertEquals("Members", ImmutableSortedSet.of(MEMBER_1, MEMBER_2),
+                ImmutableSortedSet.copyOf(members));
+
+        configuration.addMemberReplicaForShard("non-existent", MEMBER_2);
+        Set<String> shardNames = configuration.getAllShardNames();
+        assertEquals("ShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default"),
+                ImmutableSortedSet.copyOf(shardNames));
+    }
+
+    @Test
+    public void testRemoveMemberReplicaForShard() {
+        configuration.removeMemberReplicaForShard("default", MEMBER_2);
+        String shardName = configuration.getShardNameForModule("default");
+        assertEquals("ModuleShardName", "default", shardName);
+        ShardStrategy shardStrategy = configuration.getStrategyForModule("default");
+        assertNull("ModuleStrategy", shardStrategy);
+        Collection<MemberName> members = configuration.getMembersFromShardName("default");
+        assertEquals("Members", ImmutableSortedSet.of(MEMBER_1, MEMBER_3),
+                ImmutableSortedSet.copyOf(members));
+
+        configuration.removeMemberReplicaForShard("non-existent", MEMBER_2);
+        Set<String> shardNames = configuration.getAllShardNames();
+        assertEquals("ShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default"),
+                ImmutableSortedSet.copyOf(shardNames));
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplFileTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplFileTest.java
new file mode 100644 (file)
index 0000000..3c2ad7d
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.config;
+
+public class ConfigurationImplFileTest extends ConfigurationImplBaseTest {
+
+    @Override
+    public ConfigurationImpl createConfiguration() {
+        return new ConfigurationImpl("module-shards.conf", "modules.conf");
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplHybridTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplHybridTest.java
new file mode 100644 (file)
index 0000000..2174e07
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.config;
+
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import java.util.List;
+import org.junit.Test;
+
+public class ConfigurationImplHybridTest extends ConfigurationImplBaseTest {
+
+    @Override
+    public ConfigurationImpl createConfiguration() {
+        Config moduleShardsConf = generateModuleShards(List.of(
+                generateShard("default", "default", List.of("member-1", "member-2", "member-3")),
+                generateShard("people", "people-1", List.of("member-1")),
+                generateShard("cars", "cars-1", List.of("member-1")),
+                generateShard("test", "test-1", List.of("member-1"))
+        ));
+        return new ConfigurationImpl(new HybridModuleShardConfigProvider(moduleShardsConf, "modules.conf"));
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testNullModuleShardsConf() {
+        new HybridModuleShardConfigProvider(null, "modules.conf");
+    }
+
+    private static Config generateModuleShards(final List<String> shards) {
+        String moduleShardsContent = String.format("module-shards = [%n%s]", String.join(",\n", shards));
+        return ConfigFactory.parseString(moduleShardsContent);
+    }
+
+    private static String generateShard(final String name, final String shardsName, final List<String> replicas) {
+        return "    {"
+                + "        name = \"" + name + "\"\n"
+                + "        shards = [\n"
+                + "            {\n"
+                + "                name=\"" + shardsName + "\"\n"
+                + "                replicas = " + replicas
+                + "                \n"
+                + "            }\n"
+                + "        ]\n"
+                + "    }";
+    }
+}
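For reference, a minimal standalone sketch (not part of this change) of the HOCON shape that the generateModuleShards()/generateShard() helpers above assemble, and of reading it back through Typesafe Config; the class name ModuleShardsConfDemo and the literal module/shard/member values are illustrative only:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    public final class ModuleShardsConfDemo {
        public static void main(final String[] args) {
            // Roughly what generateModuleShards(List.of(generateShard("people", "people-1", List.of("member-1")))) yields
            String conf = "module-shards = [\n"
                + "    {\n"
                + "        name = \"people\"\n"
                + "        shards = [\n"
                + "            {\n"
                + "                name = \"people-1\"\n"
                + "                replicas = [ \"member-1\" ]\n"
                + "            }\n"
                + "        ]\n"
                + "    }\n"
                + "]";
            Config moduleShards = ConfigFactory.parseString(conf);
            for (Config module : moduleShards.getConfigList("module-shards")) {
                for (Config shard : module.getConfigList("shards")) {
                    // Prints: people -> people-1 replicas=[member-1]
                    System.out.println(module.getString("name") + " -> " + shard.getString("name")
                        + " replicas=" + shard.getStringList("replicas"));
                }
            }
        }
    }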
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImplTest.java
index 68bad4f4c02076bc4f5aa5b09017373d87554935..1b13be7c8d2dcd8e8d9a5104266f88386d9b0260 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.config;
 
 import static org.junit.Assert.assertEquals;
@@ -14,7 +13,6 @@ import static org.junit.Assert.assertNull;
 
 import com.google.common.collect.ImmutableSortedSet;
 import com.google.common.collect.Sets;
-import java.net.URI;
 import java.util.Collection;
 import java.util.Set;
 import org.junit.Assert;
@@ -23,6 +21,7 @@ import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ModuleShardStrategy;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
 
 public class ConfigurationImplTest {
     private static final MemberName MEMBER_1 = MemberName.forName("member-1");
@@ -126,7 +125,7 @@ public class ConfigurationImplTest {
 
     @Test
     public void testAddModuleShardConfiguration() throws Exception {
-        URI namespace = new URI("urn:opendaylight:test:oven");
+        XMLNamespace namespace = XMLNamespace.of("urn:opendaylight:test:oven");
         String moduleName = "oven";
         String shardName = "oven-shard";
         String shardStrategyName = ModuleShardStrategy.NAME;
@@ -145,7 +144,7 @@ public class ConfigurationImplTest {
                 ImmutableSortedSet.copyOf(configuration.getMembersFromShardName(shardName)));
         assertEquals("getShardNameForModule", shardName, configuration.getShardNameForModule(moduleName));
         assertEquals("getModuleNameFromNameSpace", moduleName,
-                configuration.getModuleNameFromNameSpace(namespace.toASCIIString()));
+                configuration.getModuleNameFromNameSpace(namespace.toString()));
         assertEquals("getAllShardNames", ImmutableSortedSet.of("people-1", "cars-1", "test-1", "default", shardName),
                 ImmutableSortedSet.copyOf(configuration.getAllShardNames()));
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractClusterRefEntityOwnershipTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractClusterRefEntityOwnershipTest.java
deleted file mode 100644 (file)
index 4c8cfcc..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import akka.actor.ActorSystem;
-import akka.testkit.javadsl.TestKit;
-import com.typesafe.config.ConfigFactory;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-public class AbstractClusterRefEntityOwnershipTest extends AbstractEntityOwnershipTest {
-
-    private static ActorSystem system;
-
-    @BeforeClass
-    public static void setUpClass() {
-        system = ActorSystem.create("test", ConfigFactory.load().getConfig("test-config"));
-    }
-
-    @AfterClass
-    public static void tearDownClass() {
-        TestKit.shutdownActorSystem(system);
-        system = null;
-    }
-
-    protected static ActorSystem getSystem() {
-        return system;
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractEntityOwnershipTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/AbstractEntityOwnershipTest.java
deleted file mode 100644 (file)
index 9864323..0000000
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.argThat;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.CANDIDATE_NAME_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNER_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_TYPE_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityPath;
-
-import akka.pattern.Patterns;
-import akka.testkit.TestActorRef;
-import akka.util.Timeout;
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.function.Consumer;
-import org.junit.Assert;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.AbstractShardTest;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.raft.client.messages.GetOnDemandRaftState;
-import org.opendaylight.controller.cluster.raft.client.messages.OnDemandRaftState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.EntityOwners;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.EntityType;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Abstract base class providing utility methods.
- *
- * @author Thomas Pantelis
- */
-public class AbstractEntityOwnershipTest extends AbstractActorTest {
-    protected final Logger testLog = LoggerFactory.getLogger(getClass());
-
-    private static final AtomicInteger NEXT_SHARD_NUM = new AtomicInteger();
-
-    protected void verifyEntityCandidate(final NormalizedNode<?, ?> node, final String entityType,
-            final YangInstanceIdentifier entityId, final String candidateName, final boolean expectPresent) {
-        try {
-            assertNotNull("Missing " + EntityOwners.QNAME.toString(), node);
-            assertTrue(node instanceof ContainerNode);
-
-            ContainerNode entityOwnersNode = (ContainerNode) node;
-
-            MapEntryNode entityTypeEntry = getMapEntryNodeChild(entityOwnersNode, EntityType.QNAME,
-                    ENTITY_TYPE_QNAME, entityType, true);
-
-            MapEntryNode entityEntry = getMapEntryNodeChild(entityTypeEntry, ENTITY_QNAME, ENTITY_ID_QNAME,
-                    entityId, true);
-
-            getMapEntryNodeChild(entityEntry, Candidate.QNAME, CANDIDATE_NAME_QNAME, candidateName, expectPresent);
-        } catch (AssertionError e) {
-            throw new AssertionError("Verification of entity candidate failed - returned data was: " + node, e);
-        }
-    }
-
-    protected void verifyEntityCandidate(final String entityType, final YangInstanceIdentifier entityId,
-            final String candidateName, final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader,
-            final boolean expectPresent) {
-        AssertionError lastError = null;
-        Stopwatch sw = Stopwatch.createStarted();
-        while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
-            NormalizedNode<?, ?> node = reader.apply(ENTITY_OWNERS_PATH);
-            try {
-                verifyEntityCandidate(node, entityType, entityId, candidateName, expectPresent);
-                return;
-            } catch (AssertionError e) {
-                lastError = e;
-                Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
-            }
-        }
-
-        throw lastError;
-    }
-
-    protected void verifyEntityCandidate(final String entityType, final YangInstanceIdentifier entityId,
-            final String candidateName, final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader) {
-        verifyEntityCandidate(entityType, entityId, candidateName, reader, true);
-    }
-
-    protected MapEntryNode getMapEntryNodeChild(final DataContainerNode<? extends PathArgument> parent,
-            final QName childMap, final QName child, final Object key, final boolean expectPresent) {
-        Optional<DataContainerChild<? extends PathArgument, ?>> childNode =
-                parent.getChild(new NodeIdentifier(childMap));
-        // We have to account for empty maps disappearing. If we expect the entry to be non-present, tolerate a missing
-        // map.
-        if (!expectPresent && !childNode.isPresent()) {
-            return null;
-        }
-
-        assertTrue("Missing " + childMap.toString(), childNode.isPresent());
-
-        MapNode entityTypeMapNode = (MapNode) childNode.get();
-        Optional<MapEntryNode> entityTypeEntry = entityTypeMapNode.getChild(new NodeIdentifierWithPredicates(
-                childMap, child, key));
-        if (expectPresent && !entityTypeEntry.isPresent()) {
-            fail("Missing " + childMap.toString() + " entry for " + key + ". Actual: " + entityTypeMapNode.getValue());
-        } else if (!expectPresent && entityTypeEntry.isPresent()) {
-            fail("Found unexpected " + childMap.toString() + " entry for " + key);
-        }
-
-        return entityTypeEntry.isPresent() ? entityTypeEntry.get() : null;
-    }
-
-    static void verifyOwner(final String expected, final String entityType, final YangInstanceIdentifier entityId,
-            final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader) {
-        AssertionError lastError = null;
-        YangInstanceIdentifier entityPath = entityPath(entityType, entityId).node(ENTITY_OWNER_QNAME);
-        Stopwatch sw = Stopwatch.createStarted();
-        while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
-            try {
-                NormalizedNode<?, ?> node = reader.apply(entityPath);
-                Assert.assertNotNull("Owner was not set for entityId: " + entityId, node);
-                Assert.assertEquals("Entity owner", expected, node.getValue().toString());
-                return;
-            } catch (AssertionError e) {
-                lastError = e;
-                Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
-            }
-        }
-
-        throw lastError;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    static void verifyOwner(final TestActorRef<? extends EntityOwnershipShard> shard, final String entityType,
-            final YangInstanceIdentifier entityId, final String localMemberName) {
-        verifyOwner(localMemberName, entityType, entityId, path -> {
-            try {
-                return AbstractShardTest.readStore(shard, path);
-            } catch (Exception e) {
-                return null;
-            }
-        });
-    }
-
-    protected void verifyNodeRemoved(final YangInstanceIdentifier path,
-            final Function<YangInstanceIdentifier,NormalizedNode<?,?>> reader) {
-        AssertionError lastError = null;
-        Stopwatch sw = Stopwatch.createStarted();
-        while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
-            try {
-                NormalizedNode<?, ?> node = reader.apply(path);
-                Assert.assertNull("Node was not removed at path: " + path, node);
-                return;
-            } catch (AssertionError e) {
-                lastError = e;
-                Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
-            }
-        }
-
-        throw lastError;
-    }
-
-    static void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node,
-            final ShardDataTree shardDataTree) throws DataValidationFailedException {
-        DataTreeModification modification = shardDataTree.newModification();
-        modification.merge(path, node);
-        commit(shardDataTree, modification);
-    }
-
-    static void deleteNode(final YangInstanceIdentifier path, final ShardDataTree shardDataTree)
-            throws DataValidationFailedException {
-        DataTreeModification modification = shardDataTree.newModification();
-        modification.delete(path);
-        commit(shardDataTree, modification);
-    }
-
-    static void commit(final ShardDataTree shardDataTree, final DataTreeModification modification)
-            throws DataValidationFailedException {
-        modification.ready();
-        shardDataTree.getDataTree().validate(modification);
-        final DataTreeCandidate candidate = shardDataTree.getDataTree().prepare(modification);
-        shardDataTree.getDataTree().commit(candidate);
-        shardDataTree.notifyListeners(candidate);
-    }
-
-    static DOMEntityOwnershipChange ownershipChange(final DOMEntity expEntity, final boolean expWasOwner,
-            final boolean expIsOwner, final boolean expHasOwner) {
-        return ownershipChange(expEntity, expWasOwner, expIsOwner, expHasOwner, false);
-    }
-
-    static DOMEntityOwnershipChange ownershipChange(final DOMEntity expEntity, final boolean expWasOwner,
-            final boolean expIsOwner, final boolean expHasOwner, final boolean expInJeopardy) {
-        return argThat(change -> expEntity.equals(change.getEntity()) && expWasOwner == change.getState().wasOwner()
-                && expIsOwner == change.getState().isOwner() && expHasOwner == change.getState().hasOwner()
-                && expInJeopardy == change.inJeopardy());
-    }
-
-    static DOMEntityOwnershipChange ownershipChange(final DOMEntity expEntity) {
-        return argThat(change -> expEntity.equals(change.getEntity()));
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    static void verifyNoOwnerSet(final TestActorRef<? extends EntityOwnershipShard> shard, final String entityType,
-            final YangInstanceIdentifier entityId) {
-        YangInstanceIdentifier entityPath = entityPath(entityType, entityId).node(ENTITY_OWNER_QNAME);
-        try {
-            NormalizedNode<?, ?> node = AbstractShardTest.readStore(shard, entityPath);
-            if (node != null) {
-                Assert.fail("Owner " + node.getValue() + " was set for " + entityPath);
-            }
-
-        } catch (Exception e) {
-            throw new AssertionError("read failed", e);
-        }
-    }
-
-    static void verifyRaftState(final TestActorRef<? extends EntityOwnershipShard> shard,
-            final Consumer<OnDemandRaftState> verifier)
-            throws Exception {
-        AssertionError lastError = null;
-        Stopwatch sw = Stopwatch.createStarted();
-        while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
-            FiniteDuration operationDuration = FiniteDuration.create(5, TimeUnit.SECONDS);
-            Future<Object> future = Patterns.ask(shard, GetOnDemandRaftState.INSTANCE, new Timeout(operationDuration));
-            OnDemandRaftState raftState = (OnDemandRaftState)Await.result(future, operationDuration);
-            try {
-                verifier.accept(raftState);
-                return;
-            } catch (AssertionError e) {
-                lastError = e;
-                Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
-            }
-        }
-
-        throw lastError;
-    }
-
-    static ShardIdentifier newShardId(final String memberName) {
-        return ShardIdentifier.create("entity-ownership", MemberName.forName(memberName),
-            "operational" + NEXT_SHARD_NUM.getAndIncrement());
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    void verifyEntityCandidateRemoved(final TestActorRef<EntityOwnershipShard> shard, final String entityType,
-            final YangInstanceIdentifier entityId, final String candidateName) {
-        verifyNodeRemoved(candidatePath(entityType, entityId, candidateName), path -> {
-            try {
-                return AbstractShardTest.readStore(shard, path);
-            } catch (Exception e) {
-                throw new AssertionError("Failed to read " + path, e);
-            }
-        });
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    void verifyCommittedEntityCandidate(final TestActorRef<? extends EntityOwnershipShard> shard,
-            final String entityType, final YangInstanceIdentifier entityId, final String candidateName) {
-        verifyEntityCandidate(entityType, entityId, candidateName, path -> {
-            try {
-                return AbstractShardTest.readStore(shard, path);
-            } catch (Exception e) {
-                throw new AssertionError("Failed to read " + path, e);
-            }
-        });
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    void verifyNoEntityCandidate(final TestActorRef<? extends EntityOwnershipShard> shard, final String entityType,
-            final YangInstanceIdentifier entityId, final String candidateName) {
-        verifyEntityCandidate(entityType, entityId, candidateName, path -> {
-            try {
-                return AbstractShardTest.readStore(shard, path);
-            } catch (Exception e) {
-                throw new AssertionError("Failed to read " + path, e);
-            }
-        }, false);
-    }
-}
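For reference, the removed helpers above (verifyEntityCandidate, verifyOwner, verifyNodeRemoved, verifyRaftState) all follow the same pattern: re-run an assertion against the shard until it passes or a five-second budget expires. A distilled, standalone sketch of that pattern, using the hypothetical names RetryAssert and assertEventually:

    import com.google.common.base.Stopwatch;
    import com.google.common.util.concurrent.Uninterruptibles;
    import java.util.concurrent.TimeUnit;

    final class RetryAssert {
        private RetryAssert() {
            // utility class
        }

        // Re-runs the assertion until it passes or 5 seconds elapse, sleeping 100 ms between attempts,
        // then rethrows the last failure - the same Stopwatch/sleep loop the helpers above used.
        static void assertEventually(final Runnable assertion) {
            AssertionError lastError = null;
            Stopwatch sw = Stopwatch.createStarted();
            while (sw.elapsed(TimeUnit.MILLISECONDS) <= 5000) {
                try {
                    assertion.run();
                    return;
                } catch (AssertionError e) {
                    lastError = e;
                    Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
                }
            }
            throw lastError;
        }
    }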
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/CandidateListChangeListenerTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/CandidateListChangeListenerTest.java
deleted file mode 100644 (file)
index d79de61..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityPath;
-
-import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableSet;
-import java.time.Duration;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.CandidateRemoved;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-
-/**
- * Unit tests for CandidateListChangeListener.
- *
- * @author Thomas Pantelis
- */
-public class CandidateListChangeListenerTest extends AbstractActorTest {
-    private static final String ENTITY_TYPE = "test";
-    private static final YangInstanceIdentifier ENTITY_ID1 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
-    private static final YangInstanceIdentifier ENTITY_ID2 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
-
-    private ShardDataTree shardDataTree;
-
-    @Mock
-    private Shard mockShard;
-
-    @Before
-    public void setup() {
-        MockitoAnnotations.initMocks(this);
-        shardDataTree = new ShardDataTree(mockShard, SchemaContextHelper.entityOwners(), TreeType.OPERATIONAL);
-    }
-
-    @Test
-    public void testOnDataTreeChanged() throws Exception {
-        TestKit kit = new TestKit(getSystem());
-
-        new CandidateListChangeListener(kit.getRef(), "test").init(shardDataTree);
-
-        String memberName1 = "member-1";
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, memberName1));
-
-        CandidateAdded candidateAdded = kit.expectMsgClass(CandidateAdded.class);
-        assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateAdded.getEntityPath());
-        assertEquals("getNewCandidate", memberName1, candidateAdded.getNewCandidate());
-        assertEquals("getAllCandidates", ImmutableSet.of(memberName1),
-                ImmutableSet.copyOf(candidateAdded.getAllCandidates()));
-
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, memberName1));
-        kit.expectNoMessage(Duration.ofMillis(500));
-
-        String memberName2 = "member-2";
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, memberName2));
-
-        candidateAdded = kit.expectMsgClass(CandidateAdded.class);
-        assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateAdded.getEntityPath());
-        assertEquals("getNewCandidate", memberName2, candidateAdded.getNewCandidate());
-        assertEquals("getAllCandidates", ImmutableSet.of(memberName1, memberName2),
-                ImmutableSet.copyOf(candidateAdded.getAllCandidates()));
-
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID2, memberName1));
-
-        candidateAdded = kit.expectMsgClass(CandidateAdded.class);
-        assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID2), candidateAdded.getEntityPath());
-        assertEquals("getNewCandidate", memberName1, candidateAdded.getNewCandidate());
-        assertEquals("getAllCandidates", ImmutableSet.of(memberName1),
-                ImmutableSet.copyOf(candidateAdded.getAllCandidates()));
-
-        deleteNode(candidatePath(ENTITY_TYPE, ENTITY_ID1, memberName1));
-
-        CandidateRemoved candidateRemoved = kit.expectMsgClass(CandidateRemoved.class);
-        assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateRemoved.getEntityPath());
-        assertEquals("getRemovedCandidate", memberName1, candidateRemoved.getRemovedCandidate());
-        assertEquals("getRemainingCandidates", ImmutableSet.of(memberName2),
-                ImmutableSet.copyOf(candidateRemoved.getRemainingCandidates()));
-
-        deleteNode(candidatePath(ENTITY_TYPE, ENTITY_ID1, memberName2));
-
-        candidateRemoved = kit.expectMsgClass(CandidateRemoved.class);
-        assertEquals("getEntityId", entityPath(ENTITY_TYPE, ENTITY_ID1), candidateRemoved.getEntityPath());
-        assertEquals("getRemovedCandidate", memberName2, candidateRemoved.getRemovedCandidate());
-        assertEquals("getRemainingCandidates", ImmutableSet.of(),
-                ImmutableSet.copyOf(candidateRemoved.getRemainingCandidates()));
-    }
-
-    private void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
-            throws DataValidationFailedException {
-        AbstractEntityOwnershipTest.writeNode(path, node, shardDataTree);
-    }
-
-    private void deleteNode(final YangInstanceIdentifier path) throws DataValidationFailedException {
-        AbstractEntityOwnershipTest.deleteNode(path, shardDataTree);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipIntegrationTest.java
deleted file mode 100644 (file)
index ad80b86..0000000
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.AdditionalMatchers.or;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.MemberNode.verifyRaftState;
-import static org.opendaylight.controller.cluster.datastore.entityownership.AbstractEntityOwnershipTest.ownershipChange;
-import static org.opendaylight.controller.cluster.datastore.entityownership.DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.CANDIDATE_NAME_NODE_ID;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityPath;
-
-import akka.actor.ActorRef;
-import akka.actor.Status.Failure;
-import akka.actor.Status.Success;
-import akka.cluster.Cluster;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.mockito.exceptions.base.MockitoException;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
-import org.opendaylight.controller.cluster.datastore.MemberNode;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.datastore.messages.AddShardReplica;
-import org.opendaylight.controller.cluster.datastore.messages.ChangeShardMembersVotingStatus;
-import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.clustering.entity.owners.rev150804.entity.owners.entity.type.entity.Candidate;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * End-to-end integration tests for the entity ownership functionality.
- *
- * @author Thomas Pantelis
- */
-public class DistributedEntityOwnershipIntegrationTest {
-    private static final String MODULE_SHARDS_CONFIG = "module-shards-default.conf";
-    private static final String MODULE_SHARDS_5_NODE_CONFIG = "module-shards-default-5-node.conf";
-    private static final String MODULE_SHARDS_MEMBER_1_CONFIG = "module-shards-default-member-1.conf";
-    private static final String ENTITY_TYPE1 = "entityType1";
-    private static final String ENTITY_TYPE2 = "entityType2";
-    private static final DOMEntity ENTITY1 = new DOMEntity(ENTITY_TYPE1, "entity1");
-    private static final DOMEntity ENTITY1_2 = new DOMEntity(ENTITY_TYPE2, "entity1");
-    private static final DOMEntity ENTITY2 = new DOMEntity(ENTITY_TYPE1, "entity2");
-    private static final DOMEntity ENTITY3 = new DOMEntity(ENTITY_TYPE1, "entity3");
-    private static final DOMEntity ENTITY4 = new DOMEntity(ENTITY_TYPE1, "entity4");
-    private static final SchemaContext SCHEMA_CONTEXT = SchemaContextHelper.entityOwners();
-
-    private final DatastoreContext.Builder leaderDatastoreContextBuilder =
-            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5)
-                    .shardIsolatedLeaderCheckIntervalInMillis(1000000);
-
-    private final DatastoreContext.Builder followerDatastoreContextBuilder =
-            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(10000);
-
-    private final List<MemberNode> memberNodes = new ArrayList<>();
-
-    @Mock
-    private DOMEntityOwnershipListener leaderMockListener;
-
-    @Mock
-    private DOMEntityOwnershipListener leaderMockListener2;
-
-    @Mock
-    private DOMEntityOwnershipListener follower1MockListener;
-
-    @Mock
-    private DOMEntityOwnershipListener follower2MockListener;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        InMemoryJournal.clear();
-        InMemorySnapshotStore.clear();
-    }
-
-    @After
-    public void tearDown() {
-        for (MemberNode m : Lists.reverse(memberNodes)) {
-            m.cleanup();
-        }
-        memberNodes.clear();
-    }
-
-    private static DistributedEntityOwnershipService newOwnershipService(final AbstractDataStore datastore) {
-        return DistributedEntityOwnershipService.start(datastore.getActorUtils(),
-                EntityOwnerSelectionStrategyConfig.newBuilder().build());
-    }
-
-    @Test
-    public void testFunctionalityWithThreeNodes() throws Exception {
-        String name = "testFunctionalityWithThreeNodes";
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
-        leaderDistributedDataStore.waitTillReady();
-        follower1Node.configDataStore().waitTillReady();
-        follower2Node.configDataStore().waitTillReady();
-
-        final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-        final DOMEntityOwnershipService follower1EntityOwnershipService =
-                newOwnershipService(follower1Node.configDataStore());
-        final DOMEntityOwnershipService follower2EntityOwnershipService =
-                newOwnershipService(follower2Node.configDataStore());
-
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
-        leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
-        leaderEntityOwnershipService.registerListener(ENTITY_TYPE2, leaderMockListener2);
-        follower1EntityOwnershipService.registerListener(ENTITY_TYPE1, follower1MockListener);
-
-        // Register leader candidate for entity1 and verify it becomes owner
-
-        leaderEntityOwnershipService.registerCandidate(ENTITY1);
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, true, true));
-        verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-        reset(leaderMockListener, follower1MockListener);
-
-        verifyGetOwnershipState(leaderEntityOwnershipService, ENTITY1, EntityOwnershipState.IS_OWNER);
-        verifyGetOwnershipState(follower1EntityOwnershipService, ENTITY1, EntityOwnershipState.OWNED_BY_OTHER);
-
-        // Register leader candidate for entity1_2 (same id, different type) and verify it becomes owner
-
-        leaderEntityOwnershipService.registerCandidate(ENTITY1_2);
-        verify(leaderMockListener2, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1_2, false, true, true));
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(leaderMockListener, never()).ownershipChanged(ownershipChange(ENTITY1_2));
-        reset(leaderMockListener2);
-
-        // Register follower1 candidate for entity1 and verify it gets added but doesn't become owner
-
-        follower1EntityOwnershipService.registerCandidate(ENTITY1);
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-1", "member-2");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-1");
-        verifyOwner(follower2Node.configDataStore(), ENTITY1, "member-1");
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(leaderMockListener, never()).ownershipChanged(ownershipChange(ENTITY1));
-        verify(follower1MockListener, never()).ownershipChanged(ownershipChange(ENTITY1));
-
-        // Register follower1 candidate for entity2 and verify it becomes owner
-
-        final DOMEntityOwnershipCandidateRegistration follower1Entity2Reg =
-                follower1EntityOwnershipService.registerCandidate(ENTITY2);
-        verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, true, true));
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, false, true));
-        verifyOwner(follower2Node.configDataStore(), ENTITY2, "member-2");
-        reset(leaderMockListener, follower1MockListener);
-
-        // Register follower2 candidate for entity2 and verify it gets added but doesn't become owner
-
-        follower2EntityOwnershipService.registerListener(ENTITY_TYPE1, follower2MockListener);
-        verify(follower2MockListener, timeout(5000).times(2)).ownershipChanged(or(
-                ownershipChange(ENTITY1, false, false, true), ownershipChange(ENTITY2, false, false, true)));
-
-        follower2EntityOwnershipService.registerCandidate(ENTITY2);
-        verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-2", "member-3");
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-2");
-
-        // Unregister follower1 candidate for entity2 and verify follower2 becomes owner
-
-        follower1Entity2Reg.close();
-        verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-3");
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-3");
-        verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, true, false, true));
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, false, true));
-
-        // Depending on timing, follower2MockListener could get ownershipChanged with "false, false, true" if
-        // the original ownership change with "member-2" is replicated to follower2 after the listener is
-        // registered.
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, true, true));
-
-        // Register follower1 candidate for entity3 and verify it becomes owner
-
-        follower1EntityOwnershipService.registerCandidate(ENTITY3);
-        verifyOwner(leaderDistributedDataStore, ENTITY3, "member-2");
-        verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY3, false, true, true));
-        verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY3, false, false, true));
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY3, false, false, true));
-
-        // Register follower2 candidate for entity4 and verify it becomes owner
-
-        follower2EntityOwnershipService.registerCandidate(ENTITY4);
-        verifyOwner(leaderDistributedDataStore, ENTITY4, "member-3");
-        verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY4, false, true, true));
-        verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY4, false, false, true));
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY4, false, false, true));
-        reset(follower1MockListener, follower2MockListener);
-
-        // Register follower1 candidate for entity4 and verify it gets added but doesn't become owner
-
-        follower1EntityOwnershipService.registerCandidate(ENTITY4);
-        verifyCandidates(leaderDistributedDataStore, ENTITY4, "member-3", "member-2");
-        verifyOwner(leaderDistributedDataStore, ENTITY4, "member-3");
-
-        // Shutdown follower2 and verify its owned entities (entity 4) get re-assigned
-
-        reset(leaderMockListener, follower1MockListener);
-        follower2Node.cleanup();
-
-        verify(follower1MockListener, timeout(15000)).ownershipChanged(ownershipChange(ENTITY4, false, true, true));
-        verify(leaderMockListener, timeout(15000)).ownershipChanged(ownershipChange(ENTITY4, false, false, true));
-
-        // Register leader candidate for entity2 and verify it becomes owner
-
-        DOMEntityOwnershipCandidateRegistration leaderEntity2Reg =
-                leaderEntityOwnershipService.registerCandidate(ENTITY2);
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, true, true));
-
-        // Unregister leader candidate for entity2 and verify the owner is cleared
-
-        leaderEntity2Reg.close();
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "");
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, true, false, false));
-        verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY2, false, false, false));
-    }
-
-    @Test
-    public void testLeaderEntityOwnersReassignedAfterShutdown() throws Exception {
-        followerDatastoreContextBuilder.shardElectionTimeoutFactor(5)
-                    .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
-        String name = "testLeaderEntityOwnersReassignedAfterShutdown";
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
-        leaderDistributedDataStore.waitTillReady();
-        follower1Node.configDataStore().waitTillReady();
-        follower2Node.configDataStore().waitTillReady();
-
-        follower1Node.waitForMembersUp("member-1", "member-3");
-
-        final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-        final DOMEntityOwnershipService follower1EntityOwnershipService =
-                newOwnershipService(follower1Node.configDataStore());
-        final DOMEntityOwnershipService follower2EntityOwnershipService =
-                newOwnershipService(follower2Node.configDataStore());
-
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
-        // Register follower1 candidate for entity1 and verify it becomes owner
-
-        follower1EntityOwnershipService.registerCandidate(ENTITY1);
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
-        // Register leader candidate for entity1
-
-        leaderEntityOwnershipService.registerCandidate(ENTITY1);
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2", "member-1");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
-        // Register leader candidate for entity2 and verify it becomes owner
-
-        leaderEntityOwnershipService.registerCandidate(ENTITY2);
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
-        // Register follower2 candidate for entity2
-
-        follower2EntityOwnershipService.registerCandidate(ENTITY2);
-        verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-1", "member-3");
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
-        // Re-enable elections on all remaining followers so one becomes the new leader
-
-        ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorUtils(),
-                ENTITY_OWNERSHIP_SHARD_NAME);
-        follower1Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
-                .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
-        ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorUtils(),
-                ENTITY_OWNERSHIP_SHARD_NAME);
-        follower2Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
-                .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
-        // Shutdown the leader and verify it's removed from the candidate list
-
-        leaderNode.cleanup();
-        follower1Node.waitForMemberDown("member-1");
-        follower2Node.waitForMemberDown("member-1");
-
-        // Verify the prior leader's entity owners are re-assigned.
-
-        verifyCandidates(follower1Node.configDataStore(), ENTITY1, "member-2", "member-1");
-        verifyCandidates(follower1Node.configDataStore(), ENTITY2, "member-1", "member-3");
-        verifyOwner(follower1Node.configDataStore(), ENTITY1, "member-2");
-        verifyOwner(follower1Node.configDataStore(), ENTITY2, "member-3");
-    }
-
-    @Test
-    public void testLeaderAndFollowerEntityOwnersReassignedAfterShutdown() throws Exception {
-        followerDatastoreContextBuilder.shardElectionTimeoutFactor(5)
-                .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
-        String name = "testLeaderAndFollowerEntityOwnersReassignedAfterShutdown";
-        final MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        final MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        final MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        final MemberNode follower3Node = MemberNode.builder(memberNodes).akkaConfig("Member4")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        final MemberNode follower4Node = MemberNode.builder(memberNodes).akkaConfig("Member5")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
-        leaderDistributedDataStore.waitTillReady();
-        follower1Node.configDataStore().waitTillReady();
-        follower2Node.configDataStore().waitTillReady();
-        follower3Node.configDataStore().waitTillReady();
-        follower4Node.configDataStore().waitTillReady();
-
-        leaderNode.waitForMembersUp("member-2", "member-3", "member-4", "member-5");
-        follower1Node.waitForMembersUp("member-1", "member-3", "member-4", "member-5");
-
-        final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-        final DOMEntityOwnershipService follower1EntityOwnershipService =
-                newOwnershipService(follower1Node.configDataStore());
-        final DOMEntityOwnershipService follower2EntityOwnershipService =
-                newOwnershipService(follower2Node.configDataStore());
-        final DOMEntityOwnershipService follower3EntityOwnershipService =
-                newOwnershipService(follower3Node.configDataStore());
-        newOwnershipService(follower4Node.configDataStore());
-
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
-        // Register follower1 candidate for entity1 and verify it becomes owner
-
-        follower1EntityOwnershipService.registerCandidate(ENTITY1);
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
-        // Register leader candidate for entity1
-
-        leaderEntityOwnershipService.registerCandidate(ENTITY1);
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2", "member-1");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-
-        // Register leader candidate for entity2 and verify it becomes owner
-
-        leaderEntityOwnershipService.registerCandidate(ENTITY2);
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
-        // Register follower2 candidate for entity2
-
-        follower2EntityOwnershipService.registerCandidate(ENTITY2);
-        verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-1", "member-3");
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
-        // Register follower3 as a candidate for entity2 as well
-
-        follower3EntityOwnershipService.registerCandidate(ENTITY2);
-        verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-1", "member-3", "member-4");
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-1");
-
-        // Re-enable elections on all remaining followers so one becomes the new leader
-
-        ActorRef follower1Shard = IntegrationTestKit.findLocalShard(follower1Node.configDataStore().getActorUtils(),
-                ENTITY_OWNERSHIP_SHARD_NAME);
-        follower1Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
-                .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
-        ActorRef follower2Shard = IntegrationTestKit.findLocalShard(follower2Node.configDataStore().getActorUtils(),
-                ENTITY_OWNERSHIP_SHARD_NAME);
-        follower2Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
-                .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
-        ActorRef follower4Shard = IntegrationTestKit.findLocalShard(follower4Node.configDataStore().getActorUtils(),
-                ENTITY_OWNERSHIP_SHARD_NAME);
-        follower4Shard.tell(DatastoreContext.newBuilderFrom(followerDatastoreContextBuilder.build())
-                .customRaftPolicyImplementation(null).build(), ActorRef.noSender());
-
-        // Shutdown the leader and follower3
-
-        leaderNode.cleanup();
-        follower3Node.cleanup();
-
-        follower1Node.waitForMemberDown("member-1");
-        follower1Node.waitForMemberDown("member-4");
-        follower2Node.waitForMemberDown("member-1");
-        follower2Node.waitForMemberDown("member-4");
-        follower4Node.waitForMemberDown("member-1");
-        follower4Node.waitForMemberDown("member-4");
-
-        // Verify the prior leader's and follower3's entity owners are re-assigned.
-
-        verifyCandidates(follower1Node.configDataStore(), ENTITY1, "member-2", "member-1");
-        verifyCandidates(follower1Node.configDataStore(), ENTITY2, "member-1", "member-3", "member-4");
-        verifyOwner(follower1Node.configDataStore(), ENTITY1, "member-2");
-        verifyOwner(follower1Node.configDataStore(), ENTITY2, "member-3");
-    }
-
-    /**
-     * Reproduces bug <a href="https://bugs.opendaylight.org/show_bug.cgi?id=4554">4554</a>.
-     */
-    @Test
-    public void testCloseCandidateRegistrationInQuickSuccession() throws Exception {
-        String name = "testCloseCandidateRegistrationInQuickSuccession";
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
-        leaderDistributedDataStore.waitTillReady();
-        follower1Node.configDataStore().waitTillReady();
-        follower2Node.configDataStore().waitTillReady();
-
-        final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-        final DOMEntityOwnershipService follower1EntityOwnershipService =
-                newOwnershipService(follower1Node.configDataStore());
-        final DOMEntityOwnershipService follower2EntityOwnershipService =
-                newOwnershipService(follower2Node.configDataStore());
-
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
-        leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
-        follower1EntityOwnershipService.registerListener(ENTITY_TYPE1, follower1MockListener);
-        follower2EntityOwnershipService.registerListener(ENTITY_TYPE1, follower2MockListener);
-
-        final DOMEntityOwnershipCandidateRegistration candidate1 =
-                leaderEntityOwnershipService.registerCandidate(ENTITY1);
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, true, true));
-
-        final DOMEntityOwnershipCandidateRegistration candidate2 =
-                follower1EntityOwnershipService.registerCandidate(ENTITY1);
-        verify(follower1MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-
-        final DOMEntityOwnershipCandidateRegistration candidate3 =
-                follower2EntityOwnershipService.registerCandidate(ENTITY1);
-        verify(follower2MockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-
-        Mockito.reset(leaderMockListener, follower1MockListener, follower2MockListener);
-
-        ArgumentCaptor<DOMEntityOwnershipChange> leaderChangeCaptor =
-                ArgumentCaptor.forClass(DOMEntityOwnershipChange.class);
-        ArgumentCaptor<DOMEntityOwnershipChange> follower1ChangeCaptor =
-                ArgumentCaptor.forClass(DOMEntityOwnershipChange.class);
-        ArgumentCaptor<DOMEntityOwnershipChange> follower2ChangeCaptor =
-                ArgumentCaptor.forClass(DOMEntityOwnershipChange.class);
-        doNothing().when(leaderMockListener).ownershipChanged(leaderChangeCaptor.capture());
-        doNothing().when(follower1MockListener).ownershipChanged(follower1ChangeCaptor.capture());
-        doNothing().when(follower2MockListener).ownershipChanged(follower2ChangeCaptor.capture());
-
-        candidate1.close();
-        candidate2.close();
-        candidate3.close();
-
-        boolean passed = false;
-        for (int i = 0; i < 100; i++) {
-            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
-            final Optional<EntityOwnershipState> leaderState = leaderEntityOwnershipService.getOwnershipState(ENTITY1);
-            final Optional<EntityOwnershipState> follower1State =
-                    follower1EntityOwnershipService.getOwnershipState(ENTITY1);
-            final Optional<EntityOwnershipState> follower2State =
-                    follower2EntityOwnershipService.getOwnershipState(ENTITY1);
-            final Optional<DOMEntityOwnershipChange> leaderChange = getValueSafely(leaderChangeCaptor);
-            final Optional<DOMEntityOwnershipChange> follower1Change = getValueSafely(follower1ChangeCaptor);
-            final Optional<DOMEntityOwnershipChange> follower2Change = getValueSafely(follower2ChangeCaptor);
-            if (leaderState.isPresent() && leaderState.get() == EntityOwnershipState.NO_OWNER
-                    && follower1State.isPresent() && follower1State.get() == EntityOwnershipState.NO_OWNER
-                    && follower2State.isPresent() && follower2State.get() == EntityOwnershipState.NO_OWNER
-                    && leaderChange.isPresent() && !leaderChange.get().getState().hasOwner()
-                    && follower1Change.isPresent() && !follower1Change.get().getState().hasOwner()
-                    && follower2Change.isPresent() && !follower2Change.get().getState().hasOwner()) {
-                passed = true;
-                break;
-            }
-        }
-
-        assertTrue("No ownership change message was sent with hasOwner=false", passed);
-    }
-
-    private static Optional<DOMEntityOwnershipChange> getValueSafely(ArgumentCaptor<DOMEntityOwnershipChange> captor) {
-        try {
-            return Optional.ofNullable(captor.getValue());
-        } catch (MockitoException e) {
-            // No value was captured
-            return Optional.empty();
-        }
-    }
-
-    /**
-     * Tests bootstrapping the entity-ownership shard when there are no shards initially configured for the
-     * local member. The entity-ownership shard is initially created as inactive (i.e. it remains a follower),
-     * requiring an AddShardReplica request to join it to an existing leader.
-     */
-    @Test
-    public void testEntityOwnershipShardBootstrapping() throws Exception {
-        String name = "testEntityOwnershipShardBootstrapping";
-        String moduleShardsConfig = MODULE_SHARDS_MEMBER_1_CONFIG;
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-        final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
-        MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore follower1DistributedDataStore = follower1Node.configDataStore();
-        follower1DistributedDataStore.waitTillReady();
-
-        leaderNode.waitForMembersUp("member-2");
-        follower1Node.waitForMembersUp("member-1");
-
-        DOMEntityOwnershipService follower1EntityOwnershipService = newOwnershipService(follower1DistributedDataStore);
-
-        leaderEntityOwnershipService.registerListener(ENTITY_TYPE1, leaderMockListener);
-
-        // Register a candidate for follower1 - should get queued since follower1 has no leader
-        final DOMEntityOwnershipCandidateRegistration candidateReg =
-                follower1EntityOwnershipService.registerCandidate(ENTITY1);
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(leaderMockListener, never()).ownershipChanged(ownershipChange(ENTITY1));
-
-        // Add replica in follower1
-        AddShardReplica addReplica = new AddShardReplica(ENTITY_OWNERSHIP_SHARD_NAME);
-        follower1DistributedDataStore.getActorUtils().getShardManager().tell(addReplica,
-                follower1Node.kit().getRef());
-        Object reply = follower1Node.kit().expectMsgAnyClassOf(follower1Node.kit().duration("5 sec"),
-                Success.class, Failure.class);
-        if (reply instanceof Failure) {
-            throw new AssertionError("AddShardReplica failed", ((Failure)reply).cause());
-        }
-
-        // The queued candidate registration should proceed
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-        reset(leaderMockListener);
-
-        candidateReg.close();
-        verify(leaderMockListener, timeout(5000)).ownershipChanged(ownershipChange(ENTITY1, false, false, false));
-        reset(leaderMockListener);
-
-        // Restart follower1 and verify the entity ownership shard is re-instated by registering.
-        Cluster.get(leaderNode.kit().getSystem()).down(Cluster.get(follower1Node.kit().getSystem()).selfAddress());
-        follower1Node.cleanup();
-
-        follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(moduleShardsConfig).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-        follower1EntityOwnershipService = newOwnershipService(follower1Node.configDataStore());
-
-        follower1EntityOwnershipService.registerCandidate(ENTITY1);
-        verify(leaderMockListener, timeout(20000)).ownershipChanged(ownershipChange(ENTITY1, false, false, true));
-
-        verifyRaftState(follower1Node.configDataStore(), ENTITY_OWNERSHIP_SHARD_NAME, raftState -> {
-            assertNull("Custom RaftPolicy class name", raftState.getCustomRaftPolicyClassName());
-            assertEquals("Peer count", 1, raftState.getPeerAddresses().keySet().size());
-            assertThat("Peer Id", Iterables.<String>getLast(raftState.getPeerAddresses().keySet()),
-                    org.hamcrest.CoreMatchers.containsString("member-1"));
-        });
-    }
-
-    @Test
-    public void testOwnerSelectedOnRapidUnregisteringAndRegisteringOfCandidates() throws Exception {
-        String name = "testOwnerSelectedOnRapidUnregisteringAndRegisteringOfCandidates";
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
-        leaderDistributedDataStore.waitTillReady();
-        follower1Node.configDataStore().waitTillReady();
-        follower2Node.configDataStore().waitTillReady();
-
-        final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-        final DOMEntityOwnershipService follower1EntityOwnershipService =
-                newOwnershipService(follower1Node.configDataStore());
-        newOwnershipService(follower2Node.configDataStore());
-
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
-        // Register leader candidate for entity1 and verify it becomes owner
-
-        DOMEntityOwnershipCandidateRegistration leaderEntity1Reg =
-                leaderEntityOwnershipService.registerCandidate(ENTITY1);
-
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-1");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-1");
-
-        leaderEntity1Reg.close();
-        follower1EntityOwnershipService.registerCandidate(ENTITY1);
-
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-    }
-
-    @Test
-    public void testOwnerSelectedOnRapidRegisteringAndUnregisteringOfCandidates() throws Exception {
-        String name = "testOwnerSelectedOnRapidRegisteringAndUnregisteringOfCandidates";
-        MemberNode leaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        MemberNode follower1Node = MemberNode.builder(memberNodes).akkaConfig("Member2").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        MemberNode follower2Node = MemberNode.builder(memberNodes).akkaConfig("Member3").testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_CONFIG).schemaContext(SCHEMA_CONTEXT).createOperDatastore(false)
-                .datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = leaderNode.configDataStore();
-
-        leaderDistributedDataStore.waitTillReady();
-        follower1Node.configDataStore().waitTillReady();
-        follower2Node.configDataStore().waitTillReady();
-
-        final DOMEntityOwnershipService leaderEntityOwnershipService = newOwnershipService(leaderDistributedDataStore);
-        final DOMEntityOwnershipService follower1EntityOwnershipService =
-                newOwnershipService(follower1Node.configDataStore());
-        newOwnershipService(follower2Node.configDataStore());
-
-        leaderNode.kit().waitUntilLeader(leaderNode.configDataStore().getActorUtils(), ENTITY_OWNERSHIP_SHARD_NAME);
-
-        // Register leader candidate for entity1 and verify it becomes owner
-
-        final DOMEntityOwnershipCandidateRegistration leaderEntity1Reg =
-                leaderEntityOwnershipService.registerCandidate(ENTITY1);
-
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-1");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-1");
-
-        follower1EntityOwnershipService.registerCandidate(ENTITY1);
-        leaderEntity1Reg.close();
-
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-2");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-2");
-    }
-
-    @Test
-    public void testEntityOwnershipWithNonVotingMembers() throws Exception {
-        followerDatastoreContextBuilder.shardElectionTimeoutFactor(5)
-                .customRaftPolicyImplementation(DisableElectionsRaftPolicy.class.getName());
-
-        String name = "testEntityOwnershipWithNonVotingMembers";
-        final MemberNode member1LeaderNode = MemberNode.builder(memberNodes).akkaConfig("Member1")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(leaderDatastoreContextBuilder).build();
-
-        final MemberNode member2FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member2")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        final MemberNode member3FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member3")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        final MemberNode member4FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member4")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        final MemberNode member5FollowerNode = MemberNode.builder(memberNodes).akkaConfig("Member5")
-                .useAkkaArtery(false).testName(name)
-                .moduleShardsConfig(MODULE_SHARDS_5_NODE_CONFIG).schemaContext(SCHEMA_CONTEXT)
-                .createOperDatastore(false).datastoreContextBuilder(followerDatastoreContextBuilder).build();
-
-        AbstractDataStore leaderDistributedDataStore = member1LeaderNode.configDataStore();
-
-        leaderDistributedDataStore.waitTillReady();
-        member2FollowerNode.configDataStore().waitTillReady();
-        member3FollowerNode.configDataStore().waitTillReady();
-        member4FollowerNode.configDataStore().waitTillReady();
-        member5FollowerNode.configDataStore().waitTillReady();
-
-        member1LeaderNode.waitForMembersUp("member-2", "member-3", "member-4", "member-5");
-
-        final DOMEntityOwnershipService member3EntityOwnershipService =
-                newOwnershipService(member3FollowerNode.configDataStore());
-        final DOMEntityOwnershipService member4EntityOwnershipService =
-                newOwnershipService(member4FollowerNode.configDataStore());
-        final DOMEntityOwnershipService member5EntityOwnershipService =
-                newOwnershipService(member5FollowerNode.configDataStore());
-
-        newOwnershipService(member1LeaderNode.configDataStore());
-        member1LeaderNode.kit().waitUntilLeader(member1LeaderNode.configDataStore().getActorUtils(),
-                ENTITY_OWNERSHIP_SHARD_NAME);
-
-        // Make member4 and member5 non-voting
-
-        Future<Object> future = Patterns.ask(leaderDistributedDataStore.getActorUtils().getShardManager(),
-                new ChangeShardMembersVotingStatus(ENTITY_OWNERSHIP_SHARD_NAME,
-                        ImmutableMap.of("member-4", Boolean.FALSE, "member-5", Boolean.FALSE)),
-                new Timeout(10, TimeUnit.SECONDS));
-        Object response = Await.result(future, FiniteDuration.apply(10, TimeUnit.SECONDS));
-        if (response instanceof Throwable) {
-            throw new AssertionError("ChangeShardMembersVotingStatus failed", (Throwable)response);
-        }
-
-        assertNull("Expected null Success response. Actual " + response, response);
-
-        // Register member4 candidate for entity1 - it should not become owner since it's non-voting
-
-        member4EntityOwnershipService.registerCandidate(ENTITY1);
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-4");
-
-        // Register member5 candidate for entity2 - it should not become owner since it's non-voting
-
-        member5EntityOwnershipService.registerCandidate(ENTITY2);
-        verifyCandidates(leaderDistributedDataStore, ENTITY2, "member-5");
-
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "");
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "");
-
-        // Register member3 candidate for entity1 - it should become owner since it's voting
-
-        member3EntityOwnershipService.registerCandidate(ENTITY1);
-        verifyCandidates(leaderDistributedDataStore, ENTITY1, "member-4", "member-3");
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-3");
-
-        // Switch member4 and member5 back to voting and member3 non-voting. This should result in member4 and member5
-        // to become entity owners.
-
-        future = Patterns.ask(leaderDistributedDataStore.getActorUtils().getShardManager(),
-                new ChangeShardMembersVotingStatus(ENTITY_OWNERSHIP_SHARD_NAME,
-                        ImmutableMap.of("member-3", Boolean.FALSE, "member-4", Boolean.TRUE, "member-5", Boolean.TRUE)),
-                new Timeout(10, TimeUnit.SECONDS));
-        response = Await.result(future, FiniteDuration.apply(10, TimeUnit.SECONDS));
-        if (response instanceof Throwable) {
-            throw new AssertionError("ChangeShardMembersVotingStatus failed", (Throwable)response);
-        }
-
-        assertNull("Expected null Success response. Actual " + response, response);
-
-        verifyOwner(leaderDistributedDataStore, ENTITY1, "member-4");
-        verifyOwner(leaderDistributedDataStore, ENTITY2, "member-5");
-    }
-
-    private static void verifyGetOwnershipState(final DOMEntityOwnershipService service, final DOMEntity entity,
-            final EntityOwnershipState expState) {
-        Optional<EntityOwnershipState> state = service.getOwnershipState(entity);
-        assertTrue("getOwnershipState present", state.isPresent());
-        assertEquals("EntityOwnershipState", expState, state.get());
-    }
-
-    private static void verifyCandidates(final AbstractDataStore dataStore, final DOMEntity entity,
-            final String... expCandidates) throws Exception {
-        AssertionError lastError = null;
-        Stopwatch sw = Stopwatch.createStarted();
-        while (sw.elapsed(TimeUnit.MILLISECONDS) <= 10000) {
-            Optional<NormalizedNode<?, ?>> possible = dataStore.newReadOnlyTransaction()
-                    .read(entityPath(entity.getType(), entity.getIdentifier()).node(Candidate.QNAME))
-                    .get(5, TimeUnit.SECONDS);
-            try {
-                assertTrue("Candidates not found for " + entity, possible.isPresent());
-                Collection<String> actual = new ArrayList<>();
-                for (MapEntryNode candidate: ((MapNode)possible.get()).getValue()) {
-                    actual.add(candidate.getChild(CANDIDATE_NAME_NODE_ID).get().getValue().toString());
-                }
-
-                assertEquals("Candidates for " + entity, Arrays.asList(expCandidates), actual);
-                return;
-            } catch (AssertionError e) {
-                lastError = e;
-                Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-            }
-        }
-
-        throw lastError;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private static void verifyOwner(final AbstractDataStore dataStore, final DOMEntity entity,
-            final String expOwner) {
-        AbstractEntityOwnershipTest.verifyOwner(expOwner, entity.getType(), entity.getIdentifier(), path -> {
-            try {
-                return dataStore.newReadOnlyTransaction().read(path).get(5, TimeUnit.SECONDS).get();
-            } catch (Exception e) {
-                return null;
-            }
-        });
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipServiceTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipServiceTest.java
deleted file mode 100644 (file)
index b130134..0000000
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_ID_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_QNAME;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.candidatePath;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityEntryWithOwner;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityOwnersWithEntityTypeEntry;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityPath;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityTypeEntryWithEntityEntry;
-
-import akka.actor.ActorRef;
-import com.google.common.collect.Sets;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.AbstractDataStore;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.cluster.datastore.config.Configuration;
-import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
-import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConfigProvider;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipCandidateRegistration;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListenerRegistration;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * Unit tests for DistributedEntityOwnershipService.
- *
- * @author Thomas Pantelis
- */
-public class DistributedEntityOwnershipServiceTest extends AbstractClusterRefEntityOwnershipTest {
-    static final String ENTITY_TYPE = "test";
-    static final String ENTITY_TYPE2 = "test2";
-    static final QName QNAME = QName.create("test", "2015-08-11", "foo");
-    static int ID_COUNTER = 1;
-
-    private final String dataStoreName = "config" + ID_COUNTER++;
-    private AbstractDataStore dataStore;
-
-    @Before
-    public void setUp() {
-        DatastoreContext datastoreContext = DatastoreContext.newBuilder().dataStoreName(dataStoreName)
-                .shardInitializationTimeout(10, TimeUnit.SECONDS).build();
-
-        Configuration configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider()) {
-            @Override
-            public Collection<MemberName> getUniqueMemberNamesForAllShards() {
-                return Sets.newHashSet(MemberName.forName("member-1"));
-            }
-        };
-
-        DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
-        Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());
-
-        dataStore = new DistributedDataStore(getSystem(), new MockClusterWrapper(), configuration,
-                mockContextFactory, null);
-
-        dataStore.onGlobalContextUpdated(SchemaContextHelper.entityOwners());
-    }
-
-    @After
-    public void tearDown() {
-        dataStore.close();
-    }
-
-    private static <T> T verifyMessage(final DistributedEntityOwnershipService mock, final Class<T> type) {
-        final ArgumentCaptor<T> message = ArgumentCaptor.forClass(type);
-        verify(mock).executeLocalEntityOwnershipShardOperation(message.capture());
-        return message.getValue();
-    }
-
-    @Test
-    public void testEntityOwnershipShardCreated() throws Exception {
-        DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorUtils(),
-                EntityOwnerSelectionStrategyConfig.newBuilder().build());
-
-        Future<ActorRef> future = dataStore.getActorUtils().findLocalShardAsync(
-                DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME);
-        ActorRef shardActor = Await.result(future, FiniteDuration.create(10, TimeUnit.SECONDS));
-        assertNotNull(DistributedEntityOwnershipService.ENTITY_OWNERSHIP_SHARD_NAME + " not found", shardActor);
-
-        service.close();
-    }
-
-    @Test
-    public void testRegisterCandidate() throws Exception {
-        DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
-        YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
-        DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity);
-        verifyRegisterCandidateLocal(service, entity);
-        verifyEntityOwnershipCandidateRegistration(entity, reg);
-        verifyEntityCandidate(service.getLocalEntityOwnershipShard(), ENTITY_TYPE, entityId,
-                dataStore.getActorUtils().getCurrentMemberName().getName());
-
-        // Register the same entity - should throw exception
-
-        try {
-            service.registerCandidate(entity);
-            fail("Expected CandidateAlreadyRegisteredException");
-        } catch (CandidateAlreadyRegisteredException e) {
-            // expected
-            assertEquals("getEntity", entity, e.getEntity());
-        }
-
-        // Register a different entity - should succeed
-        reset(service);
-
-        DOMEntity entity2 = new DOMEntity(ENTITY_TYPE2, entityId);
-        DOMEntityOwnershipCandidateRegistration reg2 = service.registerCandidate(entity2);
-
-        verifyEntityOwnershipCandidateRegistration(entity2, reg2);
-        verifyEntityCandidate(service.getLocalEntityOwnershipShard(), ENTITY_TYPE2, entityId,
-                dataStore.getActorUtils().getCurrentMemberName().getName());
-
-        service.close();
-    }
-
-    @Test
-    public void testCloseCandidateRegistration() throws Exception {
-        DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, YangInstanceIdentifier.of(QNAME));
-
-        DOMEntityOwnershipCandidateRegistration reg = service.registerCandidate(entity);
-
-        verifyEntityOwnershipCandidateRegistration(entity, reg);
-        verifyRegisterCandidateLocal(service, entity);
-
-        reset(service);
-        reg.close();
-        UnregisterCandidateLocal unregCandidate = verifyMessage(service, UnregisterCandidateLocal.class);
-        assertEquals("getEntity", entity, unregCandidate.getEntity());
-
-        // Re-register - should succeed.
-        reset(service);
-        service.registerCandidate(entity);
-        verifyRegisterCandidateLocal(service, entity);
-
-        service.close();
-    }
-
-    @Test
-    public void testListenerRegistration() {
-        DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
-        YangInstanceIdentifier entityId = YangInstanceIdentifier.of(QNAME);
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-        DOMEntityOwnershipListener listener = mock(DOMEntityOwnershipListener.class);
-
-        DOMEntityOwnershipListenerRegistration reg = service.registerListener(entity.getType(), listener);
-
-        assertNotNull("EntityOwnershipListenerRegistration null", reg);
-        assertEquals("getEntityType", entity.getType(), reg.getEntityType());
-        assertEquals("getInstance", listener, reg.getInstance());
-
-        RegisterListenerLocal regListener = verifyMessage(service, RegisterListenerLocal.class);
-        assertSame("getListener", listener, regListener.getListener());
-        assertEquals("getEntityType", entity.getType(), regListener.getEntityType());
-
-        reset(service);
-        reg.close();
-        UnregisterListenerLocal unregListener = verifyMessage(service, UnregisterListenerLocal.class);
-        assertEquals("getEntityType", entity.getType(), unregListener.getEntityType());
-        assertSame("getListener", listener, unregListener.getListener());
-
-        service.close();
-    }
-
-    @Test
-    public void testGetOwnershipState() throws Exception {
-        DistributedEntityOwnershipService service = spy(DistributedEntityOwnershipService.start(
-            dataStore.getActorUtils(), EntityOwnerSelectionStrategyConfig.newBuilder().build()));
-
-        final Shard mockShard = Mockito.mock(Shard.class);
-        ShardDataTree shardDataTree = new ShardDataTree(mockShard, SchemaContextHelper.entityOwners(),
-            TreeType.OPERATIONAL);
-
-        when(service.getLocalEntityOwnershipShardDataTree()).thenReturn(shardDataTree.getDataTree());
-
-        DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, "one");
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, entity1.getIdentifier(), "member-1"),
-                shardDataTree);
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithEntityTypeEntry(entityTypeEntryWithEntityEntry(entity1.getType(),
-                entityEntryWithOwner(entity1.getIdentifier(), "member-1"))), shardDataTree);
-        verifyGetOwnershipState(service, entity1, EntityOwnershipState.IS_OWNER);
-
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE,
-                entity1.getIdentifier(), "member-2"), shardDataTree);
-        writeNode(entityPath(entity1.getType(), entity1.getIdentifier()),
-                entityEntryWithOwner(entity1.getIdentifier(), "member-2"), shardDataTree);
-        verifyGetOwnershipState(service, entity1, EntityOwnershipState.OWNED_BY_OTHER);
-
-        writeNode(entityPath(entity1.getType(), entity1.getIdentifier()), entityEntryWithOwner(entity1.getIdentifier(),
-                ""), shardDataTree);
-        verifyGetOwnershipState(service, entity1, EntityOwnershipState.NO_OWNER);
-
-        DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, "two");
-        Optional<EntityOwnershipState> state = service.getOwnershipState(entity2);
-        assertFalse("getOwnershipState present", state.isPresent());
-
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, entity2.getIdentifier(), "member-1"),
-                shardDataTree);
-        writeNode(entityPath(entity2.getType(), entity2.getIdentifier()), ImmutableNodes.mapEntry(ENTITY_QNAME,
-                ENTITY_ID_QNAME, entity2.getIdentifier()), shardDataTree);
-        verifyGetOwnershipState(service, entity2, EntityOwnershipState.NO_OWNER);
-
-        deleteNode(candidatePath(entityPath(entity2.getType(), entity2.getIdentifier()), "member-1"), shardDataTree);
-        Optional<EntityOwnershipState> state2 = service.getOwnershipState(entity2);
-        assertFalse("getOwnershipState present", state2.isPresent());
-        service.close();
-    }
-
-    @Test
-    public void testIsCandidateRegistered() throws CandidateAlreadyRegisteredException {
-        DistributedEntityOwnershipService service = DistributedEntityOwnershipService.start(dataStore.getActorUtils(),
-                EntityOwnerSelectionStrategyConfig.newBuilder().build());
-
-        final DOMEntity test = new DOMEntity("test-type", "test");
-
-        assertFalse(service.isCandidateRegistered(test));
-
-        service.registerCandidate(test);
-
-        assertTrue(service.isCandidateRegistered(test));
-
-        service.close();
-    }
-
-    private static void verifyGetOwnershipState(final DistributedEntityOwnershipService service, final DOMEntity entity,
-            final EntityOwnershipState expState) {
-        Optional<EntityOwnershipState> state = service.getOwnershipState(entity);
-        assertTrue("getOwnershipState present", state.isPresent());
-        assertEquals("EntityOwnershipState", expState, state.get());
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void verifyEntityCandidate(final ActorRef entityOwnershipShard, final String entityType,
-            final YangInstanceIdentifier entityId, final String candidateName) {
-        verifyEntityCandidate(entityType, entityId, candidateName, path -> {
-            try {
-                return dataStore.newReadOnlyTransaction().read(path).get(5, TimeUnit.SECONDS).get();
-            } catch (Exception e) {
-                return null;
-            }
-        });
-    }
-
-    private static void verifyRegisterCandidateLocal(final DistributedEntityOwnershipService service,
-            final DOMEntity entity) {
-        RegisterCandidateLocal regCandidate = verifyMessage(service, RegisterCandidateLocal.class);
-        assertEquals("getEntity", entity, regCandidate.getEntity());
-    }
-
-    private static void verifyEntityOwnershipCandidateRegistration(final DOMEntity entity,
-            final DOMEntityOwnershipCandidateRegistration reg) {
-        assertNotNull("EntityOwnershipCandidateRegistration null", reg);
-        assertEquals("getInstance", entity, reg.getInstance());
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnerChangeListenerTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnerChangeListenerTest.java
deleted file mode 100644 (file)
index 8f5e6c1..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.verify;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityEntryWithOwner;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityPath;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-
-/**
- * Unit tests for EntityOwnerChangeListener.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnerChangeListenerTest {
-    private static final String LOCAL_MEMBER_NAME = "member-1";
-    private static final String REMOTE_MEMBER_NAME1 = "member-2";
-    private static final String REMOTE_MEMBER_NAME2 = "member-3";
-    private static final String ENTITY_TYPE = "test";
-    private static final YangInstanceIdentifier ENTITY_ID1 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
-    private static final YangInstanceIdentifier ENTITY_ID2 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
-    private static final DOMEntity ENTITY1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-    private static final DOMEntity ENTITY2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
-
-    private final Shard mockShard = Mockito.mock(Shard.class);
-
-    private final ShardDataTree shardDataTree = new ShardDataTree(mockShard, SchemaContextHelper.entityOwners(),
-        TreeType.OPERATIONAL);
-    private final EntityOwnershipListenerSupport mockListenerSupport = mock(EntityOwnershipListenerSupport.class);
-    private EntityOwnerChangeListener listener;
-
-    @Before
-    public void setup() {
-        listener = new EntityOwnerChangeListener(MemberName.forName(LOCAL_MEMBER_NAME), mockListenerSupport);
-        listener.init(shardDataTree);
-    }
-
-    @Test
-    public void testOnDataTreeChanged() throws Exception {
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME));
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID2, LOCAL_MEMBER_NAME));
-        verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
-                anyBoolean(), anyBoolean());
-
-        // Write local member as owner for entity 1
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, true, true);
-
-        // Add remote member 1 as candidate for entity 1 - listener support should not get notified
-
-        reset(mockListenerSupport);
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, REMOTE_MEMBER_NAME1));
-        verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
-                anyBoolean(), anyBoolean());
-
-        // Change owner to remote member 1 for entity 1
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME1));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, true, false, true);
-
-        // Change owner to remote member 2 for entity 1
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME2));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, false, true);
-
-        // Clear the owner for entity 1
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, ""));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, false, false);
-
-        // Change owner to the local member for entity 1
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY1, false, true, true);
-
-        // Change owner to remote member 2 for entity 2
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, REMOTE_MEMBER_NAME1));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY2, false, false, true);
-
-        // Change owner to the local member for entity 2
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY2, false, true, true);
-
-        // Write local member owner for entity 2 again - expect no change
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
-        verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
-                anyBoolean(), anyBoolean());
-
-        // Clear the owner for entity 2
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, null));
-        verify(mockListenerSupport).notifyEntityOwnershipListeners(ENTITY2, true, false, false);
-
-        // Clear the owner for entity 2 again - expect no change
-
-        reset(mockListenerSupport);
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, null));
-        verify(mockListenerSupport, never()).notifyEntityOwnershipListeners(any(DOMEntity.class), anyBoolean(),
-                anyBoolean(), anyBoolean());
-    }
-
-    private void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
-            throws DataValidationFailedException {
-        AbstractEntityOwnershipTest.writeNode(path, node, shardDataTree);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerActorTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerActorTest.java
deleted file mode 100644 (file)
index e655f96..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorRef;
-import akka.testkit.TestActorRef;
-import org.junit.After;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.mdsal.eos.common.api.EntityOwnershipChangeState;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Unit tests for EntityOwnershipListenerActor.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnershipListenerActorTest extends AbstractEntityOwnershipTest {
-    private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
-
-    @After
-    public void tearDown() {
-        actorFactory.close();
-    }
-
-    @Test
-    public void testOnEntityOwnershipChanged() {
-        DOMEntityOwnershipListener mockListener = mock(DOMEntityOwnershipListener.class);
-
-        TestActorRef<EntityOwnershipListenerActor> listenerActor = actorFactory.createTestActor(
-                EntityOwnershipListenerActor.props(mockListener), actorFactory.generateActorId("listener"));
-
-        DOMEntity entity = new DOMEntity("test", YangInstanceIdentifier.of(QName.create("test", "id1")));
-        boolean wasOwner = false;
-        boolean isOwner = true;
-        boolean hasOwner = true;
-        listenerActor.tell(new DOMEntityOwnershipChange(entity, EntityOwnershipChangeState.from(
-                wasOwner, isOwner, hasOwner)), ActorRef.noSender());
-
-        verify(mockListener, timeout(5000)).ownershipChanged(ownershipChange(entity, wasOwner, isOwner, hasOwner));
-    }
-
-    @Test
-    public void testOnEntityOwnershipChangedWithListenerEx() {
-        DOMEntityOwnershipListener mockListener = mock(DOMEntityOwnershipListener.class);
-
-        DOMEntity entity1 = new DOMEntity("test", YangInstanceIdentifier.of(QName.create("test", "id1")));
-        doThrow(new RuntimeException("mock")).when(mockListener).ownershipChanged(
-                ownershipChange(entity1, false, true, true));
-        DOMEntity entity2 = new DOMEntity("test", YangInstanceIdentifier.of(QName.create("test", "id2")));
-        doNothing().when(mockListener).ownershipChanged(ownershipChange(entity2, true, false, false));
-
-        TestActorRef<EntityOwnershipListenerActor> listenerActor = actorFactory.createTestActor(
-                EntityOwnershipListenerActor.props(mockListener), actorFactory.generateActorId("listener"));
-
-        listenerActor.tell(new DOMEntityOwnershipChange(entity1, EntityOwnershipChangeState.from(
-                false, true, true)), ActorRef.noSender());
-        listenerActor.tell(new DOMEntityOwnershipChange(entity2, EntityOwnershipChangeState.from(
-                true, false, false)), ActorRef.noSender());
-
-        verify(mockListener, timeout(5000)).ownershipChanged(ownershipChange(entity2, true, false, false));
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerSupportTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerSupportTest.java
deleted file mode 100644 (file)
index b331c99..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorContext;
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import akka.testkit.TestActorRef;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import scala.collection.Iterator;
-import scala.collection.immutable.Iterable;
-
-/**
- * Unit tests for EntityOwnershipListenerSupport.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnershipListenerSupportTest extends AbstractEntityOwnershipTest {
-    private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
-    private ActorContext actorContext;
-
-    @Before
-    public void setup() {
-        TestActorRef<DoNothingActor> actor = actorFactory.createTestActor(
-                Props.create(DoNothingActor.class), actorFactory.generateActorId("test"));
-
-        actorContext = actor.underlyingActor().getContext();
-    }
-
-    @After
-    public void tearDown() {
-        actorFactory.close();
-    }
-
-    @Test
-    public void testNotifyEntityOwnershipListeners() {
-        EntityOwnershipListenerSupport support = new EntityOwnershipListenerSupport(actorContext, "test");
-
-        DOMEntityOwnershipListener mockListener1 = mock(DOMEntityOwnershipListener.class, "EntityOwnershipListener1");
-        DOMEntityOwnershipListener mockListener2 = mock(DOMEntityOwnershipListener.class, "EntityOwnershipListener2");
-        DOMEntityOwnershipListener mockListener12 = mock(DOMEntityOwnershipListener.class,
-                "EntityOwnershipListener1_2");
-        String entityType1 = "type1";
-        String entityType2 = "type2";
-        final DOMEntity entity1 = new DOMEntity(entityType1, YangInstanceIdentifier.of(QName.create("test", "id1")));
-        final DOMEntity entity2 = new DOMEntity(entityType2, YangInstanceIdentifier.of(QName.create("test", "id2")));
-        final DOMEntity entity3 = new DOMEntity("noListener", YangInstanceIdentifier.of(QName.create("test", "id5")));
-
-        // Add EntityOwnershipListener registrations.
-
-        support.addEntityOwnershipListener(entityType1, mockListener1);
-        support.addEntityOwnershipListener(entityType1, mockListener1); // register again - should be noop
-        support.addEntityOwnershipListener(entityType1, mockListener12);
-        support.addEntityOwnershipListener(entityType2, mockListener2);
-
-        // Notify entity1 changed and verify appropriate listeners are notified.
-
-        support.notifyEntityOwnershipListeners(entity1, false, true, true);
-
-        verify(mockListener1, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
-        verify(mockListener12, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(mockListener2, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        assertEquals("# of listener actors", 2, actorContext.children().size());
-        reset(mockListener1, mockListener2, mockListener12);
-
-        // Notify entity2 changed and verify appropriate listeners are notified.
-
-        support.notifyEntityOwnershipListeners(entity2, false, true, true);
-
-        verify(mockListener2, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, true, true));
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(mockListener1, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        verify(mockListener12, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        assertEquals("# of listener actors", 3, actorContext.children().size());
-        reset(mockListener1, mockListener2, mockListener12);
-
-        // Notify entity3 changed and verify no listeners are notified.
-
-        support.notifyEntityOwnershipListeners(entity3, true, false, true);
-
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(mockListener1, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        verify(mockListener2, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        verify(mockListener12, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        reset(mockListener1, mockListener2, mockListener12);
-
-        Iterable<ActorRef> listenerActors = actorContext.children();
-        assertEquals("# of listener actors", 3, listenerActors.size());
-
-        // Unregister mockListener1, issue a change for entity1 and verify only remaining listeners are notified.
-
-        support.removeEntityOwnershipListener(entityType1, mockListener1);
-        support.notifyEntityOwnershipListeners(entity1, true, false, true);
-
-        verify(mockListener12, timeout(5000)).ownershipChanged(ownershipChange(entity1, true, false, true));
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(mockListener1, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        reset(mockListener1, mockListener2, mockListener12);
-
-        // Unregister all listeners and verify their listener actors are destroyed.
-
-        List<TestKit> watchers = new ArrayList<>();
-        for (Iterator<ActorRef> iter = listenerActors.iterator(); iter.hasNext();) {
-            TestKit kit = new TestKit(getSystem());
-            kit.watch(iter.next());
-            watchers.add(kit);
-        }
-
-        support.removeEntityOwnershipListener(entityType1, mockListener12);
-        support.removeEntityOwnershipListener(entityType1, mockListener12); // un-register again - should be noop
-        support.removeEntityOwnershipListener(entityType2, mockListener2);
-
-        Iterator<ActorRef> iter = listenerActors.iterator();
-        for (TestKit kit: watchers) {
-            kit.expectTerminated(kit.duration("3 seconds"), iter.next());
-        }
-
-        assertEquals("# of listener actors", 0, actorContext.children().size());
-
-        // Re-register mockListener1 and verify it is notified.
-
-        reset(mockListener1, mockListener2);
-
-        support.addEntityOwnershipListener(entityType1, mockListener1);
-        support.notifyEntityOwnershipListeners(entity1, false, false, true);
-
-        verify(mockListener1, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
-        verify(mockListener12, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-        verify(mockListener2, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-
-        // Quickly register and unregister mockListener2 - expecting no exceptions.
-
-        support.addEntityOwnershipListener(entityType1, mockListener2);
-        support.removeEntityOwnershipListener(entityType1, mockListener2);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShardTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShardTest.java
deleted file mode 100644 (file)
index 887336d..0000000
+++ /dev/null
@@ -1,1310 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.AdditionalMatchers.or;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.clearMessages;
-import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectFirstMatching;
-import static org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor.expectMatching;
-
-import akka.actor.ActorRef;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
-import akka.actor.Terminated;
-import akka.dispatch.Dispatchers;
-import akka.testkit.TestActorRef;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.Predicate;
-import org.junit.After;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.ShardTestKit;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.CandidateAdded;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.RegisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterCandidateLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.messages.UnregisterListenerLocal;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.EntityOwnerSelectionStrategyConfig;
-import org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy.LastCandidateSelectionStrategy;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.BatchedModifications;
-import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
-import org.opendaylight.controller.cluster.datastore.messages.SuccessReply;
-import org.opendaylight.controller.cluster.raft.RaftState;
-import org.opendaylight.controller.cluster.raft.TestActorFactory;
-import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
-import org.opendaylight.controller.cluster.raft.base.messages.TimeoutNow;
-import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
-import org.opendaylight.controller.cluster.raft.messages.RequestVote;
-import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntity;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
-import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * Unit tests for EntityOwnershipShard.
- *
- * @author Thomas Pantelis
- */
-public class EntityOwnershipShardTest extends AbstractEntityOwnershipTest {
-    private static final String ENTITY_TYPE = "test type";
-    private static final YangInstanceIdentifier ENTITY_ID1 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
-    private static final YangInstanceIdentifier ENTITY_ID2 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
-    private static final YangInstanceIdentifier ENTITY_ID3 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity3"));
-    private static final YangInstanceIdentifier ENTITY_ID4 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity4"));
-    private static final YangInstanceIdentifier ENTITY_ID5 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity5"));
-    private static final SchemaContext SCHEMA_CONTEXT = SchemaContextHelper.entityOwners();
-    private static final String LOCAL_MEMBER_NAME = "local-member-1";
-    private static final String PEER_MEMBER_1_NAME = "peer-member-1";
-    private static final String PEER_MEMBER_2_NAME = "peer-member-2";
-
-    private Builder dataStoreContextBuilder = DatastoreContext.newBuilder().persistent(false);
-    private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
-
-    @After
-    public void tearDown() {
-        actorFactory.close();
-    }
-
-    @Test
-    public void testOnRegisterCandidateLocal() {
-        testLog.info("testOnRegisterCandidateLocal starting");
-
-        ShardTestKit kit = new ShardTestKit(getSystem());
-
-        TestActorRef<EntityOwnershipShard> shard = actorFactory.createTestActor(newLocalShardProps());
-
-        ShardTestKit.waitUntilLeader(shard);
-
-        YangInstanceIdentifier entityId = ENTITY_ID1;
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
-        shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyCommittedEntityCandidate(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-        verifyOwner(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
-        testLog.info("testOnRegisterCandidateLocal ending");
-    }
-
-    @Test
-    public void testOnRegisterCandidateLocalWithNoInitialLeader() {
-        testLog.info("testOnRegisterCandidateLocalWithNoInitialLeader starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
-        TestEntityOwnershipShard peerShard = peer.underlyingActor();
-        peerShard.startDroppingMessagesOfType(RequestVote.class);
-        peerShard.startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<EntityOwnershipShard> shard = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME), leaderId.toString());
-
-        YangInstanceIdentifier entityId = ENTITY_ID1;
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
-        shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Now allow RequestVotes to the peer so the shard becomes the leader. This should retry the commit.
-        peerShard.stopDroppingMessagesOfType(RequestVote.class);
-
-        verifyCommittedEntityCandidate(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-        verifyOwner(shard, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
-        testLog.info("testOnRegisterCandidateLocalWithNoInitialLeader ending");
-    }
-
-    @Test
-    public void testOnRegisterCandidateLocalWithNoInitialConsensus() {
-        testLog.info("testOnRegisterCandidateLocalWithNoInitialConsensus starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2)
-                .shardTransactionCommitTimeoutInSeconds(1);
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
-        TestEntityOwnershipShard peerShard = peer.underlyingActor();
-        peerShard.startDroppingMessagesOfType(ElectionTimeout.class);
-
-        // Drop AppendEntries so consensus isn't reached.
-        peerShard.startDroppingMessagesOfType(AppendEntries.class);
-
-        TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME), leaderId.toString());
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        YangInstanceIdentifier entityId = ENTITY_ID1;
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
-        leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Wait long enough for the commit to time out.
-        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
-
-        // Resume AppendEntries - the follower should ack the commit which should then result in the candidate
-        // write being applied to the state.
-        peerShard.stopDroppingMessagesOfType(AppendEntries.class);
-
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
-        testLog.info("testOnRegisterCandidateLocalWithNoInitialConsensus ending");
-    }
-
-    @Test
-    public void testOnRegisterCandidateLocalWithIsolatedLeader() throws Exception {
-        testLog.info("testOnRegisterCandidateLocalWithIsolatedLeader starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2)
-                .shardIsolatedLeaderCheckIntervalInMillis(50);
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
-        TestEntityOwnershipShard peerShard = peer.underlyingActor();
-        peerShard.startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME));
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        // Drop AppendEntries and wait enough time for the shard to switch to IsolatedLeader.
-        peerShard.startDroppingMessagesOfType(AppendEntries.class);
-        verifyRaftState(leader, state ->
-                assertEquals("getRaftState", RaftState.IsolatedLeader.toString(), state.getRaftState()));
-
-        YangInstanceIdentifier entityId = ENTITY_ID1;
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, entityId);
-
-        leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Resume AppendEntries - the candidate write should now be committed.
-        peerShard.stopDroppingMessagesOfType(AppendEntries.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, entityId, LOCAL_MEMBER_NAME);
-
-        testLog.info("testOnRegisterCandidateLocalWithIsolatedLeader ending");
-    }
-
-    @Test
-    public void testOnRegisterCandidateLocalWithRemoteLeader() {
-        testLog.info("testOnRegisterCandidateLocalWithRemoteLeader starting");
-
-        ShardTestKit kit = new ShardTestKit(getSystem());
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2)
-                .shardBatchedModificationCount(5);
-
-        ShardIdentifier leaderId = newShardId(PEER_MEMBER_1_NAME);
-        ShardIdentifier localId = newShardId(LOCAL_MEMBER_NAME);
-        TestActorRef<TestEntityOwnershipShard> leader = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(leaderId, peerMap(localId.toString()), PEER_MEMBER_1_NAME),
-                actorFactory.createActor(MessageCollectorActor.props())), leaderId.toString());
-        final TestEntityOwnershipShard leaderShard = leader.underlyingActor();
-
-        TestActorRef<TestEntityOwnershipShard> local = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(localId, peerMap(leaderId.toString()), LOCAL_MEMBER_NAME)), localId.toString());
-        local.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        local.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
-        // Test with initial commit timeout and subsequent retry.
-
-        local.tell(dataStoreContextBuilder.shardTransactionCommitTimeoutInSeconds(1).build(), ActorRef.noSender());
-        leaderShard.startDroppingMessagesOfType(BatchedModifications.class);
-
-        local.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        expectFirstMatching(leaderShard.collectorActor(), BatchedModifications.class);
-
-        // Send a bunch of registration messages quickly and verify.
-
-        leaderShard.stopDroppingMessagesOfType(BatchedModifications.class);
-        clearMessages(leaderShard.collectorActor());
-
-        int max = 100;
-        List<YangInstanceIdentifier> entityIds = new ArrayList<>();
-        for (int i = 1; i <= max; i++) {
-            YangInstanceIdentifier id = YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "test" + i));
-            entityIds.add(id);
-            local.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, id)), kit.getRef());
-        }
-
-        for (int i = 0; i < max; i++) {
-            verifyCommittedEntityCandidate(local, ENTITY_TYPE, entityIds.get(i), LOCAL_MEMBER_NAME);
-        }
-
-        testLog.info("testOnRegisterCandidateLocalWithRemoteLeader ending");
-    }
-
-    @Test
-    public void testOnUnregisterCandidateLocal() {
-        testLog.info("testOnUnregisterCandidateLocal starting");
-
-        ShardTestKit kit = new ShardTestKit(getSystem());
-        TestActorRef<EntityOwnershipShard> shard = actorFactory.createTestActor(newLocalShardProps());
-        ShardTestKit.waitUntilLeader(shard);
-
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
-        // Register
-
-        shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyCommittedEntityCandidate(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
-        // Unregister
-
-        shard.tell(new UnregisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyOwner(shard, ENTITY_TYPE, ENTITY_ID1, "");
-
-        // Register again
-
-        shard.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyCommittedEntityCandidate(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(shard, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
-        testLog.info("testOnUnregisterCandidateLocal ending");
-    }
-
-    @Test
-    public void testOwnershipChanges() {
-        testLog.info("testOwnershipChanges starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
-        ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
-                    peerId1.toString());
-        peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
-                    peerId2.toString());
-        peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME),
-                    leaderId.toString());
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
-        // Add a remote candidate
-
-        peer1.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        // Register local
-
-        leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Verify the local candidate is committed and the remote candidate remains the owner
-
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        // Add another remote candidate and verify ownership doesn't change
-
-        peer2.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        // Remove the second remote candidate and verify ownership doesn't change
-
-        peer2.tell(new UnregisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyEntityCandidateRemoved(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        // Remove the first remote candidate and verify the local candidate becomes owner
-
-        peer1.tell(new UnregisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyEntityCandidateRemoved(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        // Add the second remote candidate back and verify ownership doesn't change
-
-        peer2.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        // Unregister the local candidate and verify the second remote candidate becomes owner
-
-        leader.tell(new UnregisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyEntityCandidateRemoved(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_2_NAME);
-
-        testLog.info("testOwnershipChanges ending");
-    }
-
-    @Test
-    public void testOwnerChangesOnPeerAvailabilityChanges() throws Exception {
-        testLog.info("testOwnerChangesOnPeerAvailabilityChanges starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(4)
-                .shardIsolatedLeaderCheckIntervalInMillis(100000);
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
-        ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
-                    peerId1.toString());
-        peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
-                    peerId2.toString());
-        peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME),
-                    leaderId.toString());
-
-        verifyRaftState(leader, state ->
-                assertEquals("getRaftState", RaftState.Leader.toString(), state.getRaftState()));
-
-        // Send PeerDown and PeerUp with no entities
-
-        leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-        leader.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
-        // Add candidates for entity1 with the local leader as the owner
-
-        leader.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
-        peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
-
-        peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_1_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-
-        // Add candidates for entity2 with peerMember2 as the owner
-
-        peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-
-        peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-
-        // Add candidates for entity3 with peerMember2 as the owner.
-
-        peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-
-        leader.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-
-        peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_1_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-
-        // Add peerMember2 as the only candidate for entity4.
-
-        peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID4)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
-        // Add peerMember1 as the only candidate for entity5.
-
-        peer1.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID5)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID5, PEER_MEMBER_1_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID5, PEER_MEMBER_1_NAME);
-
-        // Kill peerMember2 and send PeerDown - entities 2 and 3, owned by peerMember2, should get a new owner
-        // selected (entity4 is also owned by peerMember2 but has no other candidate).
-
-        kit.watch(peer2);
-        peer2.tell(PoisonPill.getInstance(), ActorRef.noSender());
-        kit.expectMsgClass(Duration.ofSeconds(5), Terminated.class);
-        kit.unwatch(peer2);
-
-        leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-        // Send PeerDown again - should be noop
-        leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-        peer1.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-        // no other candidates for entity4 so peerMember2 should remain owner.
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
-        // Reinstate peerMember2
-
-        peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
-                    peerId2.toString());
-        peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-        leader.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-        // Send PeerUp again - should be noop
-        leader.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-        peer1.tell(new PeerUp(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
-        // peerMember2's candidates should be removed on startup.
-        verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
-        verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-        verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID4, PEER_MEMBER_2_NAME);
-
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, "");
-
-        // Add back candidate peerMember2 for entities 1, 2, & 3.
-
-        peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID1)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID2)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        peer2.tell(new RegisterCandidateLocal(new DOMEntity(ENTITY_TYPE, ENTITY_ID3)), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyCommittedEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID4, "");
-
-        // Kill peerMember1 and send PeerDown - entity 2 should get a new owner selected
-
-        kit.watch(peer1);
-        peer1.tell(PoisonPill.getInstance(), ActorRef.noSender());
-        kit.expectMsgClass(Duration.ofSeconds(5), Terminated.class);
-        kit.unwatch(peer1);
-        leader.tell(new PeerDown(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-
-        // Verify the reinstated peerMember2 is fully synced.
-
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID4, "");
-
-        // Reinstate peerMember1 and verify no owner changes
-
-        peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(newShardBuilder(
-                peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)), peerId1.toString());
-        peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-        leader.tell(new PeerUp(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-        verifyOwner(leader, ENTITY_TYPE, ENTITY_ID4, "");
-
-        verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_1_NAME);
-        verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
-        verifyNoEntityCandidate(leader, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_1_NAME);
-
-        verifyNoEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_1_NAME);
-        verifyNoEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_1_NAME);
-        verifyNoEntityCandidate(peer2, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_1_NAME);
-
-        // Verify the reinstated peerMember1 is fully synced.
-
-        verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME);
-        verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID3, LOCAL_MEMBER_NAME);
-        verifyOwner(peer1, ENTITY_TYPE, ENTITY_ID4, "");
-
-        AtomicLong leaderLastApplied = new AtomicLong();
-        verifyRaftState(leader, rs -> {
-            assertEquals("LastApplied up-to-date", rs.getLastApplied(), rs.getLastIndex());
-            leaderLastApplied.set(rs.getLastApplied());
-        });
-
-        verifyRaftState(peer2, rs -> assertEquals("LastApplied", leaderLastApplied.get(), rs.getLastIndex()));
-
-        // Kill the local leader and elect peer2 the leader. This should cause a new owner to be selected for
-        // the entities (1 and 3) previously owned by the local leader member.
-
-        peer2.tell(new PeerAddressResolved(peerId1.toString(), peer1.path().toString()), ActorRef.noSender());
-        peer2.tell(new PeerUp(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-        peer2.tell(new PeerUp(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-
-        kit.watch(leader);
-        leader.tell(PoisonPill.getInstance(), ActorRef.noSender());
-        kit.expectMsgClass(Duration.ofSeconds(5), Terminated.class);
-        kit.unwatch(leader);
-        peer2.tell(new PeerDown(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-        peer2.tell(TimeoutNow.INSTANCE, peer2);
-
-        verifyRaftState(peer2, state ->
-                assertEquals("getRaftState", RaftState.Leader.toString(), state.getRaftState()));
-
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID1, PEER_MEMBER_2_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID2, PEER_MEMBER_2_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID3, PEER_MEMBER_2_NAME);
-        verifyOwner(peer2, ENTITY_TYPE, ENTITY_ID4, "");
-
-        testLog.info("testOwnerChangesOnPeerAvailabilityChanges ending");
-    }
-
-    @Test
-    public void testLeaderIsolation() throws Exception {
-        testLog.info("testLeaderIsolation starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
-        ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(4)
-                .shardIsolatedLeaderCheckIntervalInMillis(100000);
-
-        TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
-                    peerId1.toString());
-        peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
-                    peerId2.toString());
-        peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        dataStoreContextBuilder = DatastoreContext.newBuilderFrom(dataStoreContextBuilder.build())
-                .shardIsolatedLeaderCheckIntervalInMillis(500);
-
-        TestActorRef<TestEntityOwnershipShard> leader = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME)),
-                    leaderId.toString());
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        // Add entity1 candidates for all members with the leader as the owner
-
-        DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-        leader.tell(new RegisterCandidateLocal(entity1), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        peer1.tell(new RegisterCandidateLocal(entity1), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        peer2.tell(new RegisterCandidateLocal(entity1), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_2_NAME);
-
-        verifyOwner(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyOwner(peer1, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyOwner(peer2, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        // Add entity2 candidates for all members with peer1 as the owner
-
-        DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
-        peer1.tell(new RegisterCandidateLocal(entity2), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        peer2.tell(new RegisterCandidateLocal(entity2), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_2_NAME);
-
-        leader.tell(new RegisterCandidateLocal(entity2), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        verifyOwner(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-        verifyOwner(peer1, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-        verifyOwner(peer2, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        // Add entity3 candidates for all members with peer2 as the owner
-
-        DOMEntity entity3 = new DOMEntity(ENTITY_TYPE, ENTITY_ID3);
-        peer2.tell(new RegisterCandidateLocal(entity3), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-
-        leader.tell(new RegisterCandidateLocal(entity3), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity3.getType(), entity3.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        peer1.tell(new RegisterCandidateLocal(entity3), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        verifyOwner(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-        verifyOwner(peer1, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-        verifyOwner(peer2, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-
-        // Add listeners on all members
-
-        DOMEntityOwnershipListener leaderListener = mock(DOMEntityOwnershipListener.class);
-        leader.tell(new RegisterListenerLocal(leaderListener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verify(leaderListener, timeout(5000).times(3)).ownershipChanged(or(or(
-                ownershipChange(entity1, false, true, true), ownershipChange(entity2, false, false, true)),
-                ownershipChange(entity3, false, false, true)));
-        reset(leaderListener);
-
-        DOMEntityOwnershipListener peer1Listener = mock(DOMEntityOwnershipListener.class);
-        peer1.tell(new RegisterListenerLocal(peer1Listener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verify(peer1Listener, timeout(5000).times(3)).ownershipChanged(or(or(
-                ownershipChange(entity1, false, false, true), ownershipChange(entity2, false, true, true)),
-                ownershipChange(entity3, false, false, true)));
-        reset(peer1Listener);
-
-        DOMEntityOwnershipListener peer2Listener = mock(DOMEntityOwnershipListener.class);
-        peer2.tell(new RegisterListenerLocal(peer2Listener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verify(peer2Listener, timeout(5000).times(3)).ownershipChanged(or(or(
-                ownershipChange(entity1, false, false, true), ownershipChange(entity2, false, false, true)),
-                ownershipChange(entity3, false, true, true)));
-        reset(peer2Listener);
-
-        // Isolate the leader by dropping AppendEntries to the followers and incoming messages from the followers.
-
-        leader.underlyingActor().startDroppingMessagesOfType(RequestVote.class);
-        leader.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
-        peer2.underlyingActor().startDroppingMessagesOfType(AppendEntries.class,
-            ae -> ae.getLeaderId().equals(leaderId.toString()));
-        peer1.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
-        // Make peer1 start an election and become leader by enabling the ElectionTimeout message.
-
-        peer1.underlyingActor().stopDroppingMessagesOfType(ElectionTimeout.class);
-
-        // Send PeerDown to the isolated leader so it tries to re-assign ownership of the entities owned by the
-        // now-unreachable peers.
-
-        leader.tell(new PeerDown(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-        leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
-        verifyRaftState(leader, state ->
-                assertEquals("getRaftState", RaftState.IsolatedLeader.toString(), state.getRaftState()));
-
-        // Expect inJeopardy notification on the isolated leader.
-
-        verify(leaderListener, timeout(5000).times(3)).ownershipChanged(or(or(
-                ownershipChange(entity1, true, true, true, true), ownershipChange(entity2, false, false, true, true)),
-                ownershipChange(entity3, false, false, true, true)));
-        reset(leaderListener);
-
-        verifyRaftState(peer1, state ->
-                assertEquals("getRaftState", RaftState.Leader.toString(), state.getRaftState()));
-
-        // Send PeerDown to the new leader peer1 so it re-assigns ownership for the entities owned by the
-        // isolated leader.
-
-        peer1.tell(new PeerDown(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-
-        verifyOwner(peer1, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        verify(peer1Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
-        reset(peer1Listener);
-
-        verify(peer2Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
-        reset(peer2Listener);
-
-        // Remove the isolation.
-
-        leader.underlyingActor().stopDroppingMessagesOfType(RequestVote.class);
-        leader.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-        peer2.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-        peer1.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-
-        // Previous leader should switch to Follower and send inJeopardy cleared notifications for all entities.
-
-        verifyRaftState(leader, state ->
-                assertEquals("getRaftState", RaftState.Follower.toString(), state.getRaftState()));
-
-        verify(leaderListener, timeout(5000).times(3)).ownershipChanged(or(or(
-                ownershipChange(entity1, true, true, true), ownershipChange(entity2, false, false, true)),
-                ownershipChange(entity3, false, false, true)));
-
-        verifyOwner(leader, entity1.getType(), entity1.getIdentifier(), PEER_MEMBER_1_NAME);
-        verify(leaderListener, timeout(5000)).ownershipChanged(ownershipChange(entity1, true, false, true));
-
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verifyOwner(leader, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-        verifyOwner(leader, entity3.getType(), entity3.getIdentifier(), PEER_MEMBER_2_NAME);
-
-        verifyNoMoreInteractions(leaderListener);
-        verifyNoMoreInteractions(peer1Listener);
-        verifyNoMoreInteractions(peer2Listener);
-
-        testLog.info("testLeaderIsolation ending");
-    }
-
-    @Test
-    public void testLeaderIsolationWithPendingCandidateAdded() throws Exception {
-        testLog.info("testLeaderIsolationWithPendingCandidateAdded starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
-        ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(4)
-                .shardIsolatedLeaderCheckIntervalInMillis(100000);
-
-        TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME),
-                actorFactory.createActor(MessageCollectorActor.props())), peerId1.toString());
-        peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME),
-                actorFactory.createTestActor(MessageCollectorActor.props())), peerId2.toString());
-        peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        dataStoreContextBuilder = DatastoreContext.newBuilderFrom(dataStoreContextBuilder.build())
-                .shardIsolatedLeaderCheckIntervalInMillis(500);
-
-        TestActorRef<TestEntityOwnershipShard> leader = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME),
-                actorFactory.createTestActor(MessageCollectorActor.props())), leaderId.toString());
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        // Add listeners on all members
-
-        DOMEntityOwnershipListener leaderListener = mock(DOMEntityOwnershipListener.class,
-                "DOMEntityOwnershipListener-" + LOCAL_MEMBER_NAME);
-        leader.tell(new RegisterListenerLocal(leaderListener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        DOMEntityOwnershipListener peer1Listener = mock(DOMEntityOwnershipListener.class,
-                "DOMEntityOwnershipListener-" + PEER_MEMBER_1_NAME);
-        peer1.tell(new RegisterListenerLocal(peer1Listener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        DOMEntityOwnershipListener peer2Listener = mock(DOMEntityOwnershipListener.class,
-                "DOMEntityOwnershipListener-" + PEER_MEMBER_2_NAME);
-        peer2.tell(new RegisterListenerLocal(peer2Listener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Drop the CandidateAdded message to the leader for now.
-
-        leader.underlyingActor().startDroppingMessagesOfType(CandidateAdded.class);
-
-        // Add entity candidates for the leader. Since we've blocked the CandidateAdded message, no owner will
-        // be assigned yet.
-
-        DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-        leader.tell(new RegisterCandidateLocal(entity1), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyCommittedEntityCandidate(peer1, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyCommittedEntityCandidate(peer2, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
-        leader.tell(new RegisterCandidateLocal(entity2), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyCommittedEntityCandidate(peer1, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyCommittedEntityCandidate(peer2, entity2.getType(), entity2.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        // Capture the CandidateAdded messages.
-
-        final List<CandidateAdded> candidateAdded = expectMatching(leader.underlyingActor().collectorActor(),
-                CandidateAdded.class, 2);
-
-        // Drop AppendEntries messages to the followers that contain log entries - these will carry the owner writes
-        // once we forward the CandidateAdded messages to the leader, leaving the pending owner write tx's uncommitted.
-
-        peer1.underlyingActor().startDroppingMessagesOfType(AppendEntries.class, ae -> ae.getEntries().size() > 0);
-        peer2.underlyingActor().startDroppingMessagesOfType(AppendEntries.class, ae -> ae.getEntries().size() > 0);
-
-        // Now forward the CandidateAdded messages to the leader and wait for it to send out the AppendEntries.
-
-        leader.underlyingActor().stopDroppingMessagesOfType(CandidateAdded.class);
-        leader.tell(candidateAdded.get(0), leader);
-        leader.tell(candidateAdded.get(1), leader);
-
-        expectMatching(peer1.underlyingActor().collectorActor(), AppendEntries.class, 2,
-            ae -> ae.getEntries().size() > 0);
-
-        // Verify no owner assigned.
-
-        verifyNoOwnerSet(leader, entity1.getType(), entity1.getIdentifier());
-        verifyNoOwnerSet(leader, entity2.getType(), entity2.getIdentifier());
-
-        // Isolate the leader by dropping AppendEntries to the followers and incoming messages from the followers.
-
-        leader.underlyingActor().startDroppingMessagesOfType(RequestVote.class);
-        leader.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
-        peer2.underlyingActor().startDroppingMessagesOfType(AppendEntries.class,
-            ae -> ae.getLeaderId().equals(leaderId.toString()));
-        peer1.underlyingActor().startDroppingMessagesOfType(AppendEntries.class);
-
-        // Send PeerDown to the isolated leader - should be a no-op since there are no owned entities.
-
-        leader.tell(new PeerDown(peerId1.getMemberName(), peerId1.toString()), ActorRef.noSender());
-        leader.tell(new PeerDown(peerId2.getMemberName(), peerId2.toString()), ActorRef.noSender());
-
-        // Verify the leader transitions to IsolatedLeader.
-
-        verifyRaftState(leader, state -> assertEquals("getRaftState", RaftState.IsolatedLeader.toString(),
-                state.getRaftState()));
-
-        // Send PeerDown to the new leader peer1.
-
-        peer1.tell(new PeerDown(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-
-        // Make peer1 start an election and become leader by sending the TimeoutNow message.
-
-        peer1.tell(TimeoutNow.INSTANCE, ActorRef.noSender());
-
-        // Verify the peer1 transitions to Leader.
-
-        verifyRaftState(peer1, state -> assertEquals("getRaftState", RaftState.Leader.toString(),
-                state.getRaftState()));
-
-        verifyNoOwnerSet(peer1, entity1.getType(), entity1.getIdentifier());
-        verifyNoOwnerSet(peer2, entity1.getType(), entity2.getIdentifier());
-
-        verifyNoMoreInteractions(peer1Listener);
-        verifyNoMoreInteractions(peer2Listener);
-
-        // Add peer1 as a candidate for entity2.
-
-        peer1.tell(new RegisterCandidateLocal(entity2), kit.getRef());
-
-        verifyOwner(peer1, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-        verify(peer1Listener, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, true, true));
-        verify(peer2Listener, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, false, true));
-
-        reset(leaderListener, peer1Listener, peer2Listener);
-
-        // Remove the isolation.
-
-        leader.underlyingActor().stopDroppingMessagesOfType(RequestVote.class);
-        leader.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-        peer2.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-        peer1.underlyingActor().stopDroppingMessagesOfType(AppendEntries.class);
-
-        // Previous leader should switch to Follower.
-
-        verifyRaftState(leader, state -> assertEquals("getRaftState", RaftState.Follower.toString(),
-                state.getRaftState()));
-
-        // Send PeerUp to peer1 and peer2.
-
-        peer1.tell(new PeerUp(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-        peer2.tell(new PeerUp(leaderId.getMemberName(), leaderId.toString()), ActorRef.noSender());
-
-        // The previous leader should become the owner of entity1.
-
-        verifyOwner(leader, entity1.getType(), entity1.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        // The previous leader's DOMEntityOwnershipListener should get 4 total notifications:
-        //     - inJeopardy cleared for entity1 (wasOwner=false, isOwner=false, hasOwner=false, inJeopardy=false)
-        //     - inJeopardy cleared for entity2 (wasOwner=false, isOwner=false, hasOwner=false, inJeopardy=false)
-        //     - local owner granted for entity1 (wasOwner=false, isOwner=true, hasOwner=true, inJeopardy=false)
-        //     - remote owner for entity2 (wasOwner=false, isOwner=false, hasOwner=true, inJeopardy=false)
-        verify(leaderListener, timeout(5000).times(4)).ownershipChanged(or(
-                or(ownershipChange(entity1, false, false, false), ownershipChange(entity2, false, false, false)),
-                or(ownershipChange(entity1, false, true, true), ownershipChange(entity2, false, false, true))));
-
-        verify(peer1Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
-        verify(peer2Listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, false, true));
-
-        // Verify entity2's owner doesn't change.
-
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verifyOwner(peer1, entity2.getType(), entity2.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        verifyNoMoreInteractions(leaderListener);
-        verifyNoMoreInteractions(peer1Listener);
-        verifyNoMoreInteractions(peer2Listener);
-
-        testLog.info("testLeaderIsolationWithPendingCandidateAdded ending");
-    }
-
-    @Test
-    public void testListenerRegistration() {
-        testLog.info("testListenerRegistration starting");
-
-        ShardTestKit kit = new ShardTestKit(getSystem());
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
-        peer.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME), leaderId.toString());
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        String otherEntityType = "otherEntityType";
-        final DOMEntity entity1 = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-        final DOMEntity entity2 = new DOMEntity(ENTITY_TYPE, ENTITY_ID2);
-        final DOMEntity entity3 = new DOMEntity(ENTITY_TYPE, ENTITY_ID3);
-        final DOMEntity entity4 = new DOMEntity(otherEntityType, ENTITY_ID3);
-        DOMEntityOwnershipListener listener = mock(DOMEntityOwnershipListener.class);
-
-        // Register listener
-
-        leader.tell(new RegisterListenerLocal(listener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Register a couple candidates for the desired entity type and verify listener is notified.
-
-        leader.tell(new RegisterCandidateLocal(entity1), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verify(listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, false, true, true));
-
-        leader.tell(new RegisterCandidateLocal(entity2), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verify(listener, timeout(5000)).ownershipChanged(ownershipChange(entity2, false, true, true));
-        reset(listener);
-
-        // Register another candidate for another entity type and verify listener is not notified.
-
-        leader.tell(new RegisterCandidateLocal(entity4), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verify(listener, never()).ownershipChanged(ownershipChange(entity4));
-
-        // Register remote candidate for entity1
-
-        peer.tell(new RegisterCandidateLocal(entity1), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-        verifyCommittedEntityCandidate(leader, ENTITY_TYPE, entity1.getIdentifier(), PEER_MEMBER_1_NAME);
-
-        // Unregister the local candidate for entity1 and verify listener is notified
-
-        leader.tell(new UnregisterCandidateLocal(entity1), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verify(listener, timeout(5000)).ownershipChanged(ownershipChange(entity1, true, false, true));
-        reset(listener);
-
-        // Unregister the listener, add a candidate for entity3 and verify listener isn't notified
-
-        leader.tell(new UnregisterListenerLocal(listener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        leader.tell(new RegisterCandidateLocal(entity3), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verifyOwner(leader, ENTITY_TYPE, entity3.getIdentifier(), LOCAL_MEMBER_NAME);
-        Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-        verify(listener, never()).ownershipChanged(any(DOMEntityOwnershipChange.class));
-
-        // Re-register the listener and verify it gets notified of currently owned entities
-
-        reset(listener);
-
-        leader.tell(new RegisterListenerLocal(listener, ENTITY_TYPE), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        verify(listener, timeout(5000).times(2)).ownershipChanged(or(ownershipChange(entity2, false, true, true),
-                ownershipChange(entity3, false, true, true)));
-        Uninterruptibles.sleepUninterruptibly(300, TimeUnit.MILLISECONDS);
-        verify(listener, never()).ownershipChanged(ownershipChange(entity4));
-        verify(listener, times(1)).ownershipChanged(ownershipChange(entity1));
-
-        testLog.info("testListenerRegistration ending");
-    }
-
-    @Test
-    public void testDelayedEntityOwnerSelectionWhenMaxPeerRequestsReceived() {
-        testLog.info("testDelayedEntityOwnerSelectionWhenMaxPeerRequestsReceived starting");
-
-        ShardTestKit kit = new ShardTestKit(getSystem());
-        EntityOwnerSelectionStrategyConfig.Builder builder = EntityOwnerSelectionStrategyConfig.newBuilder()
-                .addStrategy(ENTITY_TYPE, LastCandidateSelectionStrategy.class, 500);
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId = newShardId(PEER_MEMBER_1_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId, peerMap(leaderId.toString()), PEER_MEMBER_1_NAME)), peerId.toString());
-        peer.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId.toString()), LOCAL_MEMBER_NAME, builder.build()),
-                leaderId.toString());
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
-        // Add a remote candidate
-
-        peer.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Register local
-
-        leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Verify the local candidate becomes owner
-
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        testLog.info("testDelayedEntityOwnerSelectionWhenMaxPeerRequestsReceived ending");
-    }
-
-    @Test
-    public void testDelayedEntityOwnerSelection() {
-        testLog.info("testDelayedEntityOwnerSelection starting");
-
-        final ShardTestKit kit = new ShardTestKit(getSystem());
-        EntityOwnerSelectionStrategyConfig.Builder builder = EntityOwnerSelectionStrategyConfig.newBuilder()
-                .addStrategy(ENTITY_TYPE, LastCandidateSelectionStrategy.class, 500);
-
-        dataStoreContextBuilder.shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(2);
-
-        ShardIdentifier leaderId = newShardId(LOCAL_MEMBER_NAME);
-        ShardIdentifier peerId1 = newShardId(PEER_MEMBER_1_NAME);
-        ShardIdentifier peerId2 = newShardId(PEER_MEMBER_2_NAME);
-
-        TestActorRef<TestEntityOwnershipShard> peer1 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId1, peerMap(leaderId.toString(), peerId2.toString()), PEER_MEMBER_1_NAME)),
-                    peerId1.toString());
-        peer1.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<TestEntityOwnershipShard> peer2 = actorFactory.createTestActor(TestEntityOwnershipShard.props(
-                newShardBuilder(peerId2, peerMap(leaderId.toString(), peerId1.toString()), PEER_MEMBER_2_NAME)),
-                    peerId2.toString());
-        peer2.underlyingActor().startDroppingMessagesOfType(ElectionTimeout.class);
-
-        TestActorRef<EntityOwnershipShard> leader = actorFactory.createTestActor(
-                newShardProps(leaderId, peerMap(peerId1.toString(), peerId2.toString()), LOCAL_MEMBER_NAME,
-                        builder.build()), leaderId.toString());
-
-        ShardTestKit.waitUntilLeader(leader);
-
-        DOMEntity entity = new DOMEntity(ENTITY_TYPE, ENTITY_ID1);
-
-        // Add a remote candidate
-
-        peer1.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Register local
-
-        leader.tell(new RegisterCandidateLocal(entity), kit.getRef());
-        kit.expectMsgClass(SuccessReply.class);
-
-        // Verify the local candidate becomes owner
-
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), PEER_MEMBER_1_NAME);
-        verifyCommittedEntityCandidate(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-        verifyOwner(leader, entity.getType(), entity.getIdentifier(), LOCAL_MEMBER_NAME);
-
-        testLog.info("testDelayedEntityOwnerSelection ending");
-    }
-
-    private Props newLocalShardProps() {
-        return newShardProps(newShardId(LOCAL_MEMBER_NAME), Collections.<String,String>emptyMap(), LOCAL_MEMBER_NAME);
-    }
-
-    private Props newShardProps(final ShardIdentifier shardId, final Map<String,String> peers,
-            final String memberName) {
-        return newShardProps(shardId, peers, memberName, EntityOwnerSelectionStrategyConfig.newBuilder().build());
-    }
-
-    private Props newShardProps(final ShardIdentifier shardId, final Map<String,String> peers, final String memberName,
-                                final EntityOwnerSelectionStrategyConfig config) {
-        return newShardBuilder(shardId, peers, memberName).ownerSelectionStrategyConfig(config).props()
-                    .withDispatcher(Dispatchers.DefaultDispatcherId());
-    }
-
-    private EntityOwnershipShard.Builder newShardBuilder(final ShardIdentifier shardId, final Map<String, String> peers,
-            final String memberName) {
-        return EntityOwnershipShard.newBuilder().id(shardId).peerAddresses(peers).datastoreContext(
-                dataStoreContextBuilder.build()).schemaContextProvider(() -> SCHEMA_CONTEXT).localMemberName(
-                        MemberName.forName(memberName)).ownerSelectionStrategyConfig(
-                                EntityOwnerSelectionStrategyConfig.newBuilder().build());
-    }
-
-    private Map<String, String> peerMap(final String... peerIds) {
-        ImmutableMap.Builder<String, String> builder = ImmutableMap.<String, String>builder();
-        for (String peerId : peerIds) {
-            builder.put(peerId, actorFactory.createTestActorPath(peerId));
-        }
-
-        return builder.build();
-    }
-
-    private static class TestEntityOwnershipShard extends EntityOwnershipShard {
-        private final ActorRef collectorActor;
-        private final Map<Class<?>, Predicate<?>> dropMessagesOfType = new ConcurrentHashMap<>();
-
-        TestEntityOwnershipShard(final Builder builder, final ActorRef collectorActor) {
-            super(builder);
-            this.collectorActor = collectorActor;
-        }
-
-        @SuppressWarnings({ "unchecked", "rawtypes" })
-        @Override
-        public void handleCommand(final Object message) {
-            Predicate drop = dropMessagesOfType.get(message.getClass());
-            if (drop == null || !drop.test(message)) {
-                super.handleCommand(message);
-            }
-
-            if (collectorActor != null) {
-                collectorActor.tell(message, ActorRef.noSender());
-            }
-        }
-
-        void startDroppingMessagesOfType(final Class<?> msgClass) {
-            dropMessagesOfType.put(msgClass, msg -> true);
-        }
-
-        <T> void startDroppingMessagesOfType(final Class<T> msgClass, final Predicate<T> filter) {
-            dropMessagesOfType.put(msgClass, filter);
-        }
-
-        void stopDroppingMessagesOfType(final Class<?> msgClass) {
-            dropMessagesOfType.remove(msgClass);
-        }
-
-        ActorRef collectorActor() {
-            return collectorActor;
-        }
-
-        static Props props(final Builder builder) {
-            return props(builder, null);
-        }
-
-        static Props props(final Builder builder, final ActorRef collectorActor) {
-            return Props.create(TestEntityOwnershipShard.class, builder, collectorActor)
-                    .withDispatcher(Dispatchers.DefaultDispatcherId());
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipStatisticsTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipStatisticsTest.java
deleted file mode 100644 (file)
index 652c527..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership;
-
-import static org.junit.Assert.assertEquals;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.ENTITY_OWNERS_PATH;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityEntryWithOwner;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityOwnersWithCandidate;
-import static org.opendaylight.controller.cluster.datastore.entityownership.EntityOwnersModel.entityPath;
-
-import java.util.Map;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.Shard;
-import org.opendaylight.controller.cluster.datastore.ShardDataTree;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
-
-public class EntityOwnershipStatisticsTest extends AbstractActorTest {
-    private static final String LOCAL_MEMBER_NAME = "member-1";
-    private static final String REMOTE_MEMBER_NAME1 = "member-2";
-    private static final String REMOTE_MEMBER_NAME2 = "member-3";
-    private static final String ENTITY_TYPE = "test";
-    private static final YangInstanceIdentifier ENTITY_ID1 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity1"));
-    private static final YangInstanceIdentifier ENTITY_ID2 =
-            YangInstanceIdentifier.of(QName.create("test", "2015-08-14", "entity2"));
-
-    private final Shard mockShard = Mockito.mock(Shard.class);
-
-    private final ShardDataTree shardDataTree = new ShardDataTree(mockShard, SchemaContextHelper.entityOwners(),
-        TreeType.OPERATIONAL);
-    private EntityOwnershipStatistics ownershipStatistics;
-
-    @Before
-    public void setup() {
-        ownershipStatistics = new EntityOwnershipStatistics();
-        ownershipStatistics.init(shardDataTree);
-    }
-
-    @Test
-    public void testOnDataTreeChanged() throws Exception {
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, LOCAL_MEMBER_NAME));
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID2, LOCAL_MEMBER_NAME));
-
-        // Write local member as owner for entity 1
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
-        assertStatistics(ownershipStatistics.all(), LOCAL_MEMBER_NAME, 1L);
-
-        // Add remote member 1 as a candidate for entity 1 - the ownership statistics should not change
-
-        writeNode(ENTITY_OWNERS_PATH, entityOwnersWithCandidate(ENTITY_TYPE, ENTITY_ID1, REMOTE_MEMBER_NAME1));
-        assertStatistics(ownershipStatistics.all(), LOCAL_MEMBER_NAME, 1L);
-
-        // Change owner to remote member 1 for entity 1
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME1));
-        Map<String, Map<String, Long>> statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 1L);
-
-        // Change owner to remote member 2 for entity 1
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, REMOTE_MEMBER_NAME2));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 1L);
-
-        // Clear the owner for entity 1
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, ""));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
-        // Change owner to the local member for entity 1
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID1), entityEntryWithOwner(ENTITY_ID1, LOCAL_MEMBER_NAME));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
-        // Change owner to remote member 1 for entity 2
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, REMOTE_MEMBER_NAME1));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 1L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
-        // Change owner to the local member for entity 2
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 2L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
-        // Write local member owner for entity 2 again - expect no change
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, LOCAL_MEMBER_NAME));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 2L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
-        // Clear the owner for entity 2
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, ""));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
-        // Clear the owner for entity 2 again - expect no change
-
-        writeNode(entityPath(ENTITY_TYPE, ENTITY_ID2), entityEntryWithOwner(ENTITY_ID2, ""));
-        statistics = ownershipStatistics.all();
-        assertStatistics(statistics, LOCAL_MEMBER_NAME, 1L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME1, 0L);
-        assertStatistics(statistics, REMOTE_MEMBER_NAME2, 0L);
-
-    }
-
-    private static void assertStatistics(final Map<String, Map<String, Long>> statistics, final String memberName,
-            final long val) {
-        assertEquals(val, statistics.get(ENTITY_TYPE).get(memberName).longValue());
-    }
-
-    private void writeNode(final YangInstanceIdentifier path, final NormalizedNode<?, ?> node)
-            throws DataValidationFailedException {
-        AbstractEntityOwnershipTest.writeNode(path, node, shardDataTree);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfigReaderTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/EntityOwnerSelectionStrategyConfigReaderTest.java
deleted file mode 100644 (file)
index 1f04e5f..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.MockitoAnnotations;
-
-public class EntityOwnerSelectionStrategyConfigReaderTest {
-
-    @Before
-    public void setup() {
-        MockitoAnnotations.initMocks(this);
-    }
-
-    @Test
-    public void testReadStrategies() {
-        final Map<Object, Object> props = new java.util.HashMap<>();
-        props.put("entity.type.test", "org.opendaylight.controller.cluster.datastore.entityownership."
-                + "selectionstrategy.LastCandidateSelectionStrategy,100");
-
-
-        final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
-                .loadStrategyWithConfig(props);
-
-        assertTrue(config.isStrategyConfigured("test"));
-
-        final EntityOwnerSelectionStrategy strategy = config.createStrategy("test",
-                Collections.<String, Long>emptyMap());
-        assertTrue(strategy.toString(), strategy instanceof LastCandidateSelectionStrategy);
-        assertEquals(100L, strategy.getSelectionDelayInMillis());
-
-        final EntityOwnerSelectionStrategy strategy1 = config.createStrategy("test", Collections.emptyMap());
-        assertEquals(strategy, strategy1);
-
-        config.clearStrategies();
-
-        final EntityOwnerSelectionStrategy strategy2 = config.createStrategy("test", Collections.emptyMap());
-        assertNotEquals(strategy1, strategy2);
-    }
-
-    @Test
-    public void testReadStrategiesWithEmptyConfiguration() {
-
-        final Map<Object, Object> props = new HashMap<>();
-        final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
-                .loadStrategyWithConfig(props);
-
-        assertFalse(config.isStrategyConfigured("test"));
-    }
-
-    @Test
-    public void testReadStrategiesWithNullConfiguration() {
-        final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
-                .loadStrategyWithConfig(null);
-        assertFalse(config.isStrategyConfigured("test"));
-    }
-
-    @Test(expected = IllegalArgumentException.class)
-    public void testReadStrategiesInvalidDelay() {
-        final Map<Object, Object> props = new HashMap<>();
-        props.put("entity.type.test", "org.opendaylight.controller.cluster.datastore.entityownership."
-                + "selectionstrategy.LastCandidateSelectionStrategy,foo");
-        EntityOwnerSelectionStrategyConfigReader.loadStrategyWithConfig(props);
-    }
-
-    @Test(expected = IllegalArgumentException.class)
-    public void testReadStrategiesInvalidClassType() {
-        final Map<Object, Object> props = new HashMap<>();
-        props.put("entity.type.test", "String,100");
-        EntityOwnerSelectionStrategyConfigReader.loadStrategyWithConfig(props);
-    }
-
-    @Test
-    public void testReadStrategiesMissingDelay() {
-        final Map<Object, Object> props = new HashMap<>();
-        props.put("entity.type.test", "org.opendaylight.controller.cluster.datastore.entityownership."
-                + "selectionstrategy.LastCandidateSelectionStrategy,100");
-        props.put("entity.type.test1", "org.opendaylight.controller.cluster.datastore.entityownership."
-                + "selectionstrategy.LastCandidateSelectionStrategy");
-
-
-        final EntityOwnerSelectionStrategyConfig config = EntityOwnerSelectionStrategyConfigReader
-                .loadStrategyWithConfig(props);
-
-        assertEquals(100, config.createStrategy("test", Collections.emptyMap()).getSelectionDelayInMillis());
-        assertEquals(0, config.createStrategy("test2", Collections.emptyMap()).getSelectionDelayInMillis());
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LastCandidateSelectionStrategy.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LastCandidateSelectionStrategy.java
deleted file mode 100644 (file)
index 3c22cda..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import com.google.common.collect.Iterables;
-import java.util.Collection;
-import java.util.Map;
-
-public class LastCandidateSelectionStrategy extends AbstractEntityOwnerSelectionStrategy {
-    public LastCandidateSelectionStrategy(long selectionDelayInMillis, Map<String, Long> initialStatistics) {
-        super(selectionDelayInMillis, initialStatistics);
-    }
-
-    @Override
-    public String newOwner(String currentOwner, Collection<String> viableCandidates) {
-        return Iterables.getLast(viableCandidates);
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LeastLoadedCandidateSelectionStrategyTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/entityownership/selectionstrategy/LeastLoadedCandidateSelectionStrategyTest.java
deleted file mode 100644 (file)
index a7f1657..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.datastore.entityownership.selectionstrategy;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import org.junit.Test;
-
-public class LeastLoadedCandidateSelectionStrategyTest {
-
-    @Test
-    public void testLeastLoadedStrategy() {
-        LeastLoadedCandidateSelectionStrategy strategy = new LeastLoadedCandidateSelectionStrategy(
-                0L, Collections.<String, Long>emptyMap());
-
-        String owner = strategy.newOwner(null, prepareViableCandidates(3));
-        assertEquals("member-1", owner);
-
-        Map<String, Long> localStatistics = strategy.getLocalStatistics();
-        assertEquals(1L, (long) localStatistics.get("member-1"));
-
-        // member-2 has least load
-        strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(5,2,4));
-        owner = strategy.newOwner(null, prepareViableCandidates(3));
-        assertEquals("member-2", owner);
-
-        assertStatistics(strategy.getLocalStatistics(), 5,3,4);
-
-        // member-3 has least load
-        strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(5,7,4));
-        owner = strategy.newOwner(null, prepareViableCandidates(3));
-        assertEquals("member-3", owner);
-
-        assertStatistics(strategy.getLocalStatistics(), 5,7,5);
-
-        // member-1 has least load
-        strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(1,7,4));
-        owner = strategy.newOwner(null, prepareViableCandidates(3));
-        assertEquals("member-1", owner);
-
-        assertStatistics(strategy.getLocalStatistics(), 2,7,4);
-
-        // Let member-3 become the owner
-        strategy = new LeastLoadedCandidateSelectionStrategy(0L, prepareStatistics(3,3,0));
-        owner = strategy.newOwner(null, prepareViableCandidates(3));
-        assertEquals("member-3", owner);
-
-        assertStatistics(strategy.getLocalStatistics(), 3,3,1);
-
-        // member-3 is no longer viable so choose a new owner
-        owner = strategy.newOwner("member-3", prepareViableCandidates(2));
-        assertEquals("member-1", owner);
-
-        assertStatistics(strategy.getLocalStatistics(), 4,3,0);
-
-    }
-
-    private static Map<String, Long> prepareStatistics(long... count) {
-        Map<String, Long> statistics = new HashMap<>();
-        for (int i = 0; i < count.length; i++) {
-            statistics.put("member-" + (i + 1), count[i]);
-        }
-        return statistics;
-    }
-
-    private static Collection<String> prepareViableCandidates(int count) {
-        Collection<String> viableCandidates = new ArrayList<>();
-        for (int i = 0; i < count; i++) {
-            viableCandidates.add("member-" + (i + 1));
-        }
-        return viableCandidates;
-    }
-
-    private static void assertStatistics(Map<String, Long> statistics, long... count) {
-        for (int i = 0; i < count.length; i++) {
-            assertEquals(count[i], (long) statistics.get("member-" + (i + 1)));
-        }
-    }
-}
index 49d33921636f204782946772c268b102d2629d69..1b033546211e21b1f64e854ac77083939a6adc71 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class AbortTransactionReplyTest {
-
     @Test
     public void testSerialization() {
         AbortTransactionReply expected = AbortTransactionReply.instance(DataStoreVersions.CURRENT_VERSION);
index dd1d2be95b7dbee5dde86a6d5566bf421582f775..c0300e667b5c6fb4db67495740b8ee72e7821f7a 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.MockIdentifiers;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.MockIdentifiers;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class AbortTransactionTest {
-
     @Test
     public void testSerialization() {
         AbortTransaction expected = new AbortTransaction(
index 994070ce62d82873f26a772feb4dc3b3a9756ba1..69450b7efba6db3a70e8ab1aee2418374570aa08 100644 (file)
@@ -15,7 +15,7 @@ import com.google.common.collect.ImmutableSortedSet;
 import java.io.Serializable;
 import java.util.Optional;
 import java.util.SortedSet;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
@@ -25,27 +25,30 @@ import org.opendaylight.controller.cluster.datastore.modification.MergeModificat
 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
 /**
  * Unit tests for BatchedModifications.
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class BatchedModificationsTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
-        NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+        ContainerNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
 
         YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
 
@@ -100,8 +103,7 @@ public class BatchedModificationsTest extends AbstractTest {
         assertEquals("getTransactionID", tx2, clone.getTransactionId());
         assertTrue("isReady", clone.isReady());
         assertTrue("isDoCommitOnReady", clone.isDoCommitOnReady());
-        assertTrue("participatingShardNames present", clone.getParticipatingShardNames().isPresent());
-        assertEquals("participatingShardNames", shardNames, clone.getParticipatingShardNames().get());
+        assertEquals("participatingShardNames", Optional.of(shardNames), clone.getParticipatingShardNames());
         assertEquals("getModifications size", 0, clone.getModifications().size());
 
         // Test not ready.
@@ -114,20 +116,6 @@ public class BatchedModificationsTest extends AbstractTest {
         assertEquals("getTransactionID", tx2, clone.getTransactionId());
         assertFalse("isReady", clone.isReady());
         assertEquals("getModifications size", 0, clone.getModifications().size());
-
-        // Test pre-Flourine
-
-        batched = new BatchedModifications(tx2, DataStoreVersions.BORON_VERSION);
-        batched.addModification(new WriteModification(writePath, writeData));
-        batched.setReady(Optional.of(ImmutableSortedSet.of("one", "two")));
-
-        clone = (BatchedModifications) SerializationUtils.clone((Serializable) batched.toSerializable());
-
-        assertEquals("getVersion", DataStoreVersions.BORON_VERSION, clone.getVersion());
-        assertEquals("getTransactionID", tx2, clone.getTransactionId());
-        assertTrue("isReady", clone.isReady());
-        assertFalse("participatingShardNames present", clone.getParticipatingShardNames().isPresent());
-        assertEquals("getModifications size", 1, clone.getModifications().size());
     }
 
     @Test
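For reference, the builder-style node construction these test hunks switch to can be sketched as a standalone snippet. This is a minimal sketch assuming the yangtools Builders, ImmutableNodes and NodeIdentifier APIs already imported by the tests; the class name and QNames below are hypothetical stand-ins, not the real TestModel constants.

import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;

public final class ContainerNodeSketch {
    private ContainerNodeSketch() {
        // utility class
    }

    // Builds a container with a single leaf child, mirroring the fluent pattern used above.
    static ContainerNode example() {
        // Hypothetical QNames for illustration only.
        final QName container = QName.create("urn:example", "2015-08-14", "test");
        final QName desc = QName.create("urn:example", "2015-08-14", "desc");
        return Builders.containerBuilder()
            .withNodeIdentifier(new NodeIdentifier(container))
            .withChild(ImmutableNodes.leafNode(desc, "foo"))
            .build();
    }
}

The fluent builder keeps the node identifier and children in a single expression, which is what the migrated tests rely on in place of ImmutableContainerNodeBuilder.create().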
index 21c4d2f673d97fd4018e7eaecca88022282ad527..5ebc282cebe0f55ec2ff470458ea51e3ae52cb25 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CanCommitTransactionReplyTest {
-
     @Test
     public void testSerialization() {
         testSerialization(CanCommitTransactionReply.yes(DataStoreVersions.CURRENT_VERSION),
@@ -31,7 +31,7 @@ public class CanCommitTransactionReplyTest {
                 CanCommitTransactionReply.class);
     }
 
-    private static void testSerialization(CanCommitTransactionReply expected, Class<?> expSerialized) {
+    private static void testSerialization(final CanCommitTransactionReply expected, final Class<?> expSerialized) {
         Object serialized = expected.toSerializable();
         assertEquals("Serialized type", expSerialized, serialized.getClass());
 
index 8950c50518b8ac2d0ef59c1d8d868fe819ec65c2..806c504c5219a9297cbbf932b85846a310e83092 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CanCommitTransactionTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         CanCommitTransaction expected = new CanCommitTransaction(nextTransactionId(),
index 6f857112a4f5d8bf8834b79d66e3636c4f98b625..e18acec0d2c8c84931533806258209cead6327c7 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,17 +22,20 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CloseTransactionChainTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
-        CloseTransactionChain expected = new CloseTransactionChain(nextHistoryId(), DataStoreVersions.CURRENT_VERSION);
+        CloseTransactionChain expected = new CloseTransactionChain(newHistoryId(1), DataStoreVersions.CURRENT_VERSION);
 
-        Object serialized = expected.toSerializable();
+        var serialized = (Serializable) expected.toSerializable();
         assertEquals("Serialized type", CloseTransactionChain.class, serialized.getClass());
 
+        final byte[] bytes = SerializationUtils.serialize(serialized);
+        assertEquals(241, bytes.length);
+
         CloseTransactionChain actual = CloseTransactionChain.fromSerializable(
-                SerializationUtils.clone((Serializable) serialized));
+                SerializationUtils.deserialize(bytes));
         assertEquals("getIdentifier", expected.getIdentifier(), actual.getIdentifier());
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
     }
index 8c35babbdaed6b4446e40c7ee8dc28e7477082e6..db53db4b89ec0a0ef7fe86e434b8e8a2f3412a01 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import static org.junit.Assert.assertEquals;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -19,6 +19,7 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CloseTransactionTest {
     @Test
     public void testCloseTransactionSerialization() {
index 423411d8213ac6579b53cf611c5775b1c52465d0..1017772bd05c7a2eb6ca9a860ee0fa6c94543d35 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CommitTransactionReplyTest {
-
     @Test
     public void testSerialization() {
         CommitTransactionReply expected = CommitTransactionReply.instance(DataStoreVersions.CURRENT_VERSION);
index 1fccfbdeae93d35b0b977fd4ac0b3ab3b0a48255..2ab6ca7b3a204c41900f72510130ab3c4933e423 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,6 +22,7 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CommitTransactionTest extends AbstractTest {
 
     @Test
index a44e71a1654993114ceaa3ace998e9d019a3daa9..5acacfe8b182587fca05202589f3f5a289b3843b 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CreateTransactionReplyTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         CreateTransactionReply expected = new CreateTransactionReply("txPath", nextTransactionId(),
index 2e83a33e0ab2996b23be8c760532e5673900aea5..9d573e94883b9babc69831245d4957ffb7a0a7d1 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class CreateTransactionTest extends AbstractTest {
-
     @Test
     public void testSerialization() {
         CreateTransaction expected = new CreateTransaction(nextTransactionId(), 2, DataStoreVersions.CURRENT_VERSION);
index 8f33773210151cc0b0bee9e0a461dd90c650ae62..61eb8b16a13d6934609a6238357563ac72e29bef 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -21,8 +21,8 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExistsReplyTest {
-
     @Test
     public void testSerialization() {
         DataExistsReply expected = new DataExistsReply(true, DataStoreVersions.CURRENT_VERSION);
index b55b3effd33eb8778cbb282d1ff08edef358f15e..bcd732215036152e8199ed79f28978e305ba4247 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DataExistsTest {
-
     @Test
     public void testSerialization() {
         DataExists expected = new DataExists(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION);
index caf55737be8df8e294fd8eba801edf986d0078eb..9c31e1909a8cbcd30f4cb57408d1604a1304d6e6 100644 (file)
@@ -12,27 +12,28 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
 /**
  * Unit tests for ReadDataReply.
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadDataReplyTest {
 
     @Test
     public void testSerialization() {
-        NormalizedNode<?, ?> data = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode data = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
 
         ReadDataReply expected = new ReadDataReply(data, DataStoreVersions.CURRENT_VERSION);
 
index 5e06a60217b73361571d8756990671db6c89fe86..df692b59d5a83e3b7f3d3a5f577333049cb1efdf 100644 (file)
@@ -12,7 +12,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
@@ -22,8 +22,8 @@ import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadDataTest {
-
     @Test
     public void testSerialization() {
         ReadData expected = new ReadData(TestModel.TEST_PATH, DataStoreVersions.CURRENT_VERSION);
index fc09965ccc027b43b74f5b4cf84af987c5711c69..2abb05d360a088fe2c206d1f55f2f20d51565a7e 100644 (file)
@@ -28,19 +28,19 @@ import org.opendaylight.controller.cluster.datastore.modification.WriteModificat
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
 
 /**
  * Unit tests for ReadyLocalTransactionSerializer.
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadyLocalTransactionSerializerTest extends AbstractTest {
-
     @Test
     public void testToAndFromBinary() throws NotSerializableException {
         DataTree dataTree = new InMemoryDataTreeFactory().create(
@@ -74,8 +74,7 @@ public class ReadyLocalTransactionSerializerTest extends AbstractTest {
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, batched.getVersion());
         assertTrue("isReady", batched.isReady());
         assertTrue("isDoCommitOnReady", batched.isDoCommitOnReady());
-        assertTrue("participatingShardNames present", batched.getParticipatingShardNames().isPresent());
-        assertEquals("participatingShardNames", shardNames, batched.getParticipatingShardNames().get());
+        assertEquals("participatingShardNames", Optional.of(shardNames), batched.getParticipatingShardNames());
 
         List<Modification> batchedMods = batched.getModifications();
         assertEquals("getModifications size", 2, batchedMods.size());
index 049c17cb437f635c293f91ec9515d727420b01b4..38eea9af5433ad59a8c5130671087ca21a6e0958 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import static org.junit.Assert.assertEquals;
 
 import java.io.Serializable;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 
@@ -19,6 +19,7 @@ import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
  *
  * @author Thomas Pantelis
  */
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class ReadyTransactionReplyTest {
 
     @Test
index e3bca7e16b351b2225b58f3bacb316b23135d070..9d11ea986a901b1279e24a696e41fb5174230e89 100644 (file)
@@ -5,26 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
-import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public abstract class AbstractModificationTest {
-    private static SchemaContext TEST_SCHEMA_CONTEXT;
+    private static EffectiveModelContext TEST_SCHEMA_CONTEXT;
+
+    static final @NonNull ContainerNode TEST_CONTAINER = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
 
     protected InMemoryDOMDataStore store;
 
@@ -41,7 +44,7 @@ public abstract class AbstractModificationTest {
     @Before
     public void setUp() {
         store = new InMemoryDOMDataStore("test", MoreExecutors.newDirectExecutorService());
-        store.onGlobalContextUpdated(TEST_SCHEMA_CONTEXT);
+        store.onModelContextUpdated(TEST_SCHEMA_CONTEXT);
     }
 
     protected void commitTransaction(final DOMStoreWriteTransaction transaction) {
@@ -50,9 +53,9 @@ public abstract class AbstractModificationTest {
         cohort.commit();
     }
 
-    protected Optional<NormalizedNode<?, ?>> readData(final YangInstanceIdentifier path) throws Exception {
-        DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> future = transaction.read(path);
-        return future.get();
+    protected Optional<NormalizedNode> readData(final YangInstanceIdentifier path) throws Exception {
+        try (var transaction = store.newReadOnlyTransaction()) {
+            return transaction.read(path).get();
+        }
     }
 }
index ac0d7f93887b4ce1865af17f1fe7943c89323590..f1cb4870a3ba7c8fcce94846281ff37f7c66b222 100644 (file)
@@ -5,36 +5,30 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
 import static org.junit.Assert.assertEquals;
 
 import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class DeleteModificationTest extends AbstractModificationTest {
-
     @Test
     public void testApply() throws Exception {
         // Write something into the datastore
         DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
-        WriteModification writeModification = new WriteModification(TestModel.TEST_PATH,
-                ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+        WriteModification writeModification = new WriteModification(TestModel.TEST_PATH, TEST_CONTAINER);
         writeModification.apply(writeTransaction);
         commitTransaction(writeTransaction);
 
         // Check if it's in the datastore
-        Optional<NormalizedNode<?, ?>> data = readData(TestModel.TEST_PATH);
-        Assert.assertTrue(data.isPresent());
+        assertEquals(Optional.of(TEST_CONTAINER), readData(TestModel.TEST_PATH));
 
         // Delete stuff from the datastore
         DOMStoreWriteTransaction deleteTransaction = store.newWriteOnlyTransaction();
@@ -42,8 +36,7 @@ public class DeleteModificationTest extends AbstractModificationTest {
         deleteModification.apply(deleteTransaction);
         commitTransaction(deleteTransaction);
 
-        data = readData(TestModel.TEST_PATH);
-        Assert.assertFalse(data.isPresent());
+        assertEquals(Optional.empty(), readData(TestModel.TEST_PATH));
     }
 
     @Test
@@ -52,7 +45,7 @@ public class DeleteModificationTest extends AbstractModificationTest {
 
         DeleteModification expected = new DeleteModification(path);
 
-        DeleteModification clone = (DeleteModification) SerializationUtils.clone(expected);
+        DeleteModification clone = SerializationUtils.clone(expected);
         assertEquals("getPath", expected.getPath(), clone.getPath());
     }
 }
index bfdcf0edf882d55851077b0fd1cb848223c44399..ff22645f92054c29a28af0b2f7a2368b00917c74 100644 (file)
@@ -5,24 +5,21 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
 import static org.junit.Assert.assertEquals;
 
 import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class MergeModificationTest extends AbstractModificationTest {
-
     @Test
     public void testApply() throws Exception {
         //TODO : Need to write a better test for this
@@ -35,21 +32,17 @@ public class MergeModificationTest extends AbstractModificationTest {
         commitTransaction(writeTransaction);
 
         //Check if it's in the datastore
-        Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
-        Assert.assertTrue(data.isPresent());
-
+        assertEquals(Optional.of(TEST_CONTAINER), readData(TestModel.TEST_PATH));
     }
 
     @Test
     public void testSerialization() {
-        YangInstanceIdentifier path = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> data = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
-
-        MergeModification expected = new MergeModification(path, data);
+        MergeModification expected = new MergeModification(TestModel.TEST_PATH, Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build());
 
-        MergeModification clone = (MergeModification) SerializationUtils.clone(expected);
+        MergeModification clone = SerializationUtils.clone(expected);
         assertEquals("getPath", expected.getPath(), clone.getPath());
         assertEquals("getData", expected.getData(), clone.getData());
     }
index 96438c42090d3b901137d12fbaa68fd2c36fceaa..3d58db873d99878c3412626c6efc6331ff2469cf 100644 (file)
@@ -5,28 +5,25 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
 
-import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class MutableCompositeModificationTest extends AbstractModificationTest {
-
     @Test
     public void testApply() throws Exception {
-
         MutableCompositeModification compositeModification = new MutableCompositeModification();
         compositeModification.addModification(new WriteModification(TestModel.TEST_PATH,
             ImmutableNodes.containerNode(TestModel.TEST_QNAME)));
@@ -35,32 +32,77 @@ public class MutableCompositeModificationTest extends AbstractModificationTest {
         compositeModification.apply(transaction);
         commitTransaction(transaction);
 
-        Optional<NormalizedNode<?, ?>> data = readData(TestModel.TEST_PATH);
-
-        assertNotNull(data.get());
-        assertEquals(TestModel.TEST_QNAME, data.get().getNodeType());
+        assertEquals(TestModel.TEST_QNAME, readData(TestModel.TEST_PATH).orElseThrow().name().getNodeType());
     }
 
     @Test
     public void testSerialization() {
         YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
-        NormalizedNode<?, ?> mergeData = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
+        ContainerNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
 
         YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
 
-        MutableCompositeModification compositeModification = new MutableCompositeModification();
+        MutableCompositeModification compositeModification =
+            new MutableCompositeModification(DataStoreVersions.POTASSIUM_VERSION);
+        compositeModification.addModification(new WriteModification(writePath, writeData));
+        compositeModification.addModification(new MergeModification(mergePath, mergeData));
+        compositeModification.addModification(new DeleteModification(deletePath));
+
+        final byte[] bytes = SerializationUtils.serialize(compositeModification);
+        assertEquals(360, bytes.length);
+        MutableCompositeModification clone = (MutableCompositeModification) SerializationUtils.deserialize(bytes);
+
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, clone.getVersion());
+
+        assertEquals("getModifications size", 3, clone.getModifications().size());
+
+        WriteModification write = (WriteModification)clone.getModifications().get(0);
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, write.getVersion());
+        assertEquals("getPath", writePath, write.getPath());
+        assertEquals("getData", writeData, write.getData());
+
+        MergeModification merge = (MergeModification)clone.getModifications().get(1);
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, merge.getVersion());
+        assertEquals("getPath", mergePath, merge.getPath());
+        assertEquals("getData", mergeData, merge.getData());
+
+        DeleteModification delete = (DeleteModification)clone.getModifications().get(2);
+        assertEquals("getVersion", DataStoreVersions.POTASSIUM_VERSION, delete.getVersion());
+        assertEquals("getPath", deletePath, delete.getPath());
+    }
+
+    @Test
+    public void testSerializationModern() {
+        YangInstanceIdentifier writePath = TestModel.TEST_PATH;
+        ContainerNode writeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
+
+        YangInstanceIdentifier mergePath = TestModel.OUTER_LIST_PATH;
+        ContainerNode mergeData = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME))
+            .build();
+
+        YangInstanceIdentifier deletePath = TestModel.TEST_PATH;
+
+        MutableCompositeModification compositeModification =
+            new MutableCompositeModification();
         compositeModification.addModification(new WriteModification(writePath, writeData));
         compositeModification.addModification(new MergeModification(mergePath, mergeData));
         compositeModification.addModification(new DeleteModification(deletePath));
 
-        MutableCompositeModification clone = (MutableCompositeModification)
-                SerializationUtils.clone(compositeModification);
+        final byte[] bytes = SerializationUtils.serialize(compositeModification);
+        assertEquals(360, bytes.length);
+        MutableCompositeModification clone = (MutableCompositeModification) SerializationUtils.deserialize(bytes);
 
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
 
index 5218cee1b3864832f64fca287562cf8ea9f2faf1..82c8f757e8571ce8bb8b75e3c02343b1331e6c3a 100644 (file)
@@ -5,48 +5,41 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.modification;
 
 import static org.junit.Assert.assertEquals;
 
 import java.util.Optional;
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Assert;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
+@Deprecated(since = "9.0.0", forRemoval = true)
 public class WriteModificationTest extends AbstractModificationTest {
-
     @Test
     public void testApply() throws Exception {
         //Write something into the datastore
         DOMStoreReadWriteTransaction writeTransaction = store.newReadWriteTransaction();
-        WriteModification writeModification = new WriteModification(TestModel.TEST_PATH,
-                ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+        WriteModification writeModification = new WriteModification(TestModel.TEST_PATH, TEST_CONTAINER);
         writeModification.apply(writeTransaction);
         commitTransaction(writeTransaction);
 
         //Check if it's in the datastore
-        Optional<NormalizedNode<?,?>> data = readData(TestModel.TEST_PATH);
-        Assert.assertTrue(data.isPresent());
+        assertEquals(Optional.of(TEST_CONTAINER), readData(TestModel.TEST_PATH));
     }
 
     @Test
     public void testSerialization() {
-        YangInstanceIdentifier path = TestModel.TEST_PATH;
-        NormalizedNode<?, ?> data = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
-
-        WriteModification expected = new WriteModification(path, data);
+        WriteModification expected = new WriteModification(TestModel.TEST_PATH, Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build());
 
-        WriteModification clone = (WriteModification) SerializationUtils.clone(expected);
+        WriteModification clone = SerializationUtils.clone(expected);
         assertEquals("getPath", expected.getPath(), clone.getPath());
         assertEquals("getData", expected.getData(), clone.getData());
     }
index 8453368cb18e323da401188a0569721ca911f19c..62ec2d0c8a0b30116228d74fd07961d26ca1d54b 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class AbortTransactionPayloadTest extends AbstractIdentifiablePayloadTest<AbortTransactionPayload> {
-
-    @Override
-    AbortTransactionPayload object() {
-        return AbortTransactionPayload.create(nextTransactionId(), 512);
+    public AbortTransactionPayloadTest() {
+        super(AbortTransactionPayload.create(newTransactionId(0), 512), 125);
     }
 }
index a04c16919876075fb2d2c6b8e77b907a8518fcd6..5b82a478a74973dc1c2786302586af5296f5ae39 100644 (file)
@@ -7,19 +7,27 @@
  */
 package org.opendaylight.controller.cluster.datastore.persisted;
 
+import static java.util.Objects.requireNonNull;
+import static org.junit.Assert.assertEquals;
+
 import org.apache.commons.lang3.SerializationUtils;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
 
-public abstract class AbstractIdentifiablePayloadTest<T extends AbstractIdentifiablePayload<?>> extends AbstractTest {
+abstract class AbstractIdentifiablePayloadTest<T extends AbstractIdentifiablePayload<?>> extends AbstractTest {
+    private final T object;
+    private final int expectedSize;
 
-    abstract T object();
+    AbstractIdentifiablePayloadTest(final T object, final int expectedSize) {
+        this.object = requireNonNull(object);
+        this.expectedSize = expectedSize;
+    }
 
     @Test
     public void testSerialization() {
-        final T object = object();
-        final T cloned = SerializationUtils.clone(object);
-        Assert.assertEquals(object.getIdentifier(), cloned.getIdentifier());
+        final byte[] bytes = SerializationUtils.serialize(object);
+        assertEquals(expectedSize, bytes.length);
+        final T cloned = SerializationUtils.deserialize(bytes);
+        assertEquals(object.getIdentifier(), cloned.getIdentifier());
     }
 }
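
For reference, the pattern the reworked base class relies on is the commons-lang3 round trip: SerializationUtils.serialize() exposes the raw byte[] so the encoded size can be asserted, and SerializationUtils.deserialize() rebuilds the object. Below is a minimal, self-contained sketch of that pattern; the SamplePayload record, the test class name and the sample identifier are illustrative only and are not part of this change.

    import static org.junit.Assert.assertEquals;

    import java.io.Serializable;
    import org.apache.commons.lang3.SerializationUtils;
    import org.junit.Test;

    public class SerializationRoundTripExampleTest {
        // Illustrative stand-in for the payload classes exercised above.
        record SamplePayload(String id) implements Serializable {
        }

        @Test
        public void testRoundTrip() {
            final SamplePayload expected = new SamplePayload("history-0");

            // serialize() returns the encoded bytes, so their length can be checked
            // against a known constant, as the payload tests above do.
            final byte[] bytes = SerializationUtils.serialize(expected);

            // deserialize() reconstructs the object from those bytes.
            final SamplePayload actual = SerializationUtils.deserialize(bytes);
            assertEquals(expected, actual);
        }
    }
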
index eeed0612b7f3bbc4314f386b14e4380a0c281cf6..071914bfc90fda3150d447892a7dbb6261480218 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class CloseLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<CloseLocalHistoryPayload> {
-
-    @Override
-    CloseLocalHistoryPayload object() {
-        return CloseLocalHistoryPayload.create(nextHistoryId(), 512);
+    public CloseLocalHistoryPayloadTest() {
+        super(CloseLocalHistoryPayload.create(newHistoryId(0), 512), 124);
     }
 }
index 09273b363cf37c9420ea7fa76715d277cead61c1..215c47d4f5766bde17dbf81029038d887b5aaf23 100644 (file)
@@ -18,24 +18,22 @@ import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.AbstractTest;
+import org.opendaylight.controller.cluster.datastore.persisted.CommitTransactionPayload.CandidateTransaction;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.tree.spi.DataTreeCandidates;
 
 public class CommitTransactionPayloadTest extends AbstractTest {
     static final QName LEAF_SET = QName.create(TestModel.TEST_QNAME, "leaf-set");
@@ -45,7 +43,7 @@ public class CommitTransactionPayloadTest extends AbstractTest {
     private static DataTreeCandidateNode findNode(final Collection<DataTreeCandidateNode> nodes,
             final PathArgument arg) {
         for (DataTreeCandidateNode node : nodes) {
-            if (arg.equals(node.getIdentifier())) {
+            if (arg.equals(node.name())) {
                 return node;
             }
         }
@@ -56,40 +54,41 @@ public class CommitTransactionPayloadTest extends AbstractTest {
             final Collection<DataTreeCandidateNode> actual) {
         // Make sure all expected nodes are there
         for (DataTreeCandidateNode exp : expected) {
-            final DataTreeCandidateNode act = findNode(actual, exp.getIdentifier());
+            final DataTreeCandidateNode act = findNode(actual, exp.name());
             assertNotNull("missing expected child", act);
             assertCandidateNodeEquals(exp, act);
         }
         // Make sure no nodes are present which are not in the expected set
         for (DataTreeCandidateNode act : actual) {
-            final DataTreeCandidateNode exp = findNode(expected, act.getIdentifier());
+            final DataTreeCandidateNode exp = findNode(expected, act.name());
             assertNull("unexpected child", exp);
         }
     }
 
-    private static void assertCandidateEquals(final DataTreeCandidate expected, final DataTreeCandidate actual) {
-        assertEquals("root path", expected.getRootPath(), actual.getRootPath());
-        assertCandidateNodeEquals(expected.getRootNode(), actual.getRootNode());
+    private static void assertCandidateEquals(final DataTreeCandidate expected, final CandidateTransaction actual) {
+        final var candidate = actual.candidate();
+        assertEquals("root path", expected.getRootPath(), candidate.getRootPath());
+        assertCandidateNodeEquals(expected.getRootNode(), candidate.getRootNode());
     }
 
     private static void assertCandidateNodeEquals(final DataTreeCandidateNode expected,
             final DataTreeCandidateNode actual) {
-        assertEquals("child type", expected.getModificationType(), actual.getModificationType());
+        assertEquals("child type", expected.modificationType(), actual.modificationType());
 
-        switch (actual.getModificationType()) {
+        switch (actual.modificationType()) {
             case DELETE:
             case WRITE:
-                assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
-                assertEquals("child data", expected.getDataAfter(), actual.getDataAfter());
+                assertEquals("child identifier", expected.name(), actual.name());
+                assertEquals("child data", expected.dataAfter(), actual.dataAfter());
                 break;
             case SUBTREE_MODIFIED:
-                assertEquals("child identifier", expected.getIdentifier(), actual.getIdentifier());
-                assertChildrenEquals(expected.getChildNodes(), actual.getChildNodes());
+                assertEquals("child identifier", expected.name(), actual.name());
+                assertChildrenEquals(expected.childNodes(), actual.childNodes());
                 break;
             case UNMODIFIED:
                 break;
             default:
-                fail("Unexpect root type " + actual.getModificationType());
+                fail("Unexpected root type " + actual.modificationType());
                 break;
         }
     }
@@ -97,88 +96,75 @@ public class CommitTransactionPayloadTest extends AbstractTest {
     @Before
     public void setUp() {
         setUpStatic();
-        final YangInstanceIdentifier writePath = TestModel.TEST_PATH;
-        final NormalizedNode<?, ?> writeData = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
-        candidate = DataTreeCandidates.fromNormalizedNode(writePath, writeData);
+        candidate = DataTreeCandidates.fromNormalizedNode(TestModel.TEST_PATH, ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build());
     }
 
     @Test
     public void testCandidateSerialization() throws IOException {
         final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertEquals("payload size", 181, payload.size());
+        assertEquals("payload size", 156, payload.size());
+        assertEquals("serialized size", 242, SerializationUtils.serialize(payload).length);
     }
 
     @Test
     public void testCandidateSerDes() throws IOException {
         final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
     @Test
     public void testPayloadSerDes() throws IOException {
         final CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate().getValue());
+        assertCandidateEquals(candidate, SerializationUtils.clone(payload).getCandidate());
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Test
     public void testLeafSetEntryNodeCandidate() throws Exception {
-        YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
+        NodeWithValue<String> entryPathArg = new NodeWithValue<>(LEAF_SET, "one");
         YangInstanceIdentifier leafSetEntryPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET)
                 .node(entryPathArg).build();
 
-        NormalizedNode<?, ?> leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
-                .withValue("one").build();
-
-        candidate = DataTreeCandidates.fromNormalizedNode(leafSetEntryPath, leafSetEntryNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafSetEntryPath, ImmutableNodes.leafSetEntry(entryPathArg));
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Test
     public void testLeafSetNodeCandidate() throws Exception {
-        YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
         YangInstanceIdentifier leafSetPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET).build();
 
-        LeafSetEntryNode leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
-                .withValue("one").build();
-        NormalizedNode<?, ?> leafSetNode = Builders.leafSetBuilder().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(LEAF_SET)).withChild(leafSetEntryNode).build();
-
-        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, leafSetNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, ImmutableNodes.newSystemLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LEAF_SET))
+            .withChild(ImmutableNodes.leafSetEntry(LEAF_SET, "one"))
+            .build());
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
-    @SuppressWarnings({ "rawtypes", "unchecked" })
     @Test
     public void testOrderedLeafSetNodeCandidate() throws Exception {
-        YangInstanceIdentifier.NodeWithValue entryPathArg = new YangInstanceIdentifier.NodeWithValue(LEAF_SET, "one");
         YangInstanceIdentifier leafSetPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(LEAF_SET).build();
 
-        LeafSetEntryNode leafSetEntryNode = Builders.leafSetEntryBuilder().withNodeIdentifier(entryPathArg)
-                .withValue("one").build();
-        NormalizedNode<?, ?> leafSetNode = Builders.orderedLeafSetBuilder().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(LEAF_SET)).withChild(leafSetEntryNode).build();
-
-        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, leafSetNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafSetPath, ImmutableNodes.newUserLeafSetBuilder()
+            .withNodeIdentifier(new NodeIdentifier(LEAF_SET))
+            .withChild(ImmutableNodes.leafSetEntry(LEAF_SET, "one"))
+            .build());
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
     @Test
     public void testLeafNodeCandidate() throws Exception {
         YangInstanceIdentifier leafPath = YangInstanceIdentifier.builder(TestModel.TEST_PATH)
                 .node(TestModel.DESC_QNAME).build();
-        LeafNode<Object> leafNode = Builders.leafBuilder().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TestModel.DESC_QNAME)).withValue("test").build();
 
-        candidate = DataTreeCandidates.fromNormalizedNode(leafPath, leafNode);
+        candidate = DataTreeCandidates.fromNormalizedNode(leafPath,
+            ImmutableNodes.leafNode(TestModel.DESC_QNAME, "test"));
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 
     @Test
@@ -191,6 +177,6 @@ public class CommitTransactionPayloadTest extends AbstractTest {
         candidate = dataTree.prepare(modification);
 
         CommitTransactionPayload payload = CommitTransactionPayload.create(nextTransactionId(), candidate);
-        assertCandidateEquals(candidate, payload.getCandidate().getValue());
+        assertCandidateEquals(candidate, payload.getCandidate());
     }
 }
index e0aef362e94ab1ee6e37507918dc3b341192c3aa..83941812294831d9f9d0b3640d2b45149a790b12 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class CreateLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<CreateLocalHistoryPayload> {
-
-    @Override
-    CreateLocalHistoryPayload object() {
-        return CreateLocalHistoryPayload.create(nextHistoryId(), 512);
+    public CreateLocalHistoryPayloadTest() {
+        super(CreateLocalHistoryPayload.create(newHistoryId(0), 512), 124);
     }
 }
index f8734850a5f4528286d653103f720c2f7eaab1df..556772456c284c582c96ae7d44c5766466ada6ca 100644 (file)
@@ -10,12 +10,9 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
 
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Range;
-import com.google.common.collect.RangeSet;
-import com.google.common.collect.TreeRangeSet;
 import com.google.common.primitives.UnsignedLong;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -23,8 +20,6 @@ import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -33,32 +28,35 @@ import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
 import org.opendaylight.controller.cluster.access.concepts.FrontendType;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.datastore.utils.ImmutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap;
 
 public class FrontendShardDataTreeSnapshotMetadataTest {
 
-    @Test(expected = NullPointerException.class)
-    public final void testCreateMetadataSnapshotNullInput() {
-        new FrontendShardDataTreeSnapshotMetadata(null);
+    @Test
+    public void testCreateMetadataSnapshotNullInput() {
+        assertThrows(NullPointerException.class, () -> new FrontendShardDataTreeSnapshotMetadata(null));
     }
 
     @Test
-    public final void testCreateMetadataSnapshotEmptyInput() throws Exception {
+    public void testCreateMetadataSnapshotEmptyInput() throws Exception {
         final FrontendShardDataTreeSnapshotMetadata emptyOrigSnapshot = createEmptyMetadataSnapshot();
-        final FrontendShardDataTreeSnapshotMetadata emptyCopySnapshot = copy(emptyOrigSnapshot);
+        final FrontendShardDataTreeSnapshotMetadata emptyCopySnapshot = copy(emptyOrigSnapshot, 86);
         testMetadataSnapshotEqual(emptyOrigSnapshot, emptyCopySnapshot);
     }
 
     @Test
-    public final void testSerializeMetadataSnapshotWithOneClient() throws Exception {
+    public void testSerializeMetadataSnapshotWithOneClient() throws Exception {
         final FrontendShardDataTreeSnapshotMetadata origSnapshot = createMetadataSnapshot(1);
-        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot);
+        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 121);
         testMetadataSnapshotEqual(origSnapshot, copySnapshot);
     }
 
     @Test
-    public final void testSerializeMetadataSnapshotWithMoreClients() throws Exception {
+    public void testSerializeMetadataSnapshotWithMoreClients() throws Exception {
         final FrontendShardDataTreeSnapshotMetadata origSnapshot = createMetadataSnapshot(5);
-        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot);
+        final FrontendShardDataTreeSnapshotMetadata copySnapshot = copy(origSnapshot, 273);
         testMetadataSnapshotEqual(origSnapshot, copySnapshot);
     }
 
@@ -68,26 +66,26 @@ public class FrontendShardDataTreeSnapshotMetadataTest {
         final List<FrontendClientMetadata> origClientList = origSnapshot.getClients();
         final List<FrontendClientMetadata> copyClientList = copySnapshot.getClients();
 
-        assertTrue(origClientList.size() == copyClientList.size());
+        assertEquals(origClientList.size(), copyClientList.size());
 
         final Map<ClientIdentifier, FrontendClientMetadata> origIdent = new HashMap<>();
         final Map<ClientIdentifier, FrontendClientMetadata> copyIdent = new HashMap<>();
-        origClientList.forEach(client -> origIdent.put(client.getIdentifier(), client));
-        origClientList.forEach(client -> copyIdent.put(client.getIdentifier(), client));
+        origClientList.forEach(client -> origIdent.put(client.clientId(), client));
+        origClientList.forEach(client -> copyIdent.put(client.clientId(), client));
 
         assertTrue(origIdent.keySet().containsAll(copyIdent.keySet()));
         assertTrue(copyIdent.keySet().containsAll(origIdent.keySet()));
 
         origIdent.values().forEach(client -> {
-            final FrontendClientMetadata copyClient = copyIdent.get(client.getIdentifier());
-            testObject(client.getIdentifier(), copyClient.getIdentifier());
-            assertTrue(client.getPurgedHistories().equals(copyClient.getPurgedHistories()));
-            assertTrue(client.getCurrentHistories().equals(copyClient.getCurrentHistories()));
+            final var copyClient = copyIdent.get(client.clientId());
+            testObject(client.clientId(), copyClient.clientId());
+            assertEquals(client.getPurgedHistories(), copyClient.getPurgedHistories());
+            assertEquals(client.getCurrentHistories(), copyClient.getCurrentHistories());
         });
     }
 
     private static FrontendShardDataTreeSnapshotMetadata createEmptyMetadataSnapshot() {
-        return new FrontendShardDataTreeSnapshotMetadata(Collections.<FrontendClientMetadata>emptyList());
+        return new FrontendShardDataTreeSnapshotMetadata(List.of());
     }
 
     private static FrontendShardDataTreeSnapshotMetadata createMetadataSnapshot(final int size) {
@@ -104,15 +102,11 @@ public class FrontendShardDataTreeSnapshotMetadataTest {
         final FrontendIdentifier frontendIdentifier = FrontendIdentifier.create(MemberName.forName(indexName),
                 FrontendType.forName(index));
         final ClientIdentifier clientIdentifier = ClientIdentifier.create(frontendIdentifier, num);
+        final ImmutableUnsignedLongSet purgedHistories = MutableUnsignedLongSet.of(0).immutableCopy();
 
-        final RangeSet<UnsignedLong> purgedHistories = TreeRangeSet.create();
-        purgedHistories.add(Range.closed(UnsignedLong.ZERO, UnsignedLong.ONE));
-
-        final Collection<FrontendHistoryMetadata> currentHistories = Collections.singleton(
-            new FrontendHistoryMetadata(num, num, true, ImmutableMap.of(UnsignedLong.ZERO, Boolean.TRUE),
-                purgedHistories));
-
-        return new FrontendClientMetadata(clientIdentifier, purgedHistories, currentHistories);
+        return new FrontendClientMetadata(clientIdentifier, purgedHistories, List.of(
+            new FrontendHistoryMetadata(num, num, true,
+                UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, Boolean.TRUE)), purgedHistories)));
     }
 
     private static <T> void testObject(final T object, final T equalObject) {
@@ -124,13 +118,16 @@ public class FrontendShardDataTreeSnapshotMetadataTest {
     }
 
     @SuppressWarnings("unchecked")
-    private static <T> T copy(final T obj) throws IOException, ClassNotFoundException {
+    private static <T> T copy(final T obj, final int expectedSize) throws IOException, ClassNotFoundException {
         final ByteArrayOutputStream bos = new ByteArrayOutputStream();
         try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
             oos.writeObject(obj);
         }
 
-        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
+        final byte[] bytes = bos.toByteArray();
+        assertEquals(expectedSize, bytes.length);
+
+        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
             return (T) ois.readObject();
         }
     }
index 3a3ded1af7e049218f2cc7a4064de2d57f5f0f93..cca22204133d02814328781e2cd8617a6e82b078 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class PurgeLocalHistoryPayloadTest extends AbstractIdentifiablePayloadTest<PurgeLocalHistoryPayload> {
-
-    @Override
-    PurgeLocalHistoryPayload object() {
-        return PurgeLocalHistoryPayload.create(nextHistoryId(), 512);
+    public PurgeLocalHistoryPayloadTest() {
+        super(PurgeLocalHistoryPayload.create(newHistoryId(0), 512), 124);
     }
 }
index cf59654467d8629dd9236fb845744cd25900caae..cfae341829575c5bc9da1475ab1b714c7db9702a 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 public class PurgeTransactionPayloadTest extends AbstractIdentifiablePayloadTest<PurgeTransactionPayload> {
-
-    @Override
-    PurgeTransactionPayload object() {
-        return PurgeTransactionPayload.create(nextTransactionId(), 512);
+    public PurgeTransactionPayloadTest() {
+        super(PurgeTransactionPayload.create(newTransactionId(0), 512), 125);
     }
 }
index 333f3e9b247528a5d79805359f44c1965d7e9288..9d172e653a69f6e7adeac56bf1b50a06f1f5cdb2 100644 (file)
@@ -8,9 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.persisted;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
-import com.google.common.collect.ImmutableMap;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.Externalizable;
@@ -23,10 +21,9 @@ import java.util.Map;
 import java.util.Optional;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 /**
  * Unit tests for ShardDataTreeSnapshot.
@@ -34,11 +31,10 @@ import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableCo
  * @author Thomas Pantelis
  */
 public class ShardDataTreeSnapshotTest {
-
     @Test
     public void testShardDataTreeSnapshotWithNoMetadata() throws Exception {
-        NormalizedNode<?, ?> expectedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+        ContainerNode expectedNode = ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
                 .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
 
         MetadataShardDataTreeSnapshot snapshot = new MetadataShardDataTreeSnapshot(expectedNode);
@@ -47,51 +43,54 @@ public class ShardDataTreeSnapshotTest {
             snapshot.serialize(out);
         }
 
+        final byte[] bytes = bos.toByteArray();
+        assertEquals(202, bytes.length);
+
         ShardDataTreeSnapshot deserialized;
-        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
-            deserialized = ShardDataTreeSnapshot.deserialize(in);
+        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
+            deserialized = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
         }
 
-        Optional<NormalizedNode<?, ?>> actualNode = deserialized.getRootNode();
-        assertTrue("rootNode present", actualNode.isPresent());
-        assertEquals("rootNode", expectedNode, actualNode.get());
+        assertEquals("rootNode", Optional.of(expectedNode), deserialized.getRootNode());
         assertEquals("Deserialized type", MetadataShardDataTreeSnapshot.class, deserialized.getClass());
         assertEquals("Metadata size", 0, ((MetadataShardDataTreeSnapshot)deserialized).getMetadata().size());
     }
 
     @Test
     public void testShardDataTreeSnapshotWithMetadata() throws Exception {
-        NormalizedNode<?, ?> expectedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
+        ContainerNode expectedNode = ImmutableNodes.newContainerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
                 .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
 
         Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> expMetadata =
-                ImmutableMap.of(TestShardDataTreeSnapshotMetadata.class, new TestShardDataTreeSnapshotMetadata("test"));
+                Map.of(TestShardDataTreeSnapshotMetadata.class, new TestShardDataTreeSnapshotMetadata("test"));
         MetadataShardDataTreeSnapshot snapshot = new MetadataShardDataTreeSnapshot(expectedNode, expMetadata);
         ByteArrayOutputStream bos = new ByteArrayOutputStream();
         try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
             snapshot.serialize(out);
         }
 
+        final byte[] bytes = bos.toByteArray();
+        assertEquals(350, bytes.length);
+
         ShardDataTreeSnapshot deserialized;
-        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
-            deserialized = ShardDataTreeSnapshot.deserialize(in);
+        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
+            deserialized = ShardDataTreeSnapshot.deserialize(in).getSnapshot();
         }
 
-        Optional<NormalizedNode<?, ?>> actualNode = deserialized.getRootNode();
-        assertTrue("rootNode present", actualNode.isPresent());
-        assertEquals("rootNode", expectedNode, actualNode.get());
+        assertEquals("rootNode", Optional.of(expectedNode), deserialized.getRootNode());
         assertEquals("Deserialized type", MetadataShardDataTreeSnapshot.class, deserialized.getClass());
         assertEquals("Metadata", expMetadata, ((MetadataShardDataTreeSnapshot)deserialized).getMetadata());
     }
 
     static class TestShardDataTreeSnapshotMetadata
             extends ShardDataTreeSnapshotMetadata<TestShardDataTreeSnapshotMetadata> {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         private final String data;
 
-        TestShardDataTreeSnapshotMetadata(String data) {
+        TestShardDataTreeSnapshotMetadata(final String data) {
             this.data = data;
         }
 
@@ -111,29 +110,31 @@ public class ShardDataTreeSnapshotTest {
         }
 
         @Override
-        public boolean equals(Object obj) {
-            return obj instanceof TestShardDataTreeSnapshotMetadata
-                    && data.equals(((TestShardDataTreeSnapshotMetadata)obj).data);
+        public boolean equals(final Object obj) {
+            return obj instanceof TestShardDataTreeSnapshotMetadata other && data.equals(other.data);
         }
 
         private static class Proxy implements Externalizable {
+            @java.io.Serial
+            private static final long serialVersionUID = 7534948936595056176L;
+
             private String data;
 
             @SuppressWarnings("checkstyle:RedundantModifier")
             public Proxy() {
             }
 
-            Proxy(String data) {
+            Proxy(final String data) {
                 this.data = data;
             }
 
             @Override
-            public void writeExternal(ObjectOutput out) throws IOException {
+            public void writeExternal(final ObjectOutput out) throws IOException {
                 out.writeObject(data);
             }
 
             @Override
-            public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+            public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
                 data = (String) in.readObject();
             }
 
index 26d08e6c5ca6a88b3eb0250f6a1cbe7e83abdbd7..c1c09afa10b836b4473a797bf2ee5ae517b010e0 100644 (file)
@@ -10,8 +10,7 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static org.junit.Assert.assertEquals;
 
 import java.util.Arrays;
-import java.util.Collections;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 
 /**
@@ -24,8 +23,8 @@ public class ShardManagerSnapshotTest {
     @Test
     public void testSerialization() {
         ShardManagerSnapshot expected =
-                new ShardManagerSnapshot(Arrays.asList("shard1", "shard2"), Collections.emptyMap());
-        ShardManagerSnapshot cloned = (ShardManagerSnapshot) SerializationUtils.clone(expected);
+                new ShardManagerSnapshot(Arrays.asList("shard1", "shard2"));
+        ShardManagerSnapshot cloned = SerializationUtils.clone(expected);
 
         assertEquals("getShardList", expected.getShardList(), cloned.getShardList());
     }
index 82b9f455656080078131f574e7a1ee3644193e16..5a3019cd9f4f0acc6885e4c933bf433f79f8817e 100644 (file)
@@ -10,13 +10,14 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 
-import org.apache.commons.lang.SerializationUtils;
+import java.util.Optional;
+import org.apache.commons.lang3.SerializationUtils;
 import org.junit.Test;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 
 /**
  * Unit tests for ShardSnapshotState.
@@ -24,19 +25,19 @@ import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableCo
  * @author Thomas Pantelis
  */
 public class ShardSnapshotStateTest {
-
     @Test
     public void testSerialization() {
-        NormalizedNode<?, ?> expectedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo")).build();
+        ContainerNode expectedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .withChild(ImmutableNodes.leafNode(TestModel.DESC_QNAME, "foo"))
+            .build();
 
         ShardSnapshotState expected = new ShardSnapshotState(new MetadataShardDataTreeSnapshot(expectedNode));
-        ShardSnapshotState cloned = (ShardSnapshotState) SerializationUtils.clone(expected);
+        ShardSnapshotState cloned = SerializationUtils.clone(expected);
 
         assertNotNull("getSnapshot is null", cloned.getSnapshot());
         assertEquals("getSnapshot type", MetadataShardDataTreeSnapshot.class, cloned.getSnapshot().getClass());
-        assertEquals("getRootNode", expectedNode,
-                ((MetadataShardDataTreeSnapshot)cloned.getSnapshot()).getRootNode().get());
+        assertEquals("getRootNode", Optional.of(expectedNode),
+                ((MetadataShardDataTreeSnapshot)cloned.getSnapshot()).getRootNode());
     }
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayloadTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/persisted/SkipTransactionsPayloadTest.java
new file mode 100644 (file)
index 0000000..818c189
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.persisted;
+
+import org.opendaylight.controller.cluster.datastore.utils.MutableUnsignedLongSet;
+
+public class SkipTransactionsPayloadTest extends AbstractIdentifiablePayloadTest<SkipTransactionsPayload> {
+    public SkipTransactionsPayloadTest() {
+        super(SkipTransactionsPayload.create(newHistoryId(0), MutableUnsignedLongSet.of(42).immutableCopy(), 512), 131);
+    }
+}
index d289e02d448a5a58070ae99919e60a15347616c8..e68bfe43acce87d5e9cfbda731f794f4ead10891 100644 (file)
@@ -45,7 +45,7 @@ public class ShardManagerGetSnapshotReplyActorTest extends AbstractActorTest {
         TestKit kit = new TestKit(getSystem());
 
         List<String> shardList = Arrays.asList("shard1", "shard2", "shard3");
-        ShardManagerSnapshot shardManagerSnapshot = new ShardManagerSnapshot(shardList, Collections.emptyMap());
+        ShardManagerSnapshot shardManagerSnapshot = new ShardManagerSnapshot(shardList);
         ActorRef replyActor = getSystem().actorOf(ShardManagerGetSnapshotReplyActor.props(
                 shardList, "config", shardManagerSnapshot, kit.getRef(),
                 "shard-manager", FiniteDuration.create(100, TimeUnit.SECONDS)), "testSuccess");
index 63af6be93632b54f1511678be0a584ef4d4f9e6a..889f1d47e1f8c2c55437d28180e73b684d1629f1 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.cluster.datastore.shardmanager;
 
+import static org.awaitility.Awaitility.await;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -17,16 +18,15 @@ import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.AddressFromURIString;
+import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.actor.Status;
 import akka.actor.Status.Failure;
@@ -43,13 +43,12 @@ import akka.serialization.Serialization;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
-import com.google.common.base.Function;
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
-import java.net.URI;
 import java.time.Duration;
 import java.util.AbstractMap;
 import java.util.Arrays;
@@ -64,18 +63,24 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.function.Consumer;
+import java.util.function.Function;
 import java.util.stream.Collectors;
+import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.datastore.AbstractShardManagerTest;
+import org.opendaylight.controller.cluster.databroker.ClientBackedDataStore;
+import org.opendaylight.controller.cluster.datastore.AbstractClusterRefActorTest;
 import org.opendaylight.controller.cluster.datastore.ClusterWrapperImpl;
 import org.opendaylight.controller.cluster.datastore.DataStoreVersions;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 import org.opendaylight.controller.cluster.datastore.DatastoreContextFactory;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
 import org.opendaylight.controller.cluster.datastore.Shard;
+import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
 import org.opendaylight.controller.cluster.datastore.config.EmptyModuleShardConfigProvider;
 import org.opendaylight.controller.cluster.datastore.config.ModuleShardConfiguration;
@@ -94,8 +99,6 @@ import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
 import org.opendaylight.controller.cluster.datastore.messages.LocalPrimaryShardFound;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
-import org.opendaylight.controller.cluster.datastore.messages.PeerDown;
-import org.opendaylight.controller.cluster.datastore.messages.PeerUp;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryShardInfo;
 import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShardFound;
 import org.opendaylight.controller.cluster.datastore.messages.RemoveShardReplica;
@@ -112,6 +115,7 @@ import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
 import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
 import org.opendaylight.controller.cluster.notifications.RoleChangeNotification;
 import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.TestActorFactory;
 import org.opendaylight.controller.cluster.raft.base.messages.FollowerInitialSyncUpStatus;
 import org.opendaylight.controller.cluster.raft.base.messages.SwitchBehavior;
 import org.opendaylight.controller.cluster.raft.client.messages.GetSnapshot;
@@ -125,24 +129,39 @@ import org.opendaylight.controller.cluster.raft.messages.ServerChangeReply;
 import org.opendaylight.controller.cluster.raft.messages.ServerChangeStatus;
 import org.opendaylight.controller.cluster.raft.messages.ServerRemoved;
 import org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy;
+import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
 import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.concepts.Registration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.XMLNamespace;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
-public class ShardManagerTest extends AbstractShardManagerTest {
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class ShardManagerTest extends AbstractClusterRefActorTest {
     private static final Logger LOG = LoggerFactory.getLogger(ShardManagerTest.class);
+    private static final MemberName MEMBER_1 = MemberName.forName("member-1");
     private static final MemberName MEMBER_2 = MemberName.forName("member-2");
     private static final MemberName MEMBER_3 = MemberName.forName("member-3");
 
-    private static SchemaContext TEST_SCHEMA_CONTEXT;
+    private static int ID_COUNTER = 1;
+    private static ActorRef mockShardActor;
+    private static ShardIdentifier mockShardName;
+    private static SettableFuture<Empty> ready;
+    private static EffectiveModelContext TEST_SCHEMA_CONTEXT;
+
+    private final String shardMrgIDSuffix = "config" + ID_COUNTER++;
+    private final TestActorFactory actorFactory = new TestActorFactory(getSystem());
+    private final DatastoreContext.Builder datastoreContextBuilder = DatastoreContext.newBuilder()
+            .dataStoreName(shardMrgIDSuffix).shardInitializationTimeout(600, TimeUnit.MILLISECONDS)
+            .shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(6);
 
     private final String shardMgrID = ShardManagerIdentifier.builder().type(shardMrgIDSuffix).build().toString();
 
@@ -156,6 +175,50 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         TEST_SCHEMA_CONTEXT = null;
     }
 
+    @Before
+    public void setUp() {
+        ready = SettableFuture.create();
+
+        InMemoryJournal.clear();
+        InMemorySnapshotStore.clear();
+
+        if (mockShardActor == null) {
+            mockShardName = ShardIdentifier.create(Shard.DEFAULT_NAME, MEMBER_1, "config");
+            mockShardActor = getSystem().actorOf(MessageCollectorActor.props(), mockShardName.toString());
+        }
+
+        MessageCollectorActor.clearMessages(mockShardActor);
+    }
+
+    @After
+    public void tearDown() {
+        InMemoryJournal.clear();
+        InMemorySnapshotStore.clear();
+
+        mockShardActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        await().atMost(Duration.ofSeconds(10)).until(mockShardActor::isTerminated);
+        mockShardActor = null;
+
+        actorFactory.close();
+    }
+
+    private TestShardManager.Builder newTestShardMgrBuilder() {
+        return TestShardManager.builder(datastoreContextBuilder)
+            .distributedDataStore(mock(ClientBackedDataStore.class));
+    }
+
+    private TestShardManager.Builder newTestShardMgrBuilder(final Configuration config) {
+        return newTestShardMgrBuilder().configuration(config);
+    }
+
+    private Props newShardMgrProps() {
+        return newShardMgrProps(new MockConfiguration());
+    }
+
+    private Props newShardMgrProps(final Configuration config) {
+        return newTestShardMgrBuilder(config).readinessFuture(ready).props();
+    }
+
     private ActorSystem newActorSystem(final String config) {
         return newActorSystem("cluster-test", config);
     }
@@ -169,10 +232,6 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         return system.actorOf(MessageCollectorActor.props(), name);
     }
 
-    private Props newShardMgrProps() {
-        return newShardMgrProps(new MockConfiguration());
-    }
-
     private static DatastoreContextFactory newDatastoreContextFactory(final DatastoreContext datastoreContext) {
         DatastoreContextFactory mockFactory = mock(DatastoreContextFactory.class);
         doReturn(datastoreContext).when(mockFactory).getBaseDatastoreContext();
@@ -185,8 +244,9 @@ public class ShardManagerTest extends AbstractShardManagerTest {
     }
 
     private TestShardManager.Builder newTestShardMgrBuilderWithMockShardActor(final ActorRef shardActor) {
-        return TestShardManager.builder(datastoreContextBuilder).shardActor(shardActor)
-                .distributedDataStore(mock(DistributedDataStore.class));
+        return TestShardManager.builder(datastoreContextBuilder)
+            .shardActor(shardActor)
+            .distributedDataStore(mock(ClientBackedDataStore.class));
     }
 
 
@@ -298,7 +358,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
             }
         }
 
-        final Creator<ShardManager> creator = new Creator<ShardManager>() {
+        final Creator<ShardManager> creator = new Creator<>() {
             private static final long serialVersionUID = 1L;
             @Override
             public ShardManager create() {
@@ -310,7 +370,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         final TestKit kit = new TestKit(getSystem());
 
-        final ActorRef shardManager = actorFactory.createActor(Props.create(
+        final ActorRef shardManager = actorFactory.createActor(Props.create(ShardManager.class,
                 new DelegatingShardManagerCreator(creator)).withDispatcher(Dispatchers.DefaultDispatcherId()));
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
@@ -364,7 +424,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         DataTree mockDataTree = mock(DataTree.class);
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mockDataTree,
@@ -393,7 +453,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
@@ -417,7 +477,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         MockClusterWrapper.sendMemberUp(shardManager, "member-2", kit.getRef().path().toString());
@@ -455,7 +515,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, false), kit.getRef());
 
@@ -469,7 +529,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
         shardManager.tell(
@@ -512,7 +572,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         kit.expectNoMessage(Duration.ofMillis(150));
 
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         kit.expectNoMessage(Duration.ofMillis(150));
 
@@ -549,7 +609,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         kit.expectMsgClass(Duration.ofSeconds(2), NotInitializedException.class);
 
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         kit.expectNoMessage(Duration.ofMillis(200));
 
@@ -563,7 +623,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
         shardManager.tell(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix, null,
             RaftState.Candidate.name()), mockShardActor);
 
@@ -581,7 +641,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
         shardManager.tell(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix, null,
             RaftState.IsolatedLeader.name()), mockShardActor);
 
@@ -599,7 +659,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new FindPrimary(Shard.DEFAULT_NAME, true), kit.getRef());
 
@@ -644,7 +704,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
 
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-astronauts-" + shardMrgIDSuffix;
         short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
@@ -712,8 +772,8 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final TestKit kit = new TestKit(system1);
         shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager1.tell(new ActorInitialized(), mockShardActor1);
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager1.tell(new ActorInitialized(mockShardActor1), ActorRef.noSender());
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
@@ -739,16 +799,11 @@ public class ShardManagerTest extends AbstractShardManagerTest {
             kit.getRef());
 
         shardManager1.underlyingActor().waitForUnreachableMember();
-
-        PeerDown peerDown = MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerDown.class);
-        assertEquals("getMemberName", MEMBER_2, peerDown.getMemberName());
         MessageCollectorActor.clearMessages(mockShardActor1);
 
         shardManager1.tell(MockClusterWrapper.createMemberRemoved("member-2", "akka://cluster-test@127.0.0.1:2558"),
             kit.getRef());
 
-        MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerDown.class);
-
         shardManager1.tell(new FindPrimary("default", true), kit.getRef());
 
         kit.expectMsgClass(Duration.ofSeconds(5), NoShardLeaderException.class);
@@ -758,10 +813,6 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         shardManager1.underlyingActor().waitForReachableMember();
 
-        PeerUp peerUp = MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerUp.class);
-        assertEquals("getMemberName", MEMBER_2, peerUp.getMemberName());
-        MessageCollectorActor.clearMessages(mockShardActor1);
-
         shardManager1.tell(new FindPrimary("default", true), kit.getRef());
 
         RemotePrimaryShardFound found1 = kit.expectMsgClass(Duration.ofSeconds(5), RemotePrimaryShardFound.class);
@@ -771,8 +822,6 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         shardManager1.tell(MockClusterWrapper.createMemberUp("member-2", "akka://cluster-test@127.0.0.1:2558"),
             kit.getRef());
 
-        MessageCollectorActor.expectFirstMatching(mockShardActor1, PeerUp.class);
-
         // Test FindPrimary wait succeeds after reachable member event.
 
         shardManager1.tell(MockClusterWrapper.createUnreachableMember("member-2",
@@ -829,8 +878,8 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final TestKit kit = new TestKit(system1);
         shardManager1.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager1.tell(new ActorInitialized(), mockShardActor1);
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager1.tell(new ActorInitialized(mockShardActor1), ActorRef.noSender());
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId2 = "member-2-shard-default-" + shardMrgIDSuffix;
         String memberId1 = "member-1-shard-default-" + shardMrgIDSuffix;
@@ -925,8 +974,8 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final TestKit kit256 = new TestKit(system256);
         shardManager256.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit256.getRef());
         shardManager2.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit256.getRef());
-        shardManager256.tell(new ActorInitialized(), mockShardActor256);
-        shardManager2.tell(new ActorInitialized(), mockShardActor2);
+        shardManager256.tell(new ActorInitialized(mockShardActor256), ActorRef.noSender());
+        shardManager2.tell(new ActorInitialized(mockShardActor2), ActorRef.noSender());
 
         String memberId256 = "member-256-shard-default-" + shardMrgIDSuffix;
         String memberId2   = "member-2-shard-default-"   + shardMrgIDSuffix;
@@ -1000,7 +1049,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new FindLocalShard(Shard.DEFAULT_NAME, false), kit.getRef());
 
@@ -1034,7 +1083,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         Future<Object> future = Patterns.ask(shardManager, new FindLocalShard(Shard.DEFAULT_NAME, true),
             new Timeout(5, TimeUnit.SECONDS));
 
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         Object resp = Await.result(future, kit.duration("5 seconds"));
         assertTrue("Expected: LocalShardFound, Actual: " + resp, resp instanceof LocalShardFound);
@@ -1047,15 +1096,13 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         TestShardManager shardManager = newTestShardManager();
 
         String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
-        shardManager.onReceiveCommand(new RoleChangeNotification(
+        shardManager.handleCommand(new RoleChangeNotification(
                 memberId, RaftState.Candidate.name(), RaftState.Leader.name()));
+        assertFalse(ready.isDone());
 
-        verify(ready, never()).countDown();
-
-        shardManager.onReceiveCommand(new ShardLeaderStateChanged(memberId, memberId,
+        shardManager.handleCommand(new ShardLeaderStateChanged(memberId, memberId,
                 mock(DataTree.class), DataStoreVersions.CURRENT_VERSION));
-
-        verify(ready, times(1)).countDown();
+        assertTrue(ready.isDone());
     }
 
     @Test
@@ -1064,17 +1111,15 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         TestShardManager shardManager = newTestShardManager();
 
         String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
-        shardManager.onReceiveCommand(new RoleChangeNotification(memberId, null, RaftState.Follower.name()));
+        shardManager.handleCommand(new RoleChangeNotification(memberId, null, RaftState.Follower.name()));
+        assertFalse(ready.isDone());
 
-        verify(ready, never()).countDown();
+        shardManager.handleCommand(MockClusterWrapper.createMemberUp("member-2", kit.getRef().path().toString()));
 
-        shardManager.onReceiveCommand(MockClusterWrapper.createMemberUp("member-2", kit.getRef().path().toString()));
-
-        shardManager.onReceiveCommand(
+        shardManager.handleCommand(
             new ShardLeaderStateChanged(memberId, "member-2-shard-default-" + shardMrgIDSuffix,
                 mock(DataTree.class), DataStoreVersions.CURRENT_VERSION));
-
-        verify(ready, times(1)).countDown();
+        assertTrue(ready.isDone());
     }
 
     @Test
@@ -1083,27 +1128,24 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         TestShardManager shardManager = newTestShardManager();
 
         String memberId = "member-1-shard-default-" + shardMrgIDSuffix;
-        shardManager.onReceiveCommand(new RoleChangeNotification(memberId, null, RaftState.Follower.name()));
+        shardManager.handleCommand(new RoleChangeNotification(memberId, null, RaftState.Follower.name()));
+        assertFalse(ready.isDone());
 
-        verify(ready, never()).countDown();
-
-        shardManager.onReceiveCommand(
+        shardManager.handleCommand(
             new ShardLeaderStateChanged(memberId, "member-2-shard-default-" + shardMrgIDSuffix,
                 mock(DataTree.class), DataStoreVersions.CURRENT_VERSION));
 
-        shardManager.onReceiveCommand(MockClusterWrapper.createMemberUp("member-2", kit.getRef().path().toString()));
-
-        verify(ready, times(1)).countDown();
+        shardManager.handleCommand(MockClusterWrapper.createMemberUp("member-2", kit.getRef().path().toString()));
+        assertTrue(ready.isDone());
     }
 
     @Test
     public void testRoleChangeNotificationDoNothingForUnknownShard() throws Exception {
         TestShardManager shardManager = newTestShardManager();
 
-        shardManager.onReceiveCommand(new RoleChangeNotification("unknown", RaftState.Candidate.name(),
+        shardManager.handleCommand(new RoleChangeNotification("unknown", RaftState.Candidate.name(),
             RaftState.Leader.name()));
-
-        verify(ready, never()).countDown();
+        assertFalse(ready.isDone());
     }
 
     @Test
@@ -1117,7 +1159,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
     public void testWhenShardIsLeaderSyncStatusIsTrue() throws Exception {
         TestShardManager shardManager = newTestShardManager();
 
-        shardManager.onReceiveCommand(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix,
+        shardManager.handleCommand(new RoleChangeNotification("member-1-shard-default-" + shardMrgIDSuffix,
                 RaftState.Follower.name(), RaftState.Leader.name()));
 
         assertTrue(shardManager.getMBean().getSyncStatus());
@@ -1128,13 +1170,13 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         TestShardManager shardManager = newTestShardManager();
 
         String shardId = "member-1-shard-default-" + shardMrgIDSuffix;
-        shardManager.onReceiveCommand(new RoleChangeNotification(shardId,
+        shardManager.handleCommand(new RoleChangeNotification(shardId,
                 RaftState.Follower.name(), RaftState.Candidate.name()));
 
         assertFalse(shardManager.getMBean().getSyncStatus());
 
         // Send a FollowerInitialSyncStatus with status = true for the replica whose current state is candidate
-        shardManager.onReceiveCommand(new FollowerInitialSyncUpStatus(
+        shardManager.handleCommand(new FollowerInitialSyncUpStatus(
                 true, shardId));
 
         assertFalse(shardManager.getMBean().getSyncStatus());
@@ -1145,19 +1187,19 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         TestShardManager shardManager = newTestShardManager();
 
         String shardId = "member-1-shard-default-" + shardMrgIDSuffix;
-        shardManager.onReceiveCommand(new RoleChangeNotification(shardId,
+        shardManager.handleCommand(new RoleChangeNotification(shardId,
                 RaftState.Candidate.name(), RaftState.Follower.name()));
 
         // Initially will be false
         assertFalse(shardManager.getMBean().getSyncStatus());
 
         // Send status true will make sync status true
-        shardManager.onReceiveCommand(new FollowerInitialSyncUpStatus(true, shardId));
+        shardManager.handleCommand(new FollowerInitialSyncUpStatus(true, shardId));
 
         assertTrue(shardManager.getMBean().getSyncStatus());
 
         // Send status false will make sync status false
-        shardManager.onReceiveCommand(new FollowerInitialSyncUpStatus(false, shardId));
+        shardManager.handleCommand(new FollowerInitialSyncUpStatus(false, shardId));
 
         assertFalse(shardManager.getMBean().getSyncStatus());
     }
@@ -1177,7 +1219,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         // Make default shard leader
         String defaultShardId = "member-1-shard-default-" + shardMrgIDSuffix;
-        shardManager.onReceiveCommand(new RoleChangeNotification(defaultShardId,
+        shardManager.handleCommand(new RoleChangeNotification(defaultShardId,
                 RaftState.Follower.name(), RaftState.Leader.name()));
 
         // default = Leader, astronauts is unknown so sync status remains false
@@ -1185,21 +1227,21 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         // Make astronauts shard leader as well
         String astronautsShardId = "member-1-shard-astronauts-" + shardMrgIDSuffix;
-        shardManager.onReceiveCommand(new RoleChangeNotification(astronautsShardId,
+        shardManager.handleCommand(new RoleChangeNotification(astronautsShardId,
                 RaftState.Follower.name(), RaftState.Leader.name()));
 
         // Now sync status should be true
         assertTrue(shardManager.getMBean().getSyncStatus());
 
         // Make astronauts a Follower
-        shardManager.onReceiveCommand(new RoleChangeNotification(astronautsShardId,
+        shardManager.handleCommand(new RoleChangeNotification(astronautsShardId,
                 RaftState.Leader.name(), RaftState.Follower.name()));
 
         // Sync status is not true
         assertFalse(shardManager.getMBean().getSyncStatus());
 
         // Make the astronauts follower sync status true
-        shardManager.onReceiveCommand(new FollowerInitialSyncUpStatus(true, astronautsShardId));
+        shardManager.handleCommand(new FollowerInitialSyncUpStatus(true, astronautsShardId));
 
         // Sync status is now true
         assertTrue(shardManager.getMBean().getSyncStatus());
@@ -1213,7 +1255,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         shardManager.tell(new SwitchShardBehavior(mockShardName, RaftState.Leader, 1000), kit.getRef());
 
@@ -1238,14 +1280,14 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 .createActor(newShardMgrProps(new ConfigurationImpl(new EmptyModuleShardConfigProvider()))
                     .withDispatcher(Dispatchers.DefaultDispatcherId()));
 
-        SchemaContext schemaContext = TEST_SCHEMA_CONTEXT;
+        EffectiveModelContext schemaContext = TEST_SCHEMA_CONTEXT;
         shardManager.tell(new UpdateSchemaContext(schemaContext), ActorRef.noSender());
 
         DatastoreContext datastoreContext = DatastoreContext.newBuilder().shardElectionTimeoutFactor(100)
                 .persistent(false).build();
         Shard.Builder shardBuilder = Shard.builder();
 
-        ModuleShardConfiguration config = new ModuleShardConfiguration(URI.create("foo-ns"), "foo-module",
+        ModuleShardConfiguration config = new ModuleShardConfiguration(XMLNamespace.of("foo-ns"), "foo-module",
             "foo", null, members("member-1", "member-5", "member-6"));
         shardManager.tell(new CreateShard(config, shardBuilder, datastoreContext), kit.getRef());
 
@@ -1290,7 +1332,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), ActorRef.noSender());
 
         Shard.Builder shardBuilder = Shard.builder();
-        ModuleShardConfiguration config = new ModuleShardConfiguration(URI.create("foo-ns"), "foo-module",
+        ModuleShardConfiguration config = new ModuleShardConfiguration(XMLNamespace.of("foo-ns"), "foo-module",
             "foo", null, members("member-5", "member-6"));
 
         shardManager.tell(new CreateShard(config, shardBuilder, null), kit.getRef());
@@ -1316,13 +1358,13 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         Shard.Builder shardBuilder = Shard.builder();
 
-        ModuleShardConfiguration config = new ModuleShardConfiguration(URI.create("foo-ns"), "foo-module",
+        ModuleShardConfiguration config = new ModuleShardConfiguration(XMLNamespace.of("foo-ns"), "foo-module",
             "foo", null, members("member-1"));
         shardManager.tell(new CreateShard(config, shardBuilder, null), kit.getRef());
 
         kit.expectMsgClass(Duration.ofSeconds(5), Success.class);
 
-        SchemaContext schemaContext = TEST_SCHEMA_CONTEXT;
+        EffectiveModelContext schemaContext = TEST_SCHEMA_CONTEXT;
         shardManager.tell(new UpdateSchemaContext(schemaContext), ActorRef.noSender());
 
         shardManager.tell(new FindLocalShard("foo", true), kit.getRef());
@@ -1363,10 +1405,8 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         assertEquals("getType", shardMrgIDSuffix, datastoreSnapshot.getType());
         assertNull("Expected null ShardManagerSnapshot", datastoreSnapshot.getShardManagerSnapshot());
 
-        Function<ShardSnapshot, String> shardNameTransformer = ShardSnapshot::getName;
-
         assertEquals("Shard names", Sets.newHashSet("shard1", "shard2"), Sets.newHashSet(
-            datastoreSnapshot.getShardSnapshots().stream().map(shardNameTransformer).collect(Collectors.toSet())));
+            datastoreSnapshot.getShardSnapshots().stream().map(ShardSnapshot::getName).collect(Collectors.toSet())));
 
         // Add a new replica
 
@@ -1387,7 +1427,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         datastoreSnapshot = expectMsgClassOrFailure(DatastoreSnapshot.class, kit, "GetSnapshot");
 
         assertEquals("Shard names", Sets.newHashSet("shard1", "shard2", "astronauts"), Sets.newHashSet(
-                Lists.transform(datastoreSnapshot.getShardSnapshots(), shardNameTransformer)));
+                Lists.transform(datastoreSnapshot.getShardSnapshots(), ShardSnapshot::getName)));
 
         ShardManagerSnapshot snapshot = datastoreSnapshot.getShardManagerSnapshot();
         assertNotNull("Expected ShardManagerSnapshot", snapshot);
@@ -1410,7 +1450,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 .put("astronauts", Collections.<String>emptyList()).build());
 
         ShardManagerSnapshot snapshot =
-                new ShardManagerSnapshot(Arrays.asList("shard1", "shard2", "astronauts"), Collections.emptyMap());
+                new ShardManagerSnapshot(Arrays.asList("shard1", "shard2", "astronauts"));
         DatastoreSnapshot restoreFromSnapshot = new DatastoreSnapshot(shardMrgIDSuffix, snapshot,
                 Collections.<ShardSnapshot>emptyList());
         TestActorRef<TestShardManager> shardManager = actorFactory.createTestActor(newTestShardMgrBuilder(mockConfig)
@@ -1491,7 +1531,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         newReplicaShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         leaderShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
 
-        leaderShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
+        leaderShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
 
         short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
         leaderShardManager.tell(
@@ -1508,7 +1548,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         // persisted.
         String[] restoredShards = { "default", "people" };
         ShardManagerSnapshot snapshot =
-                new ShardManagerSnapshot(Arrays.asList(restoredShards), Collections.emptyMap());
+                new ShardManagerSnapshot(Arrays.asList(restoredShards));
         InMemorySnapshotStore.addSnapshot(shardManagerID, snapshot);
         Uninterruptibles.sleepUninterruptibly(2, TimeUnit.MILLISECONDS);
 
@@ -1542,7 +1582,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                 .createTestActor(newPropsShardMgrWithMockShardActor(), shardMgrID);
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         String leaderId = "leader-member-shard-default-" + shardMrgIDSuffix;
         AddServerReply addServerReply = new AddServerReply(ServerChangeStatus.ALREADY_EXISTS, null);
@@ -1600,7 +1640,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
             DataStoreVersions.CURRENT_VERSION), kit.getRef());
         shardManager.tell(
@@ -1715,7 +1755,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), respondActor);
+        shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
             DataStoreVersions.CURRENT_VERSION), kit.getRef());
         shardManager.tell(
@@ -1787,8 +1827,8 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         newReplicaShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
         leaderShardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
 
-        leaderShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
-        newReplicaShardManager.tell(new ActorInitialized(), mockShardLeaderActor);
+        leaderShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
+        newReplicaShardManager.tell(new ActorInitialized(mockShardLeaderActor), ActorRef.noSender());
 
         short leaderVersion = DataStoreVersions.CURRENT_VERSION - 1;
         leaderShardManager.tell(
@@ -1909,7 +1949,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         shardManager.underlyingActor().waitForRecoveryComplete();
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), shard);
+        shardManager.tell(new ActorInitialized(shard), ActorRef.noSender());
 
         waitForShardInitialized(shardManager, "people", kit);
         waitForShardInitialized(shardManager, "default", kit);
@@ -1935,7 +1975,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
                     .put("people", Arrays.asList("member-1", "member-2")).build());
         String[] restoredShards = {"default", "astronauts"};
         ShardManagerSnapshot snapshot =
-                new ShardManagerSnapshot(Arrays.asList(restoredShards), Collections.emptyMap());
+                new ShardManagerSnapshot(Arrays.asList(restoredShards));
         InMemorySnapshotStore.addSnapshot("shard-manager-" + shardMrgIDSuffix, snapshot);
 
         // create shardManager to come up with restored data
@@ -1978,8 +2018,8 @@ public class ShardManagerTest extends AbstractShardManagerTest {
             .addShardActor("shard1", shard1).addShardActor("shard2", shard2).props());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), shard1);
-        shardManager.tell(new ActorInitialized(), shard2);
+        shardManager.tell(new ActorInitialized(shard1), ActorRef.noSender());
+        shardManager.tell(new ActorInitialized(shard2), ActorRef.noSender());
 
         FiniteDuration duration = FiniteDuration.create(5, TimeUnit.SECONDS);
         Future<Boolean> stopFuture = Patterns.gracefulStop(shardManager, duration, Shutdown.INSTANCE);
@@ -2015,7 +2055,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), respondActor);
+        shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
         shardManager.tell(new ShardLeaderStateChanged(memberId, memberId, mock(DataTree.class),
             DataStoreVersions.CURRENT_VERSION), kit.getRef());
         shardManager.tell(
@@ -2047,7 +2087,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         ActorRef shardManager = getSystem().actorOf(newPropsShardMgrWithMockShardActor(respondActor));
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), respondActor);
+        shardManager.tell(new ActorInitialized(respondActor), ActorRef.noSender());
         shardManager.tell(new RoleChangeNotification(memberId, null, RaftState.Follower.name()), respondActor);
 
         shardManager.tell(
@@ -2070,7 +2110,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
         final ActorRef shardManager = actorFactory.createActor(newPropsShardMgrWithMockShardActor());
 
         shardManager.tell(new UpdateSchemaContext(TEST_SCHEMA_CONTEXT), kit.getRef());
-        shardManager.tell(new ActorInitialized(), mockShardActor);
+        shardManager.tell(new ActorInitialized(mockShardActor), ActorRef.noSender());
 
         final Consumer<String> mockCallback = mock(Consumer.class);
         shardManager.tell(new RegisterForShardAvailabilityChanges(mockCallback), kit.getRef());
@@ -2226,7 +2266,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
             }
 
             Builder shardActor(final ActorRef newShardActor) {
-                this.shardActor = newShardActor;
+                shardActor = newShardActor;
                 return this;
             }
 
@@ -2269,7 +2309,7 @@ public class ShardManagerTest extends AbstractShardManagerTest {
 
         AbstractGenericCreator(final Class<C> shardManagerClass) {
             this.shardManagerClass = shardManagerClass;
-            cluster(new MockClusterWrapper()).configuration(new MockConfiguration()).waitTillReadyCountDownLatch(ready)
+            cluster(new MockClusterWrapper()).configuration(new MockConfiguration()).readinessFuture(ready)
                     .primaryShardInfoCache(new PrimaryShardInfoFutureCache());
         }
 
index 0c18e799a5092a73bb266c26f6065eff021ce92b..e4482f5e25d0159b214bdf73206b70e3d8e42df2 100644 (file)
@@ -87,7 +87,7 @@ public class ShardPeerAddressResolverTest {
 
         String peerId = ShardIdentifier.create("default", MEMBER_2, type).toString();
 
-        String address = "akka.tcp://opendaylight-cluster-data@127.0.0.1:2550/user/shardmanager-" + type
+        String address = "akka://opendaylight-cluster-data@127.0.0.1:2550/user/shardmanager-" + type
                 + "/" + MEMBER_2.getName() + "-shard-default-" + type;
 
         resolver.setResolved(peerId, address);
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/TestShardManager.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/TestShardManager.java
new file mode 100644 (file)
index 0000000..337c4c9
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.shardmanager;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import java.util.Map;
+import org.opendaylight.controller.cluster.datastore.DatastoreContext;
+import org.opendaylight.controller.cluster.datastore.TestShard;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.persisted.DatastoreSnapshot;
+
+public class TestShardManager extends ShardManager {
+    TestShardManager(AbstractShardManagerCreator<?> builder) {
+        super(builder);
+    }
+
+    @Override
+    public void handleCommand(Object message) throws Exception {
+        if (GetLocalShards.INSTANCE.equals(message)) {
+            sender().tell(new GetLocalShardsReply(localShards), null);
+        } else {
+            super.handleCommand(message);
+        }
+    }
+
+    /**
+     * Plug into shard actor creation to replace info with our testing one.
+     * @param info shard info.
+     * @return actor for replaced shard info.
+     */
+    @Override
+    protected ActorRef newShardActor(ShardInformation info) {
+        Map<String, String> peerAddresses = getPeerAddresses(info.getShardName());
+        ShardInformation newInfo = new ShardInformation(info.getShardName(),
+                info.getShardId(), peerAddresses,
+                info.getDatastoreContext(),
+                TestShard.builder()
+                        .restoreFromSnapshot(info.getBuilder().getRestoreFromSnapshot()),
+                peerAddressResolver);
+        newInfo.setSchemaContext(info.getSchemaContext());
+        newInfo.setActiveMember(info.isActiveMember());
+
+
+        localShards.put(info.getShardName(), info);
+        return getContext().actorOf(newInfo.newProps().withDispatcher(shardDispatcherPath),
+                info.getShardId().toString());
+    }
+
+    @Override
+    ShardInformation createShardInfoFor(String shardName, ShardIdentifier shardId,
+                                        Map<String, String> peerAddresses,
+                                        DatastoreContext datastoreContext,
+                                        Map<String, DatastoreSnapshot.ShardSnapshot> shardSnapshots) {
+        return new ShardInformation(shardName, shardId, peerAddresses,
+                datastoreContext, TestShard.builder().restoreFromSnapshot(shardSnapshots.get(shardName)),
+                peerAddressResolver);
+    }
+
+    public static class TestShardManagerCreator extends AbstractShardManagerCreator<TestShardManagerCreator> {
+        @Override
+        public Props props() {
+            verify();
+            return Props.create(TestShardManager.class, this);
+        }
+    }
+
+    public static final class GetLocalShards {
+        public static final GetLocalShards INSTANCE = new GetLocalShards();
+
+        private GetLocalShards() {
+
+        }
+    }
+
+    public static class GetLocalShardsReply {
+
+        private final Map<String, ShardInformation> localShards;
+
+        public GetLocalShardsReply(Map<String, ShardInformation> localShards) {
+            this.localShards = localShards;
+        }
+
+        public Map<String, ShardInformation> getLocalShards() {
+            return localShards;
+        }
+    }
+}
index 036e881166883730cf012e67e04b2c811c9bf8c9..fd42b6287c7bac04e83553de8242b636c640b428 100644 (file)
@@ -5,15 +5,12 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.shardstrategy;
 
 import static org.junit.Assert.assertEquals;
 
 import org.junit.BeforeClass;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 import org.opendaylight.controller.cluster.datastore.config.Configuration;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
@@ -21,9 +18,6 @@ import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 public class ModuleShardStrategyTest {
-    @Rule
-    public ExpectedException expectedEx = ExpectedException.none();
-
     private static Configuration configuration;
 
     @BeforeClass
@@ -31,14 +25,10 @@ public class ModuleShardStrategyTest {
         configuration = new ConfigurationImpl("module-shards.conf", "modules.conf");
     }
 
-
     @Test
     public void testFindShard() {
-        ModuleShardStrategy moduleShardStrategy =
-            new ModuleShardStrategy("cars", configuration);
-
+        ModuleShardStrategy moduleShardStrategy = new ModuleShardStrategy("cars", configuration);
         String shard = moduleShardStrategy.findShard(CarsModel.BASE_PATH);
-
         assertEquals("cars-1", shard);
     }
 
@@ -50,12 +40,8 @@ public class ModuleShardStrategyTest {
 
         final YangInstanceIdentifier BASE_PATH = YangInstanceIdentifier.of(baseQName);
 
-        ModuleShardStrategy moduleShardStrategy =
-            new ModuleShardStrategy("missing", configuration);
-
+        ModuleShardStrategy moduleShardStrategy = new ModuleShardStrategy("missing", configuration);
         String shard = moduleShardStrategy.findShard(BASE_PATH);
-
         assertEquals(DefaultShardStrategy.DEFAULT_SHARD, shard);
-
     }
 }
index e7b70e8c1e84171f63328d2050d1ecae86bcbc03..a7310903a7c7d206bc96ada0affe67ae84a3fe35 100644 (file)
@@ -5,33 +5,26 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.shardstrategy;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
 
 import org.junit.Before;
-import org.junit.Rule;
 import org.junit.Test;
-import org.junit.rules.ExpectedException;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 public class ShardStrategyFactoryTest {
-
-    ShardStrategyFactory factory;
-
-    @Rule
-    public ExpectedException expectedEx = ExpectedException.none();
+    private ShardStrategyFactory factory;
 
     @Before
     public void setUp() {
-        factory = new ShardStrategyFactory(
-                new ConfigurationImpl("module-shards.conf", "modules.conf"), LogicalDatastoreType.CONFIGURATION);
+        factory = new ShardStrategyFactory(new ConfigurationImpl("module-shards.conf", "modules.conf"));
     }
 
     @Test
@@ -46,12 +39,9 @@ public class ShardStrategyFactoryTest {
         assertTrue(strategy instanceof ModuleShardStrategy);
     }
 
-
     @Test
     public void testGetStrategyNullPointerExceptionWhenPathIsNull() {
-        expectedEx.expect(NullPointerException.class);
-        expectedEx.expectMessage("path should not be null");
-
-        factory.getStrategy(null);
+        final NullPointerException ex = assertThrows(NullPointerException.class, () -> factory.getStrategy(null));
+        assertEquals("path should not be null", ex.getMessage());
     }
 }
index 919b49f305ad6707792553b721eb33c1d22d222a..daef2143a2df5c88b893b72701499457001a391a 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.utils;
 
 import static org.junit.Assert.assertEquals;
@@ -30,13 +29,13 @@ import akka.japi.Creator;
 import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
-import com.google.common.base.Optional;
-import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import com.typesafe.config.ConfigFactory;
 import java.time.Duration;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import org.junit.Assert;
 import org.junit.Test;
@@ -60,7 +59,7 @@ import org.opendaylight.controller.cluster.datastore.messages.RemotePrimaryShard
 import org.opendaylight.controller.cluster.raft.utils.EchoActor;
 import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Await;
@@ -68,17 +67,16 @@ import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
 public class ActorUtilsTest extends AbstractActorTest {
-
     static final Logger LOG = LoggerFactory.getLogger(ActorUtilsTest.class);
 
-    private static class TestMessage {
+    private static final class TestMessage {
+
     }
 
     private static final class MockShardManager extends UntypedAbstractActor {
-
+        private final Map<String,Object> findPrimaryResponses = new HashMap<>();
         private final boolean found;
         private final ActorRef actorRef;
-        private final Map<String,Object> findPrimaryResponses = Maps.newHashMap();
 
         private MockShardManager(final boolean found, final ActorRef actorRef) {
 
@@ -87,8 +85,7 @@ public class ActorUtilsTest extends AbstractActorTest {
         }
 
         @Override public void onReceive(final Object message) {
-            if (message instanceof FindPrimary) {
-                FindPrimary fp = (FindPrimary)message;
+            if (message instanceof FindPrimary fp) {
                 Object resp = findPrimaryResponses.get(fp.getShardName());
                 if (resp == null) {
                     LOG.error("No expected FindPrimary response found for shard name {}", fp.getShardName());
@@ -111,11 +108,11 @@ public class ActorUtilsTest extends AbstractActorTest {
         }
 
         private static Props props(final boolean found, final ActorRef actorRef) {
-            return Props.create(new MockShardManagerCreator(found, actorRef));
+            return Props.create(MockShardManager.class, new MockShardManagerCreator(found, actorRef));
         }
 
         private static Props props() {
-            return Props.create(new MockShardManagerCreator());
+            return Props.create(MockShardManager.class, new MockShardManagerCreator());
         }
 
         @SuppressWarnings("serial")
@@ -124,8 +121,8 @@ public class ActorUtilsTest extends AbstractActorTest {
             final ActorRef actorRef;
 
             MockShardManagerCreator() {
-                this.found = false;
-                this.actorRef = null;
+                found = false;
+                actorRef = null;
             }
 
             MockShardManagerCreator(final boolean found, final ActorRef actorRef) {
@@ -151,9 +148,7 @@ public class ActorUtilsTest extends AbstractActorTest {
             ActorUtils actorUtils = new ActorUtils(getSystem(), shardManagerActorRef,
                 mock(ClusterWrapper.class), mock(Configuration.class));
 
-            Optional<ActorRef> out = actorUtils.findLocalShard("default");
-
-            assertEquals(shardActorRef, out.get());
+            assertEquals(Optional.of(shardActorRef), actorUtils.findLocalShard("default"));
 
             testKit.expectNoMessage();
             return null;
@@ -381,7 +376,7 @@ public class ActorUtilsTest extends AbstractActorTest {
 
         assertNotNull(actual);
         assertTrue("LocalShardDataTree present", actual.getLocalShardDataTree().isPresent());
-        assertSame("LocalShardDataTree", mockDataTree, actual.getLocalShardDataTree().get());
+        assertSame("LocalShardDataTree", mockDataTree, actual.getLocalShardDataTree().orElseThrow());
         assertTrue("Unexpected PrimaryShardActor path " + actual.getPrimaryShardActor().path(),
                 expPrimaryPath.endsWith(actual.getPrimaryShardActor().pathString()));
         assertEquals("getPrimaryShardVersion", DataStoreVersions.CURRENT_VERSION, actual.getPrimaryShardVersion());
index 36ac74b0c4d6766415ee0b0710387a37379b6ddd..e644759256d59fde647ea3837c17544fa254d100 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.utils;
 
 import akka.actor.ActorRef;
@@ -18,9 +17,10 @@ import akka.cluster.ClusterEvent.UnreachableMember;
 import akka.cluster.Member;
 import akka.cluster.MemberStatus;
 import akka.cluster.UniqueAddress;
+import akka.util.Version;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
-import scala.collection.immutable.Set;
+import scala.collection.immutable.Set.Set1;
 
 public class MockClusterWrapper implements ClusterWrapper {
 
@@ -63,34 +63,29 @@ public class MockClusterWrapper implements ClusterWrapper {
 
     public static MemberRemoved createMemberRemoved(final String memberName, final String address) {
         UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
-        Member member = new Member(uniqueAddress, 1, MemberStatus.removed(), setOf(memberName));
+        Member member = new Member(uniqueAddress, 1, MemberStatus.removed(), new Set1<>(memberName), Version.Zero());
 
         return new MemberRemoved(member, MemberStatus.up());
     }
 
     public static MemberUp createMemberUp(final String memberName, final String address) {
         UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
-        Member member = new Member(uniqueAddress, 1, MemberStatus.up(), setOf(memberName));
+        Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
 
         return new MemberUp(member);
     }
 
     public static UnreachableMember createUnreachableMember(final String memberName, final String address) {
         UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
-        Member member = new Member(uniqueAddress, 1, MemberStatus.up(), setOf(memberName));
+        Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
 
         return new UnreachableMember(member);
     }
 
     public static ReachableMember createReachableMember(final String memberName, final String address) {
         UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
-        Member member = new Member(uniqueAddress, 1, MemberStatus.up(), setOf(memberName));
+        Member member = new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(memberName), Version.Zero());
 
         return new ReachableMember(member);
     }
-
-    @SuppressWarnings("unchecked")
-    private static Set<String> setOf(final String str) {
-        return scala.collection.immutable.Set$.MODULE$.<String>newBuilder().$plus$eq(str).result();
-    }
 }
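
For readers following the MockClusterWrapper change: the repeated Member construction above could be factored into a helper along these lines. This is a hedged sketch against the Akka and Scala classes already imported in the hunk, not a method added by this patch:

    private static Member memberWithRole(final String role, final String address) {
        final UniqueAddress uniqueAddress = new UniqueAddress(AddressFromURIString.parse(address), 55L);
        // Set1 builds a single-element immutable Scala set directly from Java,
        // replacing the old Set$.MODULE$ builder; Version.Zero() fills the extra
        // Version argument the newer Member constructor takes.
        return new Member(uniqueAddress, 1, MemberStatus.up(), new Set1<>(role), Version.Zero());
    }
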
index ec64075fa555fd105f9494554115c6206a0c77e8..ac046884260c82cdbadf23b088b3b269975f2e35 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.utils;
 
 import java.util.Arrays;
@@ -14,12 +13,9 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
-import javax.annotation.Nonnull;
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationImpl;
 import org.opendaylight.controller.cluster.datastore.config.ModuleConfig;
-import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 
 public class MockConfiguration extends ConfigurationImpl {
     public MockConfiguration() {
@@ -39,9 +35,4 @@ public class MockConfiguration extends ConfigurationImpl {
             return retMap;
         });
     }
-
-    @Override
-    public ShardStrategy getStrategyForPrefix(@Nonnull final DOMDataTreeIdentifier prefix) {
-        return null;
-    }
 }
index de41609ba178e41bab48bc8530eff491cdfe2e26..9ba38c2616ae5ce0cf22b80bcf54f6e7c937cf12 100644 (file)
@@ -15,7 +15,6 @@ import static org.junit.Assert.fail;
 import com.google.common.util.concurrent.Uninterruptibles;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
@@ -23,13 +22,12 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
-import javax.annotation.Nonnull;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.DistinctNodeContainer;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodeContainer;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 
 public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
 
@@ -47,14 +45,14 @@ public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
 
     public void reset(final int newExpChangeEventCount) {
         changeLatch = new CountDownLatch(newExpChangeEventCount);
-        this.expChangeEventCount = newExpChangeEventCount;
+        expChangeEventCount = newExpChangeEventCount;
         synchronized (changeList) {
             changeList.clear();
         }
     }
 
     @Override
-    public void onDataTreeChanged(@Nonnull final Collection<DataTreeCandidate> changes) {
+    public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
         if (changeLatch.getCount() > 0) {
             synchronized (changeList) {
                 changeList.addAll(changes);
@@ -90,27 +88,26 @@ public class MockDataTreeChangeListener implements DOMDataTreeChangeListener {
 
         for (int i = 0; i < expPaths.length; i++) {
             final DataTreeCandidate candidate = changeList.get(i);
-            final Optional<NormalizedNode<?, ?>> maybeDataAfter = candidate.getRootNode().getDataAfter();
-            if (!maybeDataAfter.isPresent()) {
+            final NormalizedNode dataAfter = candidate.getRootNode().dataAfter();
+            if (dataAfter == null) {
                 fail(String.format("Change %d does not contain data after. Actual: %s", i + 1,
-                        candidate.getRootNode()));
+                    candidate.getRootNode()));
             }
 
-            final NormalizedNode<?, ?> dataAfter = maybeDataAfter.get();
             final Optional<YangInstanceIdentifier> relativePath = expPaths[i].relativeTo(candidate.getRootPath());
             if (!relativePath.isPresent()) {
                 assertEquals(String.format("Change %d does not contain %s. Actual: %s", i + 1, expPaths[i],
-                        dataAfter), expPaths[i].getLastPathArgument(), dataAfter.getIdentifier());
+                        dataAfter), expPaths[i].getLastPathArgument(), dataAfter.name());
             } else {
-                NormalizedNode<?, ?> nextChild = dataAfter;
-                for (PathArgument pathArg: relativePath.get().getPathArguments()) {
+                NormalizedNode nextChild = dataAfter;
+                for (PathArgument pathArg: relativePath.orElseThrow().getPathArguments()) {
                     boolean found = false;
-                    if (nextChild instanceof NormalizedNodeContainer) {
-                        Optional<NormalizedNode<?, ?>> maybeChild = ((NormalizedNodeContainer)nextChild)
-                                .getChild(pathArg);
+                    if (nextChild instanceof DistinctNodeContainer) {
+                        Optional<NormalizedNode> maybeChild = ((DistinctNodeContainer)nextChild)
+                                .findChildByArg(pathArg);
                         if (maybeChild.isPresent()) {
                             found = true;
-                            nextChild = maybeChild.get();
+                            nextChild = maybeChild.orElseThrow();
                         }
                     }
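
Several assertion rewrites above (here and in the other test utilities) replace Optional.get() with orElseThrow(). A stand-alone illustration using plain java.util.Optional, not controller code:

    static String requireValue(final Optional<String> maybe) {
        // behaves exactly like get(): returns the value, or throws
        // NoSuchElementException when the Optional is empty; the name simply
        // states that absence is treated as a failure
        return maybe.orElseThrow();
    }
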
 
index fd2fa48f3c89b424955637e79bb4434717fbae28..d8bbdcf71d05af6d4af0da4989da708db51b7263 100644 (file)
@@ -5,14 +5,12 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.utils;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.FluentFuture;
 import java.util.Collection;
 import java.util.Optional;
 import java.util.concurrent.ExecutionException;
@@ -28,36 +26,40 @@ import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
 import org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 
 public class NormalizedNodeAggregatorTest {
 
     @Test
-    public void testAggregate() throws InterruptedException, ExecutionException,
-        DataValidationFailedException {
-        SchemaContext schemaContext = SchemaContextHelper.full();
-        NormalizedNode<?, ?> expectedNode1 = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        NormalizedNode<?, ?> expectedNode2 = ImmutableNodes.containerNode(CarsModel.CARS_QNAME);
-
-        Optional<NormalizedNode<?, ?>> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.EMPTY,
+    public void testAggregate() throws InterruptedException, ExecutionException, DataValidationFailedException {
+        EffectiveModelContext schemaContext = SchemaContextHelper.full();
+        NormalizedNode expectedNode1 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
+            .build();
+        NormalizedNode expectedNode2 = ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(CarsModel.CARS_QNAME))
+            .build();
+
+        Optional<NormalizedNode> optional = NormalizedNodeAggregator.aggregate(YangInstanceIdentifier.of(),
                 ImmutableList.of(
-                        Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode1, schemaContext)),
-                        Optional.<NormalizedNode<?, ?>>of(getRootNode(expectedNode2, schemaContext))),
+                        Optional.<NormalizedNode>of(getRootNode(expectedNode1, schemaContext)),
+                        Optional.<NormalizedNode>of(getRootNode(expectedNode2, schemaContext))),
                 schemaContext, LogicalDatastoreType.CONFIGURATION);
 
 
-        NormalizedNode<?,?> normalizedNode = optional.get();
+        NormalizedNode normalizedNode = optional.orElseThrow();
 
-        assertTrue("Expect value to be a Collection", normalizedNode.getValue() instanceof Collection);
+        assertTrue("Expect value to be a Collection", normalizedNode.body() instanceof Collection);
 
         @SuppressWarnings("unchecked")
-        Collection<NormalizedNode<?,?>> collection = (Collection<NormalizedNode<?,?>>) normalizedNode.getValue();
+        Collection<NormalizedNode> collection = (Collection<NormalizedNode>) normalizedNode.body();
 
-        for (NormalizedNode<?,?> node : collection) {
+        for (NormalizedNode node : collection) {
             assertTrue("Expected " + node + " to be a ContainerNode", node instanceof ContainerNode);
         }
 
@@ -73,14 +75,14 @@ public class NormalizedNodeAggregatorTest {
 
     }
 
-    public static NormalizedNode<?, ?> getRootNode(NormalizedNode<?, ?> moduleNode, SchemaContext schemaContext)
-            throws ExecutionException, InterruptedException {
+    public static NormalizedNode getRootNode(final NormalizedNode moduleNode,
+            final EffectiveModelContext schemaContext) throws ExecutionException, InterruptedException {
         try (InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", Executors.newSingleThreadExecutor())) {
-            store.onGlobalContextUpdated(schemaContext);
+            store.onModelContextUpdated(schemaContext);
 
             DOMStoreWriteTransaction writeTransaction = store.newWriteOnlyTransaction();
 
-            writeTransaction.merge(YangInstanceIdentifier.of(moduleNode.getNodeType()), moduleNode);
+            writeTransaction.merge(YangInstanceIdentifier.of(moduleNode.name().getNodeType()), moduleNode);
 
             DOMStoreThreePhaseCommitCohort ready = writeTransaction.ready();
 
@@ -90,17 +92,14 @@ public class NormalizedNodeAggregatorTest {
 
             DOMStoreReadTransaction readTransaction = store.newReadOnlyTransaction();
 
-            FluentFuture<Optional<NormalizedNode<?, ?>>> read = readTransaction.read(YangInstanceIdentifier.EMPTY);
-
-            Optional<NormalizedNode<?, ?>> nodeOptional = read.get();
-
-            return nodeOptional.get();
+            return readTransaction.read(YangInstanceIdentifier.of()).get().orElseThrow();
         }
     }
 
-    public static NormalizedNode<?,?> findChildWithQName(Collection<NormalizedNode<?, ?>> collection, QName qname) {
-        for (NormalizedNode<?, ?> node : collection) {
-            if (node.getNodeType().equals(qname)) {
+    public static NormalizedNode findChildWithQName(final Collection<NormalizedNode> collection,
+            final QName qname) {
+        for (NormalizedNode node : collection) {
+            if (node.name().getNodeType().equals(qname)) {
                 return node;
             }
         }
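
The aggregator test now builds its containers through the yangtools data.spi.node.ImmutableNodes builder instead of the removed containerNode(QName) shortcut. A hedged sketch of that pattern, using only the types imported in the hunk above:

    static ContainerNode emptyContainer(final QName qname) {
        // an empty container identified by the given QName, equivalent to the
        // old ImmutableNodes.containerNode(qname) convenience method
        return ImmutableNodes.newContainerBuilder()
            .withNodeIdentifier(new NodeIdentifier(qname))
            .build();
    }
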
index 18949ac21ecd5ec3d491933b9f15e0041a8f79fe..4c5c06e3fadff4ebd955badd3545bed7874f4347 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.utils;
 
 import static org.junit.Assert.assertEquals;
@@ -27,42 +26,48 @@ import com.google.common.reflect.Reflection;
 import java.lang.reflect.InvocationTargetException;
 import java.util.Optional;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
 import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.datastore.ShardDataTree;
+import org.opendaylight.controller.cluster.datastore.node.utils.transformer.ReusableNormalizedNodePruner;
 import org.opendaylight.controller.md.cluster.datastore.model.CarsModel;
 import org.opendaylight.controller.md.cluster.datastore.model.PeopleModel;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModificationCursor;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.TreeType;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.SchemaValidationFailedException;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
+import org.opendaylight.yangtools.yang.data.tree.api.DataTree;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeConfiguration;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModification;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeModificationCursor;
+import org.opendaylight.yangtools.yang.data.tree.api.DataValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.ModificationType;
+import org.opendaylight.yangtools.yang.data.tree.api.SchemaValidationFailedException;
+import org.opendaylight.yangtools.yang.data.tree.api.TreeType;
+import org.opendaylight.yangtools.yang.data.tree.impl.di.InMemoryDataTreeFactory;
+import org.opendaylight.yangtools.yang.data.util.DataSchemaContextTree;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
+
+@RunWith(MockitoJUnitRunner.class)
 public class PruningDataTreeModificationTest {
-    static final SchemaContext SCHEMA_CONTEXT = SchemaContextHelper.select(SchemaContextHelper.CARS_YANG,
-            SchemaContextHelper.ODL_DATASTORE_TEST_YANG);
-
     static final QName INVALID_TEST_QNAME = QName.create(TestModel.TEST_QNAME, "invalid");
     static final YangInstanceIdentifier INVALID_TEST_PATH = YangInstanceIdentifier.of(INVALID_TEST_QNAME);
 
+    private static EffectiveModelContext SCHEMA_CONTEXT;
+    private static DataSchemaContextTree CONTEXT_TREE;
+
     @Mock
     private DataTreeModification mockModification;
 
@@ -71,11 +76,16 @@ public class PruningDataTreeModificationTest {
     private DataTreeModification proxyModification;
     private PruningDataTreeModification pruningDataTreeModification;
 
+    @BeforeClass
+    public static void beforeClass() {
+        SCHEMA_CONTEXT = SchemaContextHelper.select(SchemaContextHelper.CARS_YANG,
+            SchemaContextHelper.ODL_DATASTORE_TEST_YANG);
+        CONTEXT_TREE = DataSchemaContextTree.from(SCHEMA_CONTEXT);
+    }
+
     @Before
     @SuppressWarnings("checkstyle:avoidHidingCauseException")
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
         dataTree = new InMemoryDataTreeFactory().create(DataTreeConfiguration.DEFAULT_CONFIGURATION,
             SCHEMA_CONTEXT);
 
@@ -89,7 +99,9 @@ public class PruningDataTreeModificationTest {
             }
         });
 
-        pruningDataTreeModification = new PruningDataTreeModification(proxyModification, dataTree, SCHEMA_CONTEXT);
+        pruningDataTreeModification = new PruningDataTreeModification.Reactive(proxyModification, dataTree,
+            // Cannot reuse with parallel tests
+            ReusableNormalizedNodePruner.forDataSchemaContext(CONTEXT_TREE));
     }
 
     @Test
@@ -112,7 +124,7 @@ public class PruningDataTreeModificationTest {
 
     @Test
     public void testMerge() {
-        NormalizedNode<?, ?> normalizedNode = CarsModel.create();
+        NormalizedNode normalizedNode = CarsModel.create();
         YangInstanceIdentifier path = CarsModel.BASE_PATH;
         pruningDataTreeModification.merge(path, normalizedNode);
 
@@ -121,7 +133,7 @@ public class PruningDataTreeModificationTest {
 
     @Test
     public void testMergeWithInvalidNamespace() throws DataValidationFailedException {
-        NormalizedNode<?, ?> normalizedNode = PeopleModel.emptyContainer();
+        NormalizedNode normalizedNode = PeopleModel.emptyContainer();
         YangInstanceIdentifier path = PeopleModel.BASE_PATH;
 
         pruningDataTreeModification.merge(path, normalizedNode);
@@ -129,19 +141,21 @@ public class PruningDataTreeModificationTest {
         verify(mockModification, times(1)).merge(path, normalizedNode);
 
         DataTreeCandidate candidate = getCandidate();
-        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
     }
 
     @Test
     public void testMergeWithInvalidChildNodeNames() throws DataValidationFailedException {
-        ContainerNode augContainer = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(AUG_CONTAINER)).withChild(
-                        ImmutableNodes.containerNode(AUG_INNER_CONTAINER)).build();
-
-        DataContainerChild<?, ?> outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
-        ContainerNode normalizedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
-                .withChild(augContainer).withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug")).build();
+        DataContainerChild outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
+        ContainerNode normalizedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .withChild(Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(AUG_CONTAINER))
+                .withChild(ImmutableNodes.containerNode(AUG_INNER_CONTAINER))
+                .build())
+            .withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
+            .build();
 
         YangInstanceIdentifier path = TestModel.TEST_PATH;
 
@@ -149,17 +163,17 @@ public class PruningDataTreeModificationTest {
 
         dataTree.commit(getCandidate());
 
-        ContainerNode prunedNode = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode).build();
+        ContainerNode prunedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .build();
 
-        Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(path);
-        assertTrue("After pruning present", actual.isPresent());
-        assertEquals("After pruning", prunedNode, actual.get());
+        assertEquals("After pruning", Optional.of(prunedNode), dataTree.takeSnapshot().readNode(path));
     }
 
     @Test
     public void testMergeWithValidNamespaceAndInvalidNodeName() throws DataValidationFailedException {
-        NormalizedNode<?, ?> normalizedNode = ImmutableNodes.containerNode(INVALID_TEST_QNAME);
+        NormalizedNode normalizedNode = ImmutableNodes.containerNode(INVALID_TEST_QNAME);
         YangInstanceIdentifier path = INVALID_TEST_PATH;
 
         pruningDataTreeModification.merge(path, normalizedNode);
@@ -167,12 +181,12 @@ public class PruningDataTreeModificationTest {
         verify(mockModification, times(1)).merge(path, normalizedNode);
 
         DataTreeCandidate candidate = getCandidate();
-        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
     }
 
     @Test
     public void testWrite() {
-        NormalizedNode<?, ?> normalizedNode = CarsModel.create();
+        NormalizedNode normalizedNode = CarsModel.create();
         YangInstanceIdentifier path = CarsModel.BASE_PATH;
         pruningDataTreeModification.write(path, normalizedNode);
 
@@ -190,13 +204,11 @@ public class PruningDataTreeModificationTest {
         localDataTree.validate(mod);
         localDataTree.commit(localDataTree.prepare(mod));
 
-        NormalizedNode<?, ?> normalizedNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY).get();
-        pruningDataTreeModification.write(YangInstanceIdentifier.EMPTY, normalizedNode);
+        NormalizedNode normalizedNode = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()).orElseThrow();
+        pruningDataTreeModification.write(YangInstanceIdentifier.of(), normalizedNode);
         dataTree.commit(getCandidate());
 
-        Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY);
-        assertTrue("Root present", actual.isPresent());
-        assertEquals("Root node", normalizedNode, actual.get());
+        assertEquals(Optional.of(normalizedNode), dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()));
     }
 
     @Test
@@ -204,23 +216,22 @@ public class PruningDataTreeModificationTest {
         final Shard mockShard = Mockito.mock(Shard.class);
 
         ShardDataTree shardDataTree = new ShardDataTree(mockShard, SCHEMA_CONTEXT, TreeType.CONFIGURATION);
-        NormalizedNode<?, ?> root = shardDataTree.readNode(YangInstanceIdentifier.EMPTY).get();
+        NormalizedNode root = shardDataTree.readNode(YangInstanceIdentifier.of()).orElseThrow();
 
-        NormalizedNode<?, ?> normalizedNode = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(root.getNodeType())).withChild(
-                        ImmutableNodes.containerNode(AUG_CONTAINER)).build();
-        pruningDataTreeModification.write(YangInstanceIdentifier.EMPTY, normalizedNode);
+        NormalizedNode normalizedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(root.name().getNodeType()))
+            .withChild(ImmutableNodes.containerNode(AUG_CONTAINER))
+            .build();
+        pruningDataTreeModification.write(YangInstanceIdentifier.of(), normalizedNode);
         dataTree.commit(getCandidate());
 
-        Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(YangInstanceIdentifier.EMPTY);
-        assertEquals("Root present", true, actual.isPresent());
-        assertEquals("Root node", root, actual.get());
+        assertEquals(Optional.of(root), dataTree.takeSnapshot().readNode(YangInstanceIdentifier.of()));
 
     }
 
     @Test
     public void testWriteWithInvalidNamespace() throws DataValidationFailedException {
-        NormalizedNode<?, ?> normalizedNode = PeopleModel.emptyContainer();
+        NormalizedNode normalizedNode = PeopleModel.emptyContainer();
         YangInstanceIdentifier path = PeopleModel.BASE_PATH;
 
         pruningDataTreeModification.write(path, normalizedNode);
@@ -228,20 +239,22 @@ public class PruningDataTreeModificationTest {
         verify(mockModification, times(1)).write(path, normalizedNode);
 
         DataTreeCandidate candidate = getCandidate();
-        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().getModificationType());
+        assertEquals("getModificationType", ModificationType.UNMODIFIED, candidate.getRootNode().modificationType());
     }
 
     @Test
     public void testWriteWithInvalidChildNodeNames() throws DataValidationFailedException {
-        ContainerNode augContainer = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(AUG_CONTAINER)).withChild(
-                        ImmutableNodes.containerNode(AUG_INNER_CONTAINER)).build();
-
-        DataContainerChild<?, ?> outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
-        ContainerNode normalizedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
-                .withChild(augContainer).withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
-                .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name")).build();
+        DataContainerChild outerNode = outerNode(outerNodeEntry(1, innerNode("one", "two")));
+        ContainerNode normalizedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .withChild(Builders.containerBuilder()
+                .withNodeIdentifier(new NodeIdentifier(AUG_CONTAINER))
+                .withChild(ImmutableNodes.containerNode(AUG_INNER_CONTAINER))
+                .build())
+            .withChild(ImmutableNodes.leafNode(AUG_QNAME, "aug"))
+            .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name"))
+            .build();
 
         YangInstanceIdentifier path = TestModel.TEST_PATH;
 
@@ -249,13 +262,13 @@ public class PruningDataTreeModificationTest {
 
         dataTree.commit(getCandidate());
 
-        ContainerNode prunedNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outerNode)
-                .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name")).build();
+        ContainerNode prunedNode = Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outerNode)
+            .withChild(ImmutableNodes.leafNode(NAME_QNAME, "name"))
+            .build();
 
-        Optional<NormalizedNode<?, ?>> actual = dataTree.takeSnapshot().readNode(path);
-        assertTrue("After pruning present", actual.isPresent());
-        assertEquals("After pruning", prunedNode, actual.get());
+        assertEquals(Optional.of(prunedNode), dataTree.takeSnapshot().readNode(path));
     }
 
     @Test
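
This test (and the rate-limiter test that follows) drops MockitoAnnotations.initMocks(this) in favour of a JUnit 4 runner. A self-contained sketch of that setup, using the MockitoJUnitRunner and static imports shown in the hunks; SomeServiceTest and SomeDependency are invented names, not classes touched by this patch:

    @RunWith(MockitoJUnitRunner.class)
    public class SomeServiceTest {
        interface SomeDependency {
            String status();
        }

        @Mock
        private SomeDependency dependency;   // initialized by the runner, no initMocks() call

        @Test
        public void testStatus() {
            doReturn("ok").when(dependency).status();
            assertEquals("ok", dependency.status());
        }
    }
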
index fef8206885ebb07f842975acffd7a9176e104a43..5c7cdc6789774a51997c4e09bbc724c50de61593 100644 (file)
@@ -5,12 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.cluster.datastore.utils;
 
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.greaterThan;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
@@ -18,37 +17,35 @@ import static org.mockito.Mockito.verify;
 
 import com.codahale.metrics.Snapshot;
 import com.codahale.metrics.Timer;
+import com.google.common.base.Stopwatch;
+import java.time.Duration;
 import java.util.concurrent.TimeUnit;
-import org.apache.commons.lang3.time.StopWatch;
 import org.hamcrest.BaseMatcher;
 import org.hamcrest.Description;
 import org.hamcrest.Matcher;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnitRunner;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext;
 
+// FIXME: use Strict runner
+@RunWith(MockitoJUnitRunner.Silent.class)
 public class TransactionRateLimiterTest {
-
     @Mock
     public ActorUtils actorUtils;
-
     @Mock
     public DatastoreContext datastoreContext;
-
     @Mock
     public Timer commitTimer;
-
     @Mock
     private Timer.Context commitTimerContext;
-
     @Mock
     private Snapshot commitSnapshot;
 
     @Before
     public void setUp() {
-        MockitoAnnotations.initMocks(this);
         doReturn(datastoreContext).when(actorUtils).getDatastoreContext();
         doReturn(30).when(datastoreContext).getShardTransactionCommitTimeoutInSeconds();
         doReturn(100L).when(datastoreContext).getTransactionCreationInitialRateLimit();
@@ -66,18 +63,14 @@ public class TransactionRateLimiterTest {
         }
 
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
         rateLimiter.acquire();
 
         assertThat(rateLimiter.getTxCreationLimit(), approximately(292));
-
         assertEquals(147, rateLimiter.getPollOnCount());
     }
 
-
     @Test
     public void testAcquirePercentileValueZero() {
-
         for (int i = 1; i < 11; i++) {
             // Keep on increasing the amount of time it takes to complete transaction for each tenth of a
             // percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
@@ -87,17 +80,14 @@ public class TransactionRateLimiterTest {
         doReturn(TimeUnit.MILLISECONDS.toNanos(0) * 1D).when(commitSnapshot).getValue(0.1);
 
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
         rateLimiter.acquire();
 
         assertThat(rateLimiter.getTxCreationLimit(), approximately(192));
-
         assertEquals(97, rateLimiter.getPollOnCount());
     }
 
     @Test
     public void testAcquireOnePercentileValueVeryHigh() {
-
         for (int i = 1; i < 11; i++) {
             // Keep on increasing the amount of time it takes to complete transaction for each tenth of a
             // percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
@@ -108,11 +98,9 @@ public class TransactionRateLimiterTest {
         doReturn(TimeUnit.MILLISECONDS.toNanos(10000) * 1D).when(commitSnapshot).getValue(1.0);
 
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
         rateLimiter.acquire();
 
         assertThat(rateLimiter.getTxCreationLimit(), approximately(282));
-
         assertEquals(142, rateLimiter.getPollOnCount());
     }
 
@@ -126,18 +114,15 @@ public class TransactionRateLimiterTest {
         }
 
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
         rateLimiter.acquire();
 
         // The initial rate limit will be retained here because the calculated rate limit was too small
         assertThat(rateLimiter.getTxCreationLimit(), approximately(100));
-
         assertEquals(1, rateLimiter.getPollOnCount());
     }
 
     @Test
     public void testAcquireWithRealPercentileValues() {
-
         for (int i = 1; i < 11; i++) {
             // Keep on increasing the amount of time it takes to complete transaction for each tenth of a
             // percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
@@ -149,11 +134,9 @@ public class TransactionRateLimiterTest {
         doReturn(TimeUnit.MILLISECONDS.toNanos(200) * 1D).when(commitSnapshot).getValue(1.0);
 
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
         rateLimiter.acquire();
 
         assertThat(rateLimiter.getTxCreationLimit(), approximately(101));
-
         assertEquals(51, rateLimiter.getPollOnCount());
     }
 
@@ -184,26 +167,20 @@ public class TransactionRateLimiterTest {
         DatastoreContext.getGlobalDatastoreNames().add("operational");
 
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
         rateLimiter.acquire();
 
         assertThat(rateLimiter.getTxCreationLimit(), approximately(292));
-
         assertEquals(147, rateLimiter.getPollOnCount());
     }
 
     @Test
     public void testRateLimiting() {
-
         for (int i = 1; i < 11; i++) {
             doReturn(TimeUnit.SECONDS.toNanos(1) * 1D).when(commitSnapshot).getValue(i * 0.1);
         }
 
-        TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
-        StopWatch watch = new StopWatch();
-
-        watch.start();
+        final TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
+        final Stopwatch watch = Stopwatch.createStarted();
 
         rateLimiter.acquire();
         rateLimiter.acquire();
@@ -211,13 +188,12 @@ public class TransactionRateLimiterTest {
 
         watch.stop();
 
-        assertTrue("did not take as much time as expected rate limit : " + rateLimiter.getTxCreationLimit(),
-                watch.getTime() > 1000);
+        assertThat("did not take as much time as expected rate limit : " + rateLimiter.getTxCreationLimit(),
+            watch.elapsed(), greaterThan(Duration.ofSeconds(1)));
     }
 
     @Test
     public void testRateLimitNotCalculatedUntilPollCountReached() {
-
         for (int i = 1; i < 11; i++) {
             // Keep on increasing the amount of time it takes to complete transaction for each tenth of a
             // percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
@@ -229,11 +205,9 @@ public class TransactionRateLimiterTest {
         doReturn(TimeUnit.MILLISECONDS.toNanos(200) * 1D).when(commitSnapshot).getValue(1.0);
 
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
-
         rateLimiter.acquire();
 
         assertThat(rateLimiter.getTxCreationLimit(), approximately(101));
-
         assertEquals(51, rateLimiter.getPollOnCount());
 
         for (int i = 0; i < 49; i++) {
@@ -250,7 +224,6 @@ public class TransactionRateLimiterTest {
 
     @Test
     public void testAcquireNegativeAcquireAndPollOnCount() {
-
         for (int i = 1; i < 11; i++) {
             // Keep on increasing the amount of time it takes to complete transaction for each tenth of a
             // percentile. Essentially this would be 1ms for the 10th percentile, 2ms for 20th percentile and so on.
@@ -264,11 +237,9 @@ public class TransactionRateLimiterTest {
         TransactionRateLimiter rateLimiter = new TransactionRateLimiter(actorUtils);
         rateLimiter.setAcquireCount(Long.MAX_VALUE - 1);
         rateLimiter.setPollOnCount(Long.MAX_VALUE);
-
         rateLimiter.acquire();
 
         assertThat(rateLimiter.getTxCreationLimit(), approximately(101));
-
         assertEquals(-9223372036854775759L, rateLimiter.getPollOnCount());
 
         for (int i = 0; i < 50; i++) {
@@ -276,23 +247,20 @@ public class TransactionRateLimiterTest {
         }
 
         verify(commitTimer, times(2)).getSnapshot();
-
     }
 
     public Matcher<Double> approximately(final double val) {
-        return new BaseMatcher<Double>() {
+        return new BaseMatcher<>() {
             @Override
-            public boolean matches(Object obj) {
+            public boolean matches(final Object obj) {
                 Double value = (Double) obj;
                 return value >= val && value <= val + 1;
             }
 
             @Override
-            public void describeTo(Description description) {
+            public void describeTo(final Description description) {
                 description.appendText("> " + val + " < " + (val + 1));
             }
         };
     }
-
-
 }
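
The timing assertion in testRateLimiting() now uses Guava's Stopwatch and a Hamcrest Duration matcher instead of commons-lang3 StopWatch and assertTrue. A minimal sketch of that idiom; the helper name is invented, while the imports match the ones added in the hunk:

    static void assertTakesLongerThanOneSecond(final Runnable work) {
        final Stopwatch watch = Stopwatch.createStarted();
        work.run();
        watch.stop();
        // Duration-based comparison reads more clearly than watch.getTime() > 1000
        assertThat(watch.elapsed(), greaterThan(Duration.ofSeconds(1)));
    }
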
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongBitmapTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongBitmapTest.java
new file mode 100644 (file)
index 0000000..fc0b490
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+import com.google.common.base.VerifyException;
+import com.google.common.io.ByteStreams;
+import com.google.common.primitives.UnsignedLong;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Map;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
+import org.opendaylight.controller.cluster.datastore.utils.UnsignedLongBitmap.Regular;
+import org.opendaylight.yangtools.concepts.WritableObjects;
+
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class UnsignedLongBitmapTest {
+    @Test
+    public void testEmpty() throws IOException {
+        final var empty = UnsignedLongBitmap.of();
+        assertTrue(empty.isEmpty());
+        assertEquals(empty, empty);
+        assertSame(empty, UnsignedLongBitmap.copyOf(Map.of()));
+        assertEquals(Map.of(), empty.mutableCopy());
+        assertEquals("{}", empty.toString());
+        assertEquals(0, empty.hashCode());
+
+        final var ex = assertThrows(IOException.class, () -> empty.writeEntriesTo(mock(DataOutput.class), 1));
+        assertEquals("Mismatched size: expected 0, got 1", ex.getMessage());
+
+        // Should not do anything
+        empty.writeEntriesTo(mock(DataOutput.class), 0);
+
+        assertSame(empty, assertWriteToReadFrom(empty));
+    }
+
+    @Test
+    public void testSingleton() {
+        final var one = UnsignedLongBitmap.of(0, false);
+        assertFalse(one.isEmpty());
+        assertEquals(1, one.size());
+        assertEquals(one, one);
+        assertEquals(one, UnsignedLongBitmap.of(0, false));
+        assertEquals(one, UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, false)));
+        assertEquals(Map.of(UnsignedLong.ZERO, false), one.mutableCopy());
+        assertEquals("{0=false}", one.toString());
+        assertEquals(1237, one.hashCode());
+
+        final var ex = assertThrows(IOException.class, () -> one.writeEntriesTo(mock(DataOutput.class), 0));
+        assertEquals("Mismatched size: expected 1, got 0", ex.getMessage());
+
+        assertEquals(one, UnsignedLongBitmap.of(0, false));
+        assertNotEquals(one, UnsignedLongBitmap.of(0, true));
+        assertNotEquals(one, UnsignedLongBitmap.of(1, false));
+        assertNotEquals(UnsignedLongBitmap.of(), one);
+        assertNotEquals(one, UnsignedLongBitmap.of());
+
+        assertWriteToReadFrom(one);
+    }
+
+    @Test
+    public void testRegular() {
+        final var one = UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, false, UnsignedLong.ONE, true));
+        assertFalse(one.isEmpty());
+        assertEquals(2, one.size());
+        assertEquals(one, one);
+        assertEquals(one, UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ONE, true, UnsignedLong.ZERO, false)));
+        assertEquals(Map.of(UnsignedLong.ZERO, false, UnsignedLong.ONE, true), one.mutableCopy());
+
+        assertNotEquals(one,
+            UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, false, UnsignedLong.valueOf(2), true)));
+        assertEquals("{0=false, 1=true}", one.toString());
+        assertEquals(40345, one.hashCode());
+
+        final var ex = assertThrows(IOException.class, () -> one.writeEntriesTo(mock(DataOutput.class), 1));
+        assertEquals("Mismatched size: expected 2, got 1", ex.getMessage());
+
+        final var two = UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, true, UnsignedLong.ONE, false));
+        assertFalse(two.isEmpty());
+        assertEquals(2, two.size());
+        assertEquals(two, two);
+        assertEquals(two, UnsignedLongBitmap.copyOf(Map.of(UnsignedLong.ZERO, true, UnsignedLong.ONE, false)));
+        assertEquals("{0=true, 1=false}", two.toString());
+        assertEquals(40549, two.hashCode());
+
+        assertNotEquals(one, two);
+        assertNotEquals(two, one);
+
+        assertWriteToReadFrom(one);
+        assertWriteToReadFrom(two);
+    }
+
+    private static UnsignedLongBitmap assertWriteToReadFrom(final UnsignedLongBitmap orig) {
+        final var dos = ByteStreams.newDataOutput();
+        try {
+            orig.writeEntriesTo(dos);
+        } catch (IOException e) {
+            throw new AssertionError(e);
+        }
+
+        final UnsignedLongBitmap copy;
+        try {
+            final var dis = ByteStreams.newDataInput(dos.toByteArray());
+            copy = UnsignedLongBitmap.readFrom(dis, orig.size());
+            assertThrows(IllegalStateException.class, () -> dis.readByte());
+        } catch (IOException e) {
+            throw new AssertionError(e);
+        }
+
+        assertEquals(orig, copy);
+        return copy;
+    }
+
+    @Test
+    public void testKeyOrder() throws IOException {
+        assertInvalidKey(0);
+        assertInvalidKey(1);
+    }
+
+    private static void assertInvalidKey(final long second) throws IOException {
+        final var out = ByteStreams.newDataOutput();
+        WritableObjects.writeLong(out, 1);
+        out.writeBoolean(false);
+        WritableObjects.writeLong(out, second);
+        out.writeBoolean(true);
+
+        final var ex = assertThrows(IOException.class,
+            () -> UnsignedLongBitmap.readFrom(ByteStreams.newDataInput(out.toByteArray()), 2));
+        assertEquals("Key " + second + " may not be used after key 1", ex.getMessage());
+    }
+
+    @Test
+    public void testInvalidArrays() {
+        assertThrows(VerifyException.class, () -> new Regular(new long[0], new boolean[] { false, false }));
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongSetTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/UnsignedLongSetTest.java
new file mode 100644 (file)
index 0000000..dbac4cc
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.junit.MockitoJUnitRunner;
+
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
+public class UnsignedLongSetTest {
+    @Test
+    public void testOperations() {
+        final var set = MutableUnsignedLongSet.of();
+        assertEquals("MutableUnsignedLongSet{size=0}", set.toString());
+        assertFalse(set.contains(0));
+
+        set.add(0);
+        assertTrue(set.contains(0));
+        assertRanges("[[0..0]]", set);
+
+        set.add(1);
+        assertTrue(set.contains(1));
+        assertRanges("[[0..1]]", set);
+        set.add(1);
+        assertRanges("[[0..1]]", set);
+
+        set.add(4);
+        assertRanges("[[0..1], [4..4]]", set);
+
+        set.add(3);
+        assertRanges("[[0..1], [3..4]]", set);
+
+        set.add(2);
+        assertRanges("[[0..4]]", set);
+
+        assertTrue(set.contains(2));
+        assertTrue(set.contains(3));
+        assertTrue(set.contains(4));
+
+        set.add(8);
+        assertRanges("[[0..4], [8..8]]", set);
+        set.add(6);
+        assertRanges("[[0..4], [6..6], [8..8]]", set);
+        set.add(7);
+        assertRanges("[[0..4], [6..8]]", set);
+        set.add(5);
+        assertRanges("[[0..8]]", set);
+
+        set.add(11);
+        assertRanges("[[0..8], [11..11]]", set);
+        set.add(9);
+        assertRanges("[[0..9], [11..11]]", set);
+    }
+
+    @Test
+    public void testSerialization() throws IOException {
+
+        final var set = MutableUnsignedLongSet.of(0, 1, 4, 3).immutableCopy();
+
+        final var bos = new ByteArrayOutputStream();
+        try (var out = new DataOutputStream(bos)) {
+            set.writeTo(out);
+        }
+
+        final var bytes = bos.toByteArray();
+        assertArrayEquals(new byte[] { 0, 0, 0, 2, 16, 2, 17, 3, 5 }, bytes);
+
+        final ImmutableUnsignedLongSet read;
+        try (var in = new DataInputStream(new ByteArrayInputStream(bytes))) {
+            read = ImmutableUnsignedLongSet.readFrom(in);
+            assertEquals(0, in.available());
+        }
+
+        assertEquals(set, read);
+    }
+
+    @Test
+    public void testToRangeSet() {
+        final var set = MutableUnsignedLongSet.of(0, 1, 4, 3);
+        assertEquals("[[0..2), [3..5)]", set.toRangeSet().toString());
+    }
+
+    @Test
+    public void testEmptyCopy() {
+        final var orig = MutableUnsignedLongSet.of();
+        assertSame(ImmutableUnsignedLongSet.of(), orig.immutableCopy());
+        final var copy = orig.mutableCopy();
+        assertEquals(orig, copy);
+        assertNotSame(orig, copy);
+    }
+
+    @Test
+    public void testMutableCopy() {
+        final var orig = MutableUnsignedLongSet.of();
+        orig.add(-1);
+        assertEquals("MutableUnsignedLongSet{span=[18446744073709551615..18446744073709551615], size=1}",
+            orig.toString());
+
+        final var copy = orig.mutableCopy();
+        assertEquals(orig, copy);
+        assertNotSame(orig, copy);
+
+        orig.add(-2);
+        assertNotEquals(orig, copy);
+        assertEquals("MutableUnsignedLongSet{span=[18446744073709551614..18446744073709551615], size=1}",
+            orig.toString());
+    }
+
+    @Test
+    public void testWriteRangesTo() throws IOException {
+        ImmutableUnsignedLongSet.of().writeRangesTo(mock(DataOutput.class), 0);
+    }
+
+    @Test
+    public void testWriteRangesToViolation() {
+        final var ex = assertThrows(IOException.class,
+            () -> ImmutableUnsignedLongSet.of().writeRangesTo(mock(DataOutput.class), 1));
+        assertEquals("Mismatched size: expected 0, got 1", ex.getMessage());
+    }
+
+    @Test
+    public void testAddRange() {
+        var set = sparseSet();
+        set.addAll(MutableUnsignedLongSet.of(1, 2));
+        assertRanges("[[1..2], [5..6], [9..10], [13..14]]", set);
+        set.addAll(MutableUnsignedLongSet.of(3, 4));
+        assertRanges("[[1..6], [9..10], [13..14]]", set);
+        set.addAll(MutableUnsignedLongSet.of(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
+        assertRanges("[[1..15]]", set);
+
+        set = sparseSet();
+        set.addAll(MutableUnsignedLongSet.of(2, 3, 4, 5));
+        assertRanges("[[1..6], [9..10], [13..14]]", set);
+
+        set.addAll(MutableUnsignedLongSet.of(6, 7));
+        assertRanges("[[1..7], [9..10], [13..14]]", set);
+
+        set.addAll(MutableUnsignedLongSet.of(8));
+        assertRanges("[[1..10], [13..14]]", set);
+
+        set = MutableUnsignedLongSet.of();
+        set.addAll(MutableUnsignedLongSet.of(1, 2));
+        assertRanges("[[1..2]]", set);
+
+        set = sparseSet();
+        set.addAll(MutableUnsignedLongSet.of(4, 5));
+        assertRanges("[[1..2], [4..6], [9..10], [13..14]]", set);
+
+        set.addAll(MutableUnsignedLongSet.of(12, 13, 14, 15));
+        assertRanges("[[1..2], [4..6], [9..10], [12..15]]", set);
+
+        set.addAll(MutableUnsignedLongSet.of(8, 9, 10, 11));
+        assertRanges("[[1..2], [4..6], [8..15]]", set);
+
+        set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16));
+        assertRanges("[[0..16]]", set);
+
+        set = sparseSet();
+        set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3));
+        assertRanges("[[0..3], [5..6], [9..10], [13..14]]", set);
+
+        set = sparseSet();
+        set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8));
+        assertRanges("[[0..10], [13..14]]", set);
+
+        set = sparseSet();
+        set.addAll(MutableUnsignedLongSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
+        assertRanges("[[0..10], [13..14]]", set);
+    }
+
+    private static MutableUnsignedLongSet sparseSet() {
+        final var ret = MutableUnsignedLongSet.of(1, 2, 5, 6, 9, 10, 13, 14);
+        assertRanges("[[1..2], [5..6], [9..10], [13..14]]", ret);
+        return ret;
+    }
+
+    private static void assertRanges(final String expected, final UnsignedLongSet set) {
+        assertEquals(expected, set.ranges().toString());
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImplTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/CDSShardAccessImplTest.java
deleted file mode 100644 (file)
index 9470682..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-
-import akka.actor.ActorRef;
-import akka.dispatch.Futures;
-import com.google.common.base.Optional;
-import java.util.concurrent.TimeUnit;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListenerRegistration;
-import org.opendaylight.controller.cluster.raft.LeadershipTransferFailedException;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import scala.concurrent.Future;
-import scala.concurrent.duration.FiniteDuration;
-
-public class CDSShardAccessImplTest extends AbstractActorTest {
-
-    private static final DOMDataTreeIdentifier TEST_ID =
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-
-    private CDSShardAccessImpl shardAccess;
-    private ActorUtils context;
-
-    @Before
-    public void setUp() {
-        context = mock(ActorUtils.class);
-        final DatastoreContext datastoreContext = DatastoreContext.newBuilder().build();
-        doReturn(Optional.of(getSystem().deadLetters())).when(context).findLocalShard(any());
-        doReturn(datastoreContext).when(context).getDatastoreContext();
-        doReturn(getSystem()).when(context).getActorSystem();
-        shardAccess = new CDSShardAccessImpl(TEST_ID, context);
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testRegisterLeaderLocationListener() {
-        final LeaderLocationListener listener1 = mock(LeaderLocationListener.class);
-
-        // first registration should be OK
-        shardAccess.registerLeaderLocationListener(listener1);
-
-        // second registration should fail with IllegalArgumentEx
-        try {
-            shardAccess.registerLeaderLocationListener(listener1);
-            fail("Should throw exception");
-        } catch (final Exception e) {
-            assertTrue(e instanceof IllegalArgumentException);
-        }
-
-        // null listener registration should fail with NPE
-        try {
-            shardAccess.registerLeaderLocationListener(null);
-            fail("Should throw exception");
-        } catch (final Exception e) {
-            assertTrue(e instanceof NullPointerException);
-        }
-
-        // registering listener on closed shard access should fail with IllegalStateEx
-        final LeaderLocationListener listener2 = mock(LeaderLocationListener.class);
-        shardAccess.close();
-        try {
-            shardAccess.registerLeaderLocationListener(listener2);
-            fail("Should throw exception");
-        } catch (final Exception ex) {
-            assertTrue(ex instanceof IllegalStateException);
-        }
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testOnLeaderLocationChanged() {
-        final LeaderLocationListener listener1 = mock(LeaderLocationListener.class);
-        doThrow(new RuntimeException("Failed")).when(listener1).onLeaderLocationChanged(any());
-        final LeaderLocationListener listener2 = mock(LeaderLocationListener.class);
-        doNothing().when(listener2).onLeaderLocationChanged(any());
-        final LeaderLocationListener listener3 = mock(LeaderLocationListener.class);
-        doNothing().when(listener3).onLeaderLocationChanged(any());
-
-        final LeaderLocationListenerRegistration<?> reg1 = shardAccess.registerLeaderLocationListener(listener1);
-        final LeaderLocationListenerRegistration<?> reg2 = shardAccess.registerLeaderLocationListener(listener2);
-        final LeaderLocationListenerRegistration<?> reg3 = shardAccess.registerLeaderLocationListener(listener3);
-
-        // Error in listener1 should not affect dispatching change to other listeners
-        shardAccess.onLeaderLocationChanged(LeaderLocation.LOCAL);
-        verify(listener1).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
-        verify(listener2).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
-        verify(listener3).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
-
-        // Closed listeners shouldn't see new leader location changes
-        reg1.close();
-        reg2.close();
-        shardAccess.onLeaderLocationChanged(LeaderLocation.REMOTE);
-        verify(listener3).onLeaderLocationChanged(eq(LeaderLocation.REMOTE));
-        verifyNoMoreInteractions(listener1);
-        verifyNoMoreInteractions(listener2);
-
-        // Closed shard access should not dispatch any new events
-        shardAccess.close();
-        shardAccess.onLeaderLocationChanged(LeaderLocation.UNKNOWN);
-        verifyNoMoreInteractions(listener1);
-        verifyNoMoreInteractions(listener2);
-        verifyNoMoreInteractions(listener3);
-
-        reg3.close();
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testGetShardIdentifier() {
-        assertEquals(shardAccess.getShardIdentifier(), TEST_ID);
-
-        // closed shard access should throw illegal state
-        shardAccess.close();
-        try {
-            shardAccess.getShardIdentifier();
-            fail("Exception expected");
-        } catch (final Exception e) {
-            assertTrue(e instanceof IllegalStateException);
-        }
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testGetLeaderLocation() {
-        // new shard access does not know anything about leader location
-        assertEquals(shardAccess.getLeaderLocation(), LeaderLocation.UNKNOWN);
-
-        // we start getting leader location change notifications
-        shardAccess.onLeaderLocationChanged(LeaderLocation.LOCAL);
-        assertEquals(shardAccess.getLeaderLocation(), LeaderLocation.LOCAL);
-
-        shardAccess.onLeaderLocationChanged(LeaderLocation.REMOTE);
-        shardAccess.onLeaderLocationChanged(LeaderLocation.UNKNOWN);
-        assertEquals(shardAccess.getLeaderLocation(), LeaderLocation.UNKNOWN);
-
-        // closed shard access throws illegal state
-        shardAccess.close();
-        try {
-            shardAccess.getLeaderLocation();
-            fail("Should have failed with IllegalStateEx");
-        } catch (Exception e) {
-            assertTrue(e instanceof IllegalStateException);
-        }
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testMakeLeaderLocal() throws Exception {
-        final FiniteDuration timeout = new FiniteDuration(5, TimeUnit.SECONDS);
-        final ActorRef localShardRef = mock(ActorRef.class);
-        final Future<ActorRef> localShardRefFuture = Futures.successful(localShardRef);
-        doReturn(localShardRefFuture).when(context).findLocalShardAsync(any());
-
-        // MakeLeaderLocal will reply with success
-        doReturn(Futures.successful(null)).when(context).executeOperationAsync((ActorRef) any(), any(), any());
-        doReturn(getSystem().dispatcher()).when(context).getClientDispatcher();
-        assertEquals(waitOnAsyncTask(shardAccess.makeLeaderLocal(), timeout), null);
-
-        // MakeLeaderLocal will reply with failure
-        doReturn(Futures.failed(new LeadershipTransferFailedException("Failure")))
-                .when(context).executeOperationAsync((ActorRef) any(), any(), any());
-
-        try {
-            waitOnAsyncTask(shardAccess.makeLeaderLocal(), timeout);
-            fail("makeLeaderLocal operation should not be successful");
-        } catch (final Exception e) {
-            assertTrue(e instanceof LeadershipTransferFailedException);
-        }
-
-        // the local shard is not even found
-        doReturn(Futures.failed(new LocalShardNotFoundException("Local shard not found")))
-                .when(context).findLocalShardAsync(any());
-
-        try {
-            waitOnAsyncTask(shardAccess.makeLeaderLocal(), timeout);
-            fail("makeLeaderLocal operation should not be successful");
-        } catch (final Exception e) {
-            assertTrue(e instanceof LeadershipTransferFailedException);
-            assertTrue(e.getCause() instanceof LocalShardNotFoundException);
-        }
-
-        // closed shard access should throw IllegalStateEx
-        shardAccess.close();
-        try {
-            shardAccess.makeLeaderLocal();
-            fail("Should have thrown IllegalStateEx. ShardAccess is closed");
-        } catch (final Exception e) {
-            assertTrue(e instanceof IllegalStateException);
-        }
-    }
-}
\ No newline at end of file
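
The registration test above is typical of the try/fail/catch blocks that forced the @SuppressWarnings("checkstyle:IllegalCatch") annotations throughout this deleted class. Purely as an illustration (the commit deletes the file rather than migrating it), the same checks read more directly with assertThrows, available since JUnit 4.13:

    // Hypothetical rewrite for illustration only; not part of this commit.
    // Requires: import static org.junit.Assert.assertThrows;
    @Test
    public void testRegisterLeaderLocationListener() {
        final LeaderLocationListener listener = mock(LeaderLocationListener.class);

        // first registration succeeds
        shardAccess.registerLeaderLocationListener(listener);

        // duplicate and null registrations are rejected
        assertThrows(IllegalArgumentException.class,
            () -> shardAccess.registerLeaderLocationListener(listener));
        assertThrows(NullPointerException.class,
            () -> shardAccess.registerLeaderLocationListener(null));

        // a closed shard access rejects further registrations
        shardAccess.close();
        assertThrows(IllegalStateException.class,
            () -> shardAccess.registerLeaderLocationListener(mock(LeaderLocationListener.class)));
    }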
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardFrontendTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardFrontendTest.java
deleted file mode 100644 (file)
index fb701de..0000000
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright (c) 2016, 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.mdsal.dom.broker.ShardedDOMDataTree;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-
-public class DistributedShardFrontendTest {
-
-    private static final DOMDataTreeIdentifier ROOT =
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
-    private static final ListenableFuture<Object> SUCCESS_FUTURE = Futures.immediateFuture(null);
-
-    private ShardedDOMDataTree shardedDOMDataTree;
-
-    private DataStoreClient client;
-    private ClientLocalHistory clientHistory;
-    private ClientTransaction clientTransaction;
-    private DOMDataTreeWriteCursor cursor;
-
-    private static final YangInstanceIdentifier OUTER_LIST_YID = TestModel.OUTER_LIST_PATH.node(
-            new NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1));
-    private static final DOMDataTreeIdentifier OUTER_LIST_ID =
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, OUTER_LIST_YID);
-
-    @Captor
-    private ArgumentCaptor<YangInstanceIdentifier.PathArgument> pathArgumentCaptor;
-    @Captor
-    private ArgumentCaptor<NormalizedNode<?, ?>> nodeCaptor;
-
-    private DOMStoreThreePhaseCommitCohort commitCohort;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        shardedDOMDataTree = new ShardedDOMDataTree();
-        client = mock(DataStoreClient.class);
-        cursor = mock(DOMDataTreeWriteCursor.class);
-        clientTransaction = mock(ClientTransaction.class);
-        clientHistory = mock(ClientLocalHistory.class);
-        commitCohort = mock(DOMStoreThreePhaseCommitCohort.class);
-
-        doReturn(SUCCESS_FUTURE).when(commitCohort).canCommit();
-        doReturn(SUCCESS_FUTURE).when(commitCohort).preCommit();
-        doReturn(SUCCESS_FUTURE).when(commitCohort).commit();
-        doReturn(SUCCESS_FUTURE).when(commitCohort).abort();
-
-        doReturn(clientTransaction).when(client).createTransaction();
-        doReturn(clientTransaction).when(clientHistory).createTransaction();
-        doNothing().when(clientHistory).close();
-
-        doNothing().when(client).close();
-        doReturn(clientHistory).when(client).createLocalHistory();
-
-        doReturn(cursor).when(clientTransaction).openCursor();
-        doNothing().when(cursor).close();
-        doNothing().when(cursor).write(any(), any());
-        doNothing().when(cursor).merge(any(), any());
-        doNothing().when(cursor).delete(any());
-
-        doReturn(commitCohort).when(clientTransaction).ready();
-    }
-
-    @Test
-    public void testClientTransaction() throws Exception {
-
-        final DistributedDataStore distributedDataStore = mock(DistributedDataStore.class);
-        final ActorUtils context = mock(ActorUtils.class);
-        doReturn(context).when(distributedDataStore).getActorUtils();
-        doReturn(SchemaContextHelper.full()).when(context).getSchemaContext();
-
-        final DistributedShardFrontend rootShard = new DistributedShardFrontend(distributedDataStore, client, ROOT);
-
-        try (DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT))) {
-            shardedDOMDataTree.registerDataTreeShard(ROOT, rootShard, producer);
-        }
-
-        final DataStoreClient outerListClient = mock(DataStoreClient.class);
-        final ClientTransaction outerListClientTransaction = mock(ClientTransaction.class);
-        final ClientLocalHistory outerListClientHistory = mock(ClientLocalHistory.class);
-        final DOMDataTreeWriteCursor outerListCursor = mock(DOMDataTreeWriteCursor.class);
-
-        doNothing().when(outerListCursor).close();
-        doNothing().when(outerListCursor).write(any(), any());
-        doNothing().when(outerListCursor).merge(any(), any());
-        doNothing().when(outerListCursor).delete(any());
-
-        doReturn(outerListCursor).when(outerListClientTransaction).openCursor();
-        doReturn(outerListClientTransaction).when(outerListClient).createTransaction();
-        doReturn(outerListClientHistory).when(outerListClient).createLocalHistory();
-        doReturn(outerListClientTransaction).when(outerListClientHistory).createTransaction();
-
-        doReturn(commitCohort).when(outerListClientTransaction).ready();
-
-        doNothing().when(outerListClientHistory).close();
-        doNothing().when(outerListClient).close();
-
-        final DistributedShardFrontend outerListShard = new DistributedShardFrontend(
-                distributedDataStore, outerListClient, OUTER_LIST_ID);
-        try (DOMDataTreeProducer producer =
-                     shardedDOMDataTree.createProducer(Collections.singletonList(OUTER_LIST_ID))) {
-            shardedDOMDataTree.registerDataTreeShard(OUTER_LIST_ID, outerListShard, producer);
-        }
-
-        final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT));
-        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(false);
-        final DOMDataTreeWriteCursor txCursor = tx.createCursor(ROOT);
-
-        assertNotNull(txCursor);
-        txCursor.write(TestModel.TEST_PATH.getLastPathArgument(), createCrossShardContainer());
-
-        // check that the lower shard got the correct modification
-        verify(outerListCursor, times(2)).write(pathArgumentCaptor.capture(), nodeCaptor.capture());
-
-        final YangInstanceIdentifier.PathArgument expectedYid = new NodeIdentifier(TestModel.ID_QNAME);
-        final YangInstanceIdentifier.PathArgument actualIdYid = pathArgumentCaptor.getAllValues().get(0);
-        assertEquals(expectedYid, actualIdYid);
-
-        final YangInstanceIdentifier.PathArgument expectedInnerYid = new NodeIdentifier(TestModel.INNER_LIST_QNAME);
-        final YangInstanceIdentifier.PathArgument actualInnerListYid = pathArgumentCaptor.getAllValues().get(1);
-        assertEquals(expectedInnerYid, actualInnerListYid);
-
-        final LeafNode<Integer> actualIdNode = (LeafNode<Integer>) nodeCaptor.getAllValues().get(0);
-        assertEquals(ImmutableNodes.leafNode(TestModel.ID_QNAME, 1), actualIdNode);
-
-        final MapNode actualInnerListNode = (MapNode) nodeCaptor.getAllValues().get(1);
-        assertEquals(createInnerMapNode(1), actualInnerListNode);
-
-        txCursor.close();
-        tx.commit().get();
-
-        verify(commitCohort, times(2)).canCommit();
-        verify(commitCohort, times(2)).preCommit();
-        verify(commitCohort, times(2)).commit();
-    }
-
-    private static MapNode createInnerMapNode(final int id) {
-        final MapEntryNode listEntry = ImmutableNodes
-                .mapEntryBuilder(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "name-" + id)
-                .withChild(ImmutableNodes.leafNode(TestModel.NAME_QNAME, "name-" + id))
-                .withChild(ImmutableNodes.leafNode(TestModel.VALUE_QNAME, "value-" + id))
-                .build();
-
-        return ImmutableNodes.mapNodeBuilder(TestModel.INNER_LIST_QNAME).withChild(listEntry).build();
-    }
-
-    private static ContainerNode createCrossShardContainer() {
-
-        final MapEntryNode outerListEntry1 =
-                ImmutableNodes.mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
-                        .withChild(createInnerMapNode(1))
-                        .build();
-        final MapEntryNode outerListEntry2 =
-                ImmutableNodes.mapEntryBuilder(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2)
-                        .withChild(createInnerMapNode(2))
-                        .build();
-
-        final MapNode outerList = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
-                .withChild(outerListEntry1)
-                .withChild(outerListEntry2)
-                .build();
-
-        final ContainerNode testContainer = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
-                .withChild(outerList)
-                .build();
-
-        return testContainer;
-    }
-}
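
A side note on the setUp() above: MockitoAnnotations.initMocks(this) is deprecated as of Mockito 3.4 in favour of openMocks(this). As an illustration only (the file is deleted here, not migrated), the modern shape of that setup would be:

    // Illustrative sketch; assumes Mockito 3.4 or newer.
    private AutoCloseable mocks;

    @Before
    public void setUp() {
        mocks = MockitoAnnotations.openMocks(this);
        // ... stubbing as in the deleted test ...
    }

    @After
    public void tearDown() throws Exception {
        mocks.close();
    }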
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeRemotingTest.java
deleted file mode 100644 (file)
index a4bda16..0000000
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doReturn;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.findLocalShard;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.waitUntilShardIsDown;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.Address;
-import akka.actor.AddressFromURIString;
-import akka.cluster.Cluster;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.Lists;
-import com.typesafe.config.ConfigFactory;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.datastore.AbstractTest;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DistributedShardedDOMDataTreeRemotingTest extends AbstractTest {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);
-
-    private static final Address MEMBER_1_ADDRESS =
-            AddressFromURIString.parse("akka://cluster-test@127.0.0.1:2558");
-
-    private static final DOMDataTreeIdentifier TEST_ID =
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-
-    private static final String MODULE_SHARDS_CONFIG = "module-shards-default.conf";
-
-    private ActorSystem leaderSystem;
-    private ActorSystem followerSystem;
-
-
-    private final Builder leaderDatastoreContextBuilder =
-            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
-
-    private final DatastoreContext.Builder followerDatastoreContextBuilder =
-            DatastoreContext.newBuilder().shardHeartbeatIntervalInMillis(100).shardElectionTimeoutFactor(5);
-
-    private DistributedDataStore leaderConfigDatastore;
-    private DistributedDataStore leaderOperDatastore;
-
-    private DistributedDataStore followerConfigDatastore;
-    private DistributedDataStore followerOperDatastore;
-
-
-    private IntegrationTestKit followerTestKit;
-    private IntegrationTestKit leaderTestKit;
-    private DistributedShardedDOMDataTree leaderShardFactory;
-
-    private DistributedShardedDOMDataTree followerShardFactory;
-    private ActorSystemProvider leaderSystemProvider;
-    private ActorSystemProvider followerSystemProvider;
-
-    @Before
-    public void setUp() {
-        InMemoryJournal.clear();
-        InMemorySnapshotStore.clear();
-
-        leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
-        Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
-
-        followerSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member2"));
-        Cluster.get(followerSystem).join(MEMBER_1_ADDRESS);
-
-        leaderSystemProvider = Mockito.mock(ActorSystemProvider.class);
-        doReturn(leaderSystem).when(leaderSystemProvider).getActorSystem();
-
-        followerSystemProvider = Mockito.mock(ActorSystemProvider.class);
-        doReturn(followerSystem).when(followerSystemProvider).getActorSystem();
-
-    }
-
-    @After
-    public void tearDown() {
-        if (leaderConfigDatastore != null) {
-            leaderConfigDatastore.close();
-        }
-        if (leaderOperDatastore != null) {
-            leaderOperDatastore.close();
-        }
-
-        if (followerConfigDatastore != null) {
-            followerConfigDatastore.close();
-        }
-        if (followerOperDatastore != null) {
-            followerOperDatastore.close();
-        }
-
-        TestKit.shutdownActorSystem(leaderSystem, true);
-        TestKit.shutdownActorSystem(followerSystem, true);
-
-        InMemoryJournal.clear();
-        InMemorySnapshotStore.clear();
-    }
-
-    private void initEmptyDatastores() throws Exception {
-        initEmptyDatastores(MODULE_SHARDS_CONFIG);
-    }
-
-    private void initEmptyDatastores(final String moduleShardsConfig) throws Exception {
-        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);
-
-        leaderConfigDatastore = leaderTestKit.setupDistributedDataStore(
-                "config", moduleShardsConfig, true,
-                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-        leaderOperDatastore = leaderTestKit.setupDistributedDataStore(
-                "operational", moduleShardsConfig, true,
-                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
-        leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
-                leaderOperDatastore,
-                leaderConfigDatastore);
-
-        followerTestKit = new IntegrationTestKit(followerSystem, followerDatastoreContextBuilder);
-
-        followerConfigDatastore = followerTestKit.setupDistributedDataStore(
-                "config", moduleShardsConfig, true, SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-        followerOperDatastore = followerTestKit.setupDistributedDataStore(
-                "operational", moduleShardsConfig, true,
-                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
-        followerShardFactory = new DistributedShardedDOMDataTree(followerSystemProvider,
-                followerOperDatastore,
-                followerConfigDatastore);
-
-        followerTestKit.waitForMembersUp("member-1");
-
-        LOG.info("Initializing leader DistributedShardedDOMDataTree");
-        leaderShardFactory.init();
-
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
-
-        leaderTestKit.waitUntilLeader(leaderOperDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
-
-        LOG.info("Initializing follower DistributedShardedDOMDataTree");
-        followerShardFactory.init();
-    }
-
-    @Test
-    public void testProducerRegistrations() throws Exception {
-        LOG.info("testProducerRegistrations starting");
-        initEmptyDatastores();
-
-        leaderTestKit.waitForMembersUp("member-2");
-
-        // TODO refactor shard creation and verification into its own method
-        final DistributedShardRegistration shardRegistration =
-                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
-        final ActorRef leaderShardManager = leaderConfigDatastore.getActorUtils().getShardManager();
-
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
-
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
-
-        final Set<String> peers  = new HashSet<>();
-        IntegrationTestKit.verifyShardState(leaderConfigDatastore,
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
-                        peers.addAll(onDemandShardState.getPeerAddresses().values()));
-        assertEquals(peers.size(), 1);
-
-        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
-        try {
-            followerShardFactory.createProducer(Collections.singleton(TEST_ID));
-            fail("Producer should be already registered on the other node");
-        } catch (final IllegalArgumentException e) {
-            assertTrue(e.getMessage().contains("is attached to producer"));
-        }
-
-        producer.close();
-
-        final DOMDataTreeProducer followerProducer =
-                followerShardFactory.createProducer(Collections.singleton(TEST_ID));
-        try {
-            leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
-            fail("Producer should be already registered on the other node");
-        } catch (final IllegalArgumentException e) {
-            assertTrue(e.getMessage().contains("is attached to producer"));
-        }
-
-        followerProducer.close();
-        // try to create a shard on an already registered prefix on follower
-        try {
-            waitOnAsyncTask(followerShardFactory.createDistributedShard(
-                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-            fail("This prefix already should have a shard registration that was forwarded from the other node");
-        } catch (final DOMDataTreeShardingConflictException e) {
-            assertTrue(e.getMessage().contains("is already occupied by another shard"));
-        }
-
-        shardRegistration.close().toCompletableFuture().get();
-
-        LOG.info("testProducerRegistrations ending");
-    }
-
-    @Test
-    public void testWriteIntoMultipleShards() throws Exception {
-        LOG.info("testWriteIntoMultipleShards starting");
-        initEmptyDatastores();
-
-        leaderTestKit.waitForMembersUp("member-2");
-
-        LOG.debug("registering first shard");
-        final DistributedShardRegistration shardRegistration =
-                waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                        DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-        findLocalShard(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
-        final Set<String> peers  = new HashSet<>();
-        IntegrationTestKit.verifyShardState(leaderConfigDatastore,
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
-                        peers.addAll(onDemandShardState.getPeerAddresses().values()));
-        assertEquals(peers.size(), 1);
-
-        LOG.debug("Got after waiting for nonleader");
-        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
-
-        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
-        final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
-        Assert.assertNotNull(cursor);
-        final YangInstanceIdentifier nameId =
-                YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
-        cursor.write(nameId.getLastPathArgument(),
-                ImmutableLeafNodeBuilder.<String>create().withNodeIdentifier(
-                        new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build());
-
-        cursor.close();
-        LOG.warn("Got to pre submit");
-
-        tx.commit().get();
-
-        shardRegistration.close().toCompletableFuture().get();
-
-        LOG.info("testWriteIntoMultipleShards ending");
-    }
-
-    @Test
-    public void testMultipleShardRegistrations() throws Exception {
-        LOG.info("testMultipleShardRegistrations starting");
-        initEmptyDatastores();
-
-        final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        final DistributedShardRegistration reg2 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_CONTAINER_PATH),
-                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        final DistributedShardRegistration reg3 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.INNER_LIST_PATH),
-                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        final DistributedShardRegistration reg4 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.JUNK_PATH),
-                Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
-        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
-
-        // check leader has local shards
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
-
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
-
-        assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));
-
-        // check follower has local shards
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
-
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
-
-        assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));
-
-        LOG.debug("Closing registrations");
-
-        reg1.close().toCompletableFuture().get();
-        reg2.close().toCompletableFuture().get();
-        reg3.close().toCompletableFuture().get();
-        reg4.close().toCompletableFuture().get();
-
-        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
-        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
-
-        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
-
-        waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
-
-        LOG.debug("All leader shards gone");
-
-        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
-        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
-
-        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
-
-        waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
-
-        LOG.debug("All follower shards gone");
-        LOG.info("testMultipleShardRegistrations ending");
-    }
-
-    @Test
-    public void testMultipleRegistrationsAtOnePrefix() throws Exception {
-        LOG.info("testMultipleRegistrationsAtOnePrefix starting");
-        initEmptyDatastores();
-
-        for (int i = 0; i < 5; i++) {
-            LOG.info("Round {}", i);
-            final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
-                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-            leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
-            assertNotNull(findLocalShard(leaderConfigDatastore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-            assertNotNull(findLocalShard(followerConfigDatastore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-
-            final Set<String> peers  = new HashSet<>();
-            IntegrationTestKit.verifyShardState(leaderConfigDatastore,
-                    ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()), onDemandShardState ->
-                            peers.addAll(onDemandShardState.getPeerAddresses().values()));
-            assertEquals(peers.size(), 1);
-
-            waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-            waitUntilShardIsDown(leaderConfigDatastore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
-            waitUntilShardIsDown(followerConfigDatastore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-        }
-
-        LOG.info("testMultipleRegistrationsAtOnePrefix ending");
-    }
-
-    @Test
-    public void testInitialBootstrappingWithNoModuleShards() throws Exception {
-        LOG.info("testInitialBootstrappingWithNoModuleShards starting");
-        initEmptyDatastores("module-shards-default-member-1.conf");
-
-        // We just verify the DistributedShardedDOMDataTree initialized without error.
-    }
-}
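
One pattern worth noting in the assertions above: calls such as assertEquals(peers.size(), 1) pass the actual value first, whereas JUnit's contract is assertEquals(expected, actual), which determines how a failure is reported. Illustration only, not part of this commit:

    // Conventional argument order for the peer-count check above.
    assertEquals(1, peers.size());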
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/DistributedShardedDOMDataTreeTest.java
deleted file mode 100644 (file)
index d8551ae..0000000
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.sharding;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.anyCollection;
-import static org.mockito.ArgumentMatchers.anyMap;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.findLocalShard;
-import static org.opendaylight.controller.cluster.datastore.IntegrationTestKit.waitUntilShardIsDown;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.Address;
-import akka.actor.AddressFromURIString;
-import akka.actor.Props;
-import akka.cluster.Cluster;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.typesafe.config.ConfigFactory;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.CompletionStage;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
-import org.opendaylight.controller.cluster.datastore.AbstractTest;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext;
-import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
-import org.opendaylight.controller.cluster.datastore.DistributedDataStore;
-import org.opendaylight.controller.cluster.datastore.IntegrationTestKit;
-import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
-import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.controller.cluster.raft.utils.InMemoryJournal;
-import org.opendaylight.controller.cluster.raft.utils.InMemorySnapshotStore;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
-import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DistributedShardedDOMDataTreeTest extends AbstractTest {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DistributedShardedDOMDataTreeRemotingTest.class);
-
-    private static final Address MEMBER_1_ADDRESS =
-            AddressFromURIString.parse("akka.tcp://cluster-test@127.0.0.1:2558");
-
-    private static final DOMDataTreeIdentifier TEST_ID =
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-
-    private static final DOMDataTreeIdentifier INNER_LIST_ID =
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
-                    YangInstanceIdentifier.create(getOuterListIdFor(0).getPathArguments())
-                            .node(TestModel.INNER_LIST_QNAME));
-    private static final Set<MemberName> SINGLE_MEMBER = Collections.singleton(AbstractTest.MEMBER_NAME);
-
-    private static final String MODULE_SHARDS_CONFIG = "module-shards-default-member-1.conf";
-
-    private ActorSystem leaderSystem;
-
-    private final Builder leaderDatastoreContextBuilder =
-            DatastoreContext.newBuilder()
-                    .shardHeartbeatIntervalInMillis(100)
-                    .shardElectionTimeoutFactor(2)
-                    .logicalStoreType(LogicalDatastoreType.CONFIGURATION);
-
-    private DistributedDataStore leaderDistributedDataStore;
-    private DistributedDataStore operDistributedDatastore;
-    private IntegrationTestKit leaderTestKit;
-
-    private DistributedShardedDOMDataTree leaderShardFactory;
-
-    @Captor
-    private ArgumentCaptor<Collection<DataTreeCandidate>> captorForChanges;
-    @Captor
-    private ArgumentCaptor<Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>>> captorForSubtrees;
-
-    private ActorSystemProvider leaderSystemProvider;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        InMemoryJournal.clear();
-        InMemorySnapshotStore.clear();
-
-        leaderSystem = ActorSystem.create("cluster-test", ConfigFactory.load().getConfig("Member1"));
-        Cluster.get(leaderSystem).join(MEMBER_1_ADDRESS);
-
-        leaderSystemProvider = Mockito.mock(ActorSystemProvider.class);
-        doReturn(leaderSystem).when(leaderSystemProvider).getActorSystem();
-    }
-
-    @After
-    public void tearDown() {
-        if (leaderDistributedDataStore != null) {
-            leaderDistributedDataStore.close();
-        }
-
-        if (operDistributedDatastore != null) {
-            operDistributedDatastore.close();
-        }
-
-        TestKit.shutdownActorSystem(leaderSystem);
-
-        InMemoryJournal.clear();
-        InMemorySnapshotStore.clear();
-    }
-
-    private void initEmptyDatastores() throws Exception {
-        leaderTestKit = new IntegrationTestKit(leaderSystem, leaderDatastoreContextBuilder);
-
-        leaderDistributedDataStore = leaderTestKit.setupDistributedDataStore(
-                "config", MODULE_SHARDS_CONFIG, "empty-modules.conf", true,
-                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
-        operDistributedDatastore = leaderTestKit.setupDistributedDataStore(
-                "operational", MODULE_SHARDS_CONFIG, "empty-modules.conf",true,
-                SchemaContextHelper.distributedShardedDOMDataTreeSchemaContext());
-
-        leaderShardFactory = new DistributedShardedDOMDataTree(leaderSystemProvider,
-                operDistributedDatastore,
-                leaderDistributedDataStore);
-
-        leaderShardFactory.init();
-    }
-
-
-    @Test
-    public void testWritesIntoDefaultShard() throws Exception {
-        initEmptyDatastores();
-
-        final DOMDataTreeIdentifier configRoot =
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
-
-        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(configRoot));
-
-        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
-        final DOMDataTreeWriteCursor cursor =
-                tx.createCursor(new DOMDataTreeIdentifier(
-                        LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY));
-        Assert.assertNotNull(cursor);
-
-        final ContainerNode test =
-                ImmutableContainerNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME)).build();
-
-        cursor.write(test.getIdentifier(), test);
-        cursor.close();
-
-        tx.commit().get();
-    }
-
-    @Test
-    public void testSingleNodeWritesAndRead() throws Exception {
-        initEmptyDatastores();
-
-        final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
-                leaderShardFactory.createDistributedShard(TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
-        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
-
-        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
-        final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
-        Assert.assertNotNull(cursor);
-        final YangInstanceIdentifier nameId =
-                YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
-        final LeafNode<String> valueToCheck = ImmutableLeafNodeBuilder.<String>create().withNodeIdentifier(
-                new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build();
-        LOG.debug("Writing data {} at {}, cursor {}", nameId.getLastPathArgument(), valueToCheck, cursor);
-        cursor.write(nameId.getLastPathArgument(),
-                valueToCheck);
-
-        cursor.close();
-        LOG.debug("Got to pre submit");
-
-        tx.commit().get();
-
-        final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
-        doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());
-
-        leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(TEST_ID),
-                true, Collections.emptyList());
-
-        verify(mockedDataTreeListener, timeout(1000).times(1)).onDataTreeChanged(captorForChanges.capture(),
-                captorForSubtrees.capture());
-        final List<Collection<DataTreeCandidate>> capturedValue = captorForChanges.getAllValues();
-
-        final Optional<NormalizedNode<?, ?>> dataAfter =
-                capturedValue.get(0).iterator().next().getRootNode().getDataAfter();
-
-        final NormalizedNode<?,?> expected = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME)).withChild(valueToCheck).build();
-        assertEquals(expected, dataAfter.get());
-
-        verifyNoMoreInteractions(mockedDataTreeListener);
-
-        final String shardName = ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier());
-        LOG.debug("Creating distributed datastore client for shard {}", shardName);
-
-        final ActorUtils actorUtils = leaderDistributedDataStore.getActorUtils();
-        final Props distributedDataStoreClientProps =
-                SimpleDataStoreClientActor.props(actorUtils.getCurrentMemberName(), "Shard-" + shardName, actorUtils,
-                    shardName);
-
-        final ActorRef clientActor = leaderSystem.actorOf(distributedDataStoreClientProps);
-        final DataStoreClient distributedDataStoreClient = SimpleDataStoreClientActor
-                    .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
-
-        final ClientLocalHistory localHistory = distributedDataStoreClient.createLocalHistory();
-        final ClientTransaction tx2 = localHistory.createTransaction();
-        final FluentFuture<Optional<NormalizedNode<?, ?>>> read = tx2.read(YangInstanceIdentifier.EMPTY);
-
-        final Optional<NormalizedNode<?, ?>> optional = read.get();
-        tx2.abort();
-        localHistory.close();
-
-        shardRegistration.close().toCompletableFuture().get();
-
-    }
-
-    @Test
-    public void testMultipleWritesIntoSingleMapEntry() throws Exception {
-        initEmptyDatastores();
-
-        final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
-                leaderShardFactory.createDistributedShard(TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));
-
-        LOG.warn("Got after waiting for nonleader");
-        final ActorRef leaderShardManager = leaderDistributedDataStore.getActorUtils().getShardManager();
-
-        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
-        final YangInstanceIdentifier oid1 = getOuterListIdFor(0);
-        final DOMDataTreeIdentifier outerListPath = new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, oid1);
-
-        final DistributedShardRegistration outerListShardReg = waitOnAsyncTask(
-                leaderShardFactory.createDistributedShard(outerListPath, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
-                ClusterUtils.getCleanShardName(outerListPath.getRootIdentifier()));
-
-        final DOMDataTreeProducer shardProducer = leaderShardFactory.createProducer(
-                Collections.singletonList(outerListPath));
-
-        final DOMDataTreeCursorAwareTransaction tx = shardProducer.createTransaction(false);
-        final DOMDataTreeWriteCursor cursor =
-                tx.createCursor(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, oid1));
-        assertNotNull(cursor);
-
-        MapNode innerList = ImmutableMapNodeBuilder
-                .create()
-                .withNodeIdentifier(new NodeIdentifier(TestModel.INNER_LIST_QNAME))
-                .build();
-
-        cursor.write(new NodeIdentifier(TestModel.INNER_LIST_QNAME), innerList);
-        cursor.close();
-        tx.commit().get();
-
-        final ArrayList<ListenableFuture<?>> futures = new ArrayList<>();
-        for (int i = 0; i < 1000; i++) {
-            final Collection<MapEntryNode> innerListMapEntries = createInnerListMapEntries(1000, "run-" + i);
-            for (final MapEntryNode innerListMapEntry : innerListMapEntries) {
-                final DOMDataTreeCursorAwareTransaction tx1 = shardProducer.createTransaction(false);
-                final DOMDataTreeWriteCursor cursor1 = tx1.createCursor(
-                        new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
-                                oid1.node(new NodeIdentifier(TestModel.INNER_LIST_QNAME))));
-                cursor1.write(innerListMapEntry.getIdentifier(), innerListMapEntry);
-                cursor1.close();
-                futures.add(tx1.commit());
-            }
-        }
-
-        futures.get(futures.size() - 1).get();
-
-        final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
-        doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());
-
-        leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(INNER_LIST_ID),
-                true, Collections.emptyList());
-
-        verify(mockedDataTreeListener, timeout(1000).times(1)).onDataTreeChanged(captorForChanges.capture(),
-                captorForSubtrees.capture());
-        verifyNoMoreInteractions(mockedDataTreeListener);
-        final List<Collection<DataTreeCandidate>> capturedValue = captorForChanges.getAllValues();
-
-        final NormalizedNode<?,?> expected =
-                ImmutableMapNodeBuilder
-                        .create()
-                        .withNodeIdentifier(new NodeIdentifier(TestModel.INNER_LIST_QNAME))
-                                // only the values from the last run should be present
-                        .withValue(createInnerListMapEntries(1000, "run-999"))
-                        .build();
-
-        assertEquals("List values dont match the expected values from the last run",
-                expected, capturedValue.get(0).iterator().next().getRootNode().getDataAfter().get());
-
-    }
-
-    // top level shard at TEST element, with subshards on each outer-list map entry
-    @Test
-    @Ignore
-    public void testMultipleShardLevels() throws Exception {
-        initEmptyDatastores();
-
-        final DistributedShardRegistration testShardReg = waitOnAsyncTask(
-                leaderShardFactory.createDistributedShard(TEST_ID, SINGLE_MEMBER),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        final ArrayList<DistributedShardRegistration> registrations = new ArrayList<>();
-        final int listSize = 5;
-        for (int i = 0; i < listSize; i++) {
-            final YangInstanceIdentifier entryYID = getOuterListIdFor(i);
-            final CompletionStage<DistributedShardRegistration> future = leaderShardFactory.createDistributedShard(
-                    new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, entryYID), SINGLE_MEMBER);
-
-            registrations.add(waitOnAsyncTask(future, DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION));
-        }
-
-        final DOMDataTreeIdentifier rootId =
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
-        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singletonList(
-                rootId));
-
-        DOMDataTreeCursorAwareTransaction transaction = producer.createTransaction(false);
-
-        DOMDataTreeWriteCursor cursor = transaction.createCursor(rootId);
-        assertNotNull(cursor);
-
-        final MapNode outerList =
-                ImmutableMapNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
-
-        final ContainerNode testNode =
-                ImmutableContainerNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
-                        .withChild(outerList)
-                        .build();
-
-        cursor.write(testNode.getIdentifier(), testNode);
-
-        cursor.close();
-        transaction.commit().get();
-
-        final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
-        doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());
-
-        final MapNode wholeList = ImmutableMapNodeBuilder.create(outerList)
-                .withValue(createOuterEntries(listSize, "testing-values")).build();
-
-        transaction = producer.createTransaction(false);
-        cursor = transaction.createCursor(TEST_ID);
-        assertNotNull(cursor);
-
-        cursor.write(wholeList.getIdentifier(), wholeList);
-        cursor.close();
-
-        transaction.commit().get();
-
-        leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(TEST_ID),
-                true, Collections.emptyList());
-
-        verify(mockedDataTreeListener, timeout(35000).atLeast(2)).onDataTreeChanged(captorForChanges.capture(),
-                captorForSubtrees.capture());
-        verifyNoMoreInteractions(mockedDataTreeListener);
-        final List<Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>>> allSubtrees = captorForSubtrees.getAllValues();
-
-        final Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>> lastSubtree = allSubtrees.get(allSubtrees.size() - 1);
-
-        final NormalizedNode<?, ?> actual = lastSubtree.get(TEST_ID);
-        assertNotNull(actual);
-
-        final NormalizedNode<?, ?> expected =
-                ImmutableContainerNodeBuilder.create()
-                        .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
-                        .withChild(ImmutableMapNodeBuilder.create(outerList)
-                                .withValue(createOuterEntries(listSize, "testing-values")).build())
-                        .build();
-
-
-        for (final DistributedShardRegistration registration : registrations) {
-            waitOnAsyncTask(registration.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-        }
-
-        waitOnAsyncTask(testShardReg.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        assertEquals(expected, actual);
-    }
-
-    @Test
-    public void testMultipleRegistrationsAtOnePrefix() throws Exception {
-        initEmptyDatastores();
-
-        for (int i = 0; i < 10; i++) {
-            LOG.debug("Round {}", i);
-            final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
-                    DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-            leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
-            assertNotNull(findLocalShard(leaderDistributedDataStore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-            waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-            waitUntilShardIsDown(leaderDistributedDataStore.getActorUtils(),
-                    ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-        }
-    }
-
-    @Test
-    public void testCDSDataTreeProducer() throws Exception {
-        initEmptyDatastores();
-
-        final DistributedShardRegistration reg1 = waitOnAsyncTask(leaderShardFactory.createDistributedShard(
-                TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
-                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-
-        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
-
-        assertNotNull(findLocalShard(leaderDistributedDataStore.getActorUtils(),
-                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
-
-
-        final DOMDataTreeIdentifier configRoot =
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
-        final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(configRoot));
-
-        assertTrue(producer instanceof CDSDataTreeProducer);
-
-        final CDSDataTreeProducer cdsProducer = (CDSDataTreeProducer) producer;
-        CDSShardAccess shardAccess = cdsProducer.getShardAccess(TEST_ID);
-        assertEquals(shardAccess.getShardIdentifier(), TEST_ID);
-
-        shardAccess = cdsProducer.getShardAccess(INNER_LIST_ID);
-        assertEquals(TEST_ID, shardAccess.getShardIdentifier());
-
-        shardAccess = cdsProducer.getShardAccess(configRoot);
-        assertEquals(configRoot, shardAccess.getShardIdentifier());
-
-        waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
-    }
-
-    private static Collection<MapEntryNode> createOuterEntries(final int amount, final String valuePrefix) {
-        final Collection<MapEntryNode> ret = new ArrayList<>();
-        for (int i = 0; i < amount; i++) {
-            ret.add(ImmutableNodes.mapEntryBuilder()
-                    .withNodeIdentifier(new NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME,
-                            QName.create(TestModel.OUTER_LIST_QNAME, "id"), i))
-                    .withChild(ImmutableNodes
-                            .leafNode(QName.create(TestModel.OUTER_LIST_QNAME, "id"), i))
-                    .withChild(createWholeInnerList(amount, "outer id: " + i + " " + valuePrefix))
-                    .build());
-        }
-
-        return ret;
-    }
-
-    private static MapNode createWholeInnerList(final int amount, final String valuePrefix) {
-        return ImmutableMapNodeBuilder.create().withNodeIdentifier(new NodeIdentifier(TestModel.INNER_LIST_QNAME))
-                .withValue(createInnerListMapEntries(amount, valuePrefix)).build();
-    }
-
-    private static Collection<MapEntryNode> createInnerListMapEntries(final int amount, final String valuePrefix) {
-        final Collection<MapEntryNode> ret = new ArrayList<>();
-        for (int i = 0; i < amount; i++) {
-            ret.add(ImmutableNodes.mapEntryBuilder()
-                    .withNodeIdentifier(new NodeIdentifierWithPredicates(TestModel.INNER_LIST_QNAME,
-                            QName.create(TestModel.INNER_LIST_QNAME, "name"), Integer.toString(i)))
-                    .withChild(ImmutableNodes
-                            .leafNode(QName.create(TestModel.INNER_LIST_QNAME, "value"), valuePrefix + "-" + i))
-                    .build());
-        }
-
-        return ret;
-    }
-
-    private static YangInstanceIdentifier getOuterListIdFor(final int id) {
-        return TestModel.OUTER_LIST_PATH.node(new NodeIdentifierWithPredicates(
-                TestModel.OUTER_LIST_QNAME, QName.create(TestModel.OUTER_LIST_QNAME, "id"), id));
-    }
-}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/RoleChangeListenerActorTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/sharding/RoleChangeListenerActorTest.java
deleted file mode 100644 (file)
index ca8c05d..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.cluster.sharding;
-
-import static akka.actor.ActorRef.noSender;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import akka.testkit.javadsl.TestKit;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.AbstractActorTest;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocation;
-import org.opendaylight.controller.cluster.dom.api.LeaderLocationListener;
-import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
-import org.opendaylight.controller.cluster.notifications.RegisterRoleChangeListener;
-
-public class RoleChangeListenerActorTest extends AbstractActorTest {
-
-    @Test
-    public void testRegisterRoleChangeListenerOnStart() {
-        final TestKit testKit = new TestKit(getSystem());
-        final LeaderLocationListener listener = mock(LeaderLocationListener.class);
-        final Props props = RoleChangeListenerActor.props(testKit.getRef(), listener);
-
-        getSystem().actorOf(props, "testRegisterRoleChangeListenerOnStart");
-        testKit.expectMsgClass(RegisterRoleChangeListener.class);
-    }
-
-    @Test
-    public void testOnDataTreeChanged() {
-        final LeaderLocationListener listener = mock(LeaderLocationListener.class);
-        doNothing().when(listener).onLeaderLocationChanged(any());
-        final Props props = RoleChangeListenerActor.props(getSystem().deadLetters(), listener);
-
-        final ActorRef subject = getSystem().actorOf(props, "testDataTreeChangedChanged");
-
-        subject.tell(new LeaderStateChanged("member-1", null, (short) 0), noSender());
-        verify(listener, timeout(5000)).onLeaderLocationChanged(eq(LeaderLocation.UNKNOWN));
-
-        subject.tell(new LeaderStateChanged("member-1", "member-1", (short) 0), noSender());
-        verify(listener, timeout(5000)).onLeaderLocationChanged(eq(LeaderLocation.LOCAL));
-
-        subject.tell(new LeaderStateChanged("member-1", "member-2", (short) 0), noSender());
-        verify(listener, timeout(5000)).onLeaderLocationChanged(eq(LeaderLocation.REMOTE));
-    }
-}
\ No newline at end of file
index f633804e66be3f1a5c1b6fe1a8c942f202732765..4b379461617eabc5c9cb18041be0579119fbb8bb 100644 (file)
@@ -7,17 +7,17 @@
  */
 package org.opendaylight.controller.md.cluster.datastore.model;
 
-import java.math.BigInteger;
 import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.Uint64;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
 
 public final class CarsModel {
     public static final QName BASE_QNAME = QName.create(
@@ -32,53 +32,41 @@ public final class CarsModel {
     public static final YangInstanceIdentifier CAR_LIST_PATH = BASE_PATH.node(CAR_QNAME);
 
     private CarsModel() {
-
+        // Hidden on purpose
     }
 
-    public static NormalizedNode<?, ?> create() {
-
-        // Create a list builder
-        CollectionNodeBuilder<MapEntryNode, MapNode> cars =
-            ImmutableMapNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(
-                    CAR_QNAME));
-
-        // Create an entry for the car altima
-        MapEntryNode altima =
-            ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "altima")
-                .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "altima"))
-                .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, new BigInteger("1000")))
-                .build();
-
-        // Create an entry for the car accord
-        MapEntryNode honda =
-            ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "accord")
-                .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "accord"))
-                .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, new BigInteger("2000")))
-                .build();
-
-        cars.withChild(altima);
-        cars.withChild(honda);
-
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .withChild(cars.build())
+    public static ContainerNode create() {
+        return Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+            .withChild(Builders.mapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(CAR_QNAME))
+                // Create an entry for the car altima
+                .withChild(ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "altima")
+                    .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "altima"))
+                    .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf(1000)))
+                    .build())
+                // Create an entry for the car accord
+                .withChild(ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, "accord")
+                    .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, "accord"))
+                    .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, Uint64.valueOf("2000")))
+                    .build())
+                .build())
             .build();
-
     }
 
-    public static NormalizedNode<?, ?> createEmptyCarsList() {
+    public static NormalizedNode createEmptyCarsList() {
         return newCarsNode(newCarsMapNode());
     }
 
     public static ContainerNode newCarsNode(final MapNode carsList) {
-        return ImmutableContainerNodeBuilder.create().withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(
-                BASE_QNAME)).withChild(carsList).build();
+        return Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+            .withChild(carsList)
+            .build();
     }
 
     public static MapNode newCarsMapNode(final MapEntryNode... carEntries) {
-        CollectionNodeBuilder<MapEntryNode, MapNode> builder = ImmutableMapNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CAR_QNAME));
+        var builder = Builders.mapBuilder().withNodeIdentifier(new NodeIdentifier(CAR_QNAME));
         for (MapEntryNode e : carEntries) {
             builder.withChild(e);
         }
@@ -86,17 +74,15 @@ public final class CarsModel {
         return builder.build();
     }
 
-    public static NormalizedNode<?, ?> emptyContainer() {
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .build();
+    public static ContainerNode emptyContainer() {
+        return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(BASE_QNAME)).build();
     }
 
-    public static NormalizedNode<?, ?> newCarMapNode() {
+    public static SystemMapNode newCarMapNode() {
         return ImmutableNodes.mapNodeBuilder(CAR_QNAME).build();
     }
 
-    public static MapEntryNode newCarEntry(final String name, final BigInteger price) {
+    public static MapEntryNode newCarEntry(final String name, final Uint64 price) {
         return ImmutableNodes.mapEntryBuilder(CAR_QNAME, CAR_NAME_QNAME, name)
                 .withChild(ImmutableNodes.leafNode(CAR_NAME_QNAME, name))
                 .withChild(ImmutableNodes.leafNode(CAR_PRICE_QNAME, price)).build();
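
The CarsModel rewrite above swaps BigInteger car prices for the yangtools Uint64 type and replaces the removed builder classes with Builders.containerBuilder()/Builders.mapBuilder(). A minimal sketch of the new price handling, restricted to calls visible in the patch; the class and method names below are illustrative only:

    import org.opendaylight.yangtools.yang.common.Uint64;

    final class Uint64PriceExample {
        private Uint64PriceExample() {
            // utility class
        }

        static Uint64 altimaPrice() {
            // Uint64.valueOf(long), as used for the "altima" entry above
            return Uint64.valueOf(1000);
        }

        static Uint64 accordPrice() {
            // Uint64.valueOf(String), as used for the "accord" entry above
            return Uint64.valueOf("2000");
        }
    }
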
index 17f1c618bc730854f32701b14a1af2df80e42d75..582c5f99761fa148b79d8d2207ae7b4d918aabee 100644 (file)
@@ -10,36 +10,23 @@ package org.opendaylight.controller.md.cluster.datastore.model;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntry;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
 import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapNodeBuilder;
+import static org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes.leafNode;
 
-import java.util.HashSet;
-import java.util.Set;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetEntryNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafSetNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
 public final class CompositeModel {
+    public static final QName TEST_QNAME = QName.create(
+        "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13", "test");
 
-    public static final QName TEST_QNAME = QName
-            .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13", "test");
-
-    public static final QName AUG_QNAME = QName
-            .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:aug", "2014-03-13", "name");
+    public static final QName AUG_QNAME = QName.create(
+        "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:aug", "2014-03-13", "name");
 
     public static final QName AUG_CONTAINER = QName.create(AUG_QNAME, "aug-container");
     public static final QName AUG_INNER_CONTAINER = QName.create(AUG_QNAME, "aug-inner-container");
@@ -98,14 +85,14 @@ public final class CompositeModel {
     private static final String SECOND_GRAND_CHILD_NAME = "second grand child";
 
     private static final MapEntryNode BAR_NODE = mapEntryBuilder(OUTER_LIST_QNAME, ID_QNAME, TWO_ID)
-            .withChild(mapNodeBuilder(INNER_LIST_QNAME)
-                    .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_ONE_NAME))
-                    .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_TWO_NAME))
-                    .build())
-            .build();
+        .withChild(mapNodeBuilder(INNER_LIST_QNAME)
+            .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_ONE_NAME))
+            .withChild(mapEntry(INNER_LIST_QNAME, NAME_QNAME, TWO_TWO_NAME))
+            .build())
+        .build();
 
     private CompositeModel() {
-
+        // Hidden on purpose
     }
 
     public static SchemaContext createTestContext() {
@@ -113,107 +100,49 @@ public final class CompositeModel {
             DATASTORE_TEST_NOTIFICATION_YANG);
     }
 
-    /**
-     * Returns a test document.
-     *
-     * <pre>
-     * test
-     *     outer-list
-     *          id 1
-     *     outer-list
-     *          id 2
-     *          inner-list
-     *                  name "one"
-     *          inner-list
-     *                  name "two"
-     *
-     * </pre>
-     */
-    public static NormalizedNode<?, ?> createDocumentOne(final SchemaContext schemaContext) {
-        return ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(schemaContext.getQName()))
-                .withChild(createTestContainer()).build();
-
-    }
-
     public static ContainerNode createTestContainer() {
-
-        final LeafSetEntryNode<Object> nike = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(
-                        new YangInstanceIdentifier.NodeWithValue<>(QName.create(TEST_QNAME, "shoe"), "nike"))
-                .withValue("nike").build();
-        final LeafSetEntryNode<Object> puma = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(
-                        new YangInstanceIdentifier.NodeWithValue<>(QName.create(TEST_QNAME, "shoe"), "puma"))
-                .withValue("puma").build();
-        final LeafSetNode<Object> shoes = ImmutableLeafSetNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(QName.create(TEST_QNAME, "shoe")))
-                .withChild(nike).withChild(puma).build();
-
-        final LeafSetEntryNode<Object> five = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeWithValue<>(QName.create(TEST_QNAME, "number"), 5))
-                .withValue(5).build();
-        final LeafSetEntryNode<Object> fifteen = ImmutableLeafSetEntryNodeBuilder.create()
-                .withNodeIdentifier(
-                        new YangInstanceIdentifier.NodeWithValue<>(QName.create(TEST_QNAME, "number"), 15))
-                .withValue(15).build();
-        final LeafSetNode<Object> numbers = ImmutableLeafSetNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(QName.create(TEST_QNAME, "number")))
-                .withChild(five).withChild(fifteen).build();
-
-        Set<QName> childAugmentations = new HashSet<>();
-        childAugmentations.add(AUG_QNAME);
-        final YangInstanceIdentifier.AugmentationIdentifier augmentationIdentifier =
-                new YangInstanceIdentifier.AugmentationIdentifier(childAugmentations);
-        final AugmentationNode augmentationNode = Builders.augmentationBuilder()
-                .withNodeIdentifier(augmentationIdentifier).withChild(ImmutableNodes.leafNode(AUG_QNAME, "First Test"))
-                .build();
-        return ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME))
-                .withChild(ImmutableNodes.leafNode(DESC_QNAME, DESC)).withChild(augmentationNode).withChild(shoes)
-                .withChild(numbers).withChild(mapNodeBuilder(OUTER_LIST_QNAME)
-                        .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID)).withChild(BAR_NODE).build())
-                .build();
-
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(leafNode(DESC_QNAME, DESC))
+            .withChild(leafNode(AUG_QNAME, "First Test"))
+            .withChild(ImmutableNodes.<String>newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "shoe")))
+                .withChildValue("nike")
+                .withChildValue("puma")
+                .build())
+            .withChild(ImmutableNodes.<Integer>newSystemLeafSetBuilder()
+                .withNodeIdentifier(new NodeIdentifier(QName.create(TEST_QNAME, "number")))
+                .withChildValue(5)
+                .withChildValue(15)
+                .build())
+            .withChild(mapNodeBuilder(OUTER_LIST_QNAME)
+                .withChild(mapEntry(OUTER_LIST_QNAME, ID_QNAME, ONE_ID))
+                .withChild(BAR_NODE)
+                .build())
+            .build();
     }
 
     public static ContainerNode createFamily() {
-        final DataContainerNodeAttrBuilder<YangInstanceIdentifier.NodeIdentifier, ContainerNode>
-            familyContainerBuilder = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                    new YangInstanceIdentifier.NodeIdentifier(FAMILY_QNAME));
-
-        final CollectionNodeBuilder<MapEntryNode, MapNode> childrenBuilder = mapNodeBuilder(CHILDREN_QNAME);
-
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            firstChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID);
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            secondChildBuilder = mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID);
-
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            firstGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    FIRST_GRAND_CHILD_ID);
-        final DataContainerNodeBuilder<YangInstanceIdentifier.NodeIdentifierWithPredicates, MapEntryNode>
-            secondGrandChildBuilder = mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME,
-                    SECOND_GRAND_CHILD_ID);
-
-        firstGrandChildBuilder.withChild(ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME));
-
-        secondGrandChildBuilder.withChild(ImmutableNodes.leafNode(GRAND_CHILD_NUMBER_QNAME, SECOND_GRAND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(GRAND_CHILD_NAME_QNAME, SECOND_GRAND_CHILD_NAME));
-
-        firstChildBuilder.withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
-                .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChildBuilder.build()).build());
-
-        secondChildBuilder.withChild(ImmutableNodes.leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
-                .withChild(ImmutableNodes.leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
-                .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChildBuilder.build()).build());
-
-        childrenBuilder.withChild(firstChildBuilder.build());
-        childrenBuilder.withChild(secondChildBuilder.build());
+        final var firstGrandChild =
+            mapEntryBuilder(GRAND_CHILDREN_QNAME, GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID)
+                .withChild(leafNode(GRAND_CHILD_NUMBER_QNAME, FIRST_GRAND_CHILD_ID))
+                .withChild(leafNode(GRAND_CHILD_NAME_QNAME, FIRST_GRAND_CHILD_NAME))
+                .build();
 
-        return familyContainerBuilder.withChild(childrenBuilder.build()).build();
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(FAMILY_QNAME))
+            .withChild(mapNodeBuilder(CHILDREN_QNAME)
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, FIRST_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, FIRST_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, FIRST_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChild).build())
+                    .build())
+                .withChild(mapEntryBuilder(CHILDREN_QNAME, CHILD_NUMBER_QNAME, SECOND_CHILD_ID)
+                    .withChild(leafNode(CHILD_NUMBER_QNAME, SECOND_CHILD_ID))
+                    .withChild(leafNode(CHILD_NAME_QNAME, SECOND_CHILD_NAME))
+                    .withChild(mapNodeBuilder(GRAND_CHILDREN_QNAME).withChild(firstGrandChild).build())
+                    .build())
+                .build())
+            .build();
     }
-
 }
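
In CompositeModel above, the per-entry ImmutableLeafSetEntryNodeBuilder/ImmutableLeafSetNodeBuilder plumbing for the shoe and number leaf-lists is replaced by ImmutableNodes.newSystemLeafSetBuilder().withChildValue(...), and the former augmentation node collapses into a plain leaf. A hedged, self-contained sketch of the leaf-set pattern; the QName and class name are stand-ins, not taken from the model:

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
    import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

    final class LeafSetExample {
        // Hypothetical leaf-list QName, for illustration only
        private static final QName SHOE_QNAME = QName.create("urn:example:shoes", "shoe");

        private LeafSetExample() {
            // utility class
        }

        static void demo() {
            var shoes = ImmutableNodes.<String>newSystemLeafSetBuilder()
                .withNodeIdentifier(new NodeIdentifier(SHOE_QNAME))
                .withChildValue("nike")
                .withChildValue("puma")
                .build();
            System.out.println(shoes);
        }
    }
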
index 33ee0a38afe9a4c2ee186f6bcc35e525e3a2c6ec..725faf56f1e5663eb66c9936738495e9683167d3 100644 (file)
@@ -7,15 +7,15 @@
  */
 package org.opendaylight.controller.md.cluster.datastore.model;
 
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
+
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableMapNodeBuilder;
+import org.opendaylight.yangtools.yang.data.api.schema.SystemMapNode;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 
 public final class PeopleModel {
     public static final QName BASE_QNAME = QName.create(
@@ -30,59 +30,45 @@ public final class PeopleModel {
     public static final YangInstanceIdentifier PERSON_LIST_PATH = BASE_PATH.node(PERSON_QNAME);
 
     private PeopleModel() {
-
+        // Hidden on purpose
     }
 
-    public static NormalizedNode<?, ?> create() {
-
-        // Create a list builder
-        CollectionNodeBuilder<MapEntryNode, MapNode> cars =
-            ImmutableMapNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(
-                    PERSON_QNAME));
-
-        // Create an entry for the person jack
-        MapEntryNode jack =
-            ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jack")
-                .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jack"))
-                .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 100L))
-                .build();
-
-        // Create an entry for the person jill
-        MapEntryNode jill =
-            ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jill")
-                .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jill"))
-                .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 200L))
-                .build();
-
-        cars.withChild(jack);
-        cars.withChild(jill);
-
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .withChild(cars.build())
+    public static ContainerNode create() {
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(BASE_QNAME))
+            .withChild(ImmutableNodes.newSystemMapBuilder()
+                .withNodeIdentifier(new NodeIdentifier(PERSON_QNAME))
+                // Create an entry for the person jack
+                .withChild(mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jack")
+                    .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jack"))
+                    .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 100L))
+                    .build())
+                // Create an entry for the person jill
+                .withChild(mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, "jill")
+                    .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, "jill"))
+                    .withChild(ImmutableNodes.leafNode(PERSON_AGE_QNAME, 200L))
+                    .build())
+                .build())
             .build();
-
     }
 
-    public static NormalizedNode<?, ?> emptyContainer() {
-        return ImmutableContainerNodeBuilder.create()
-            .withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(BASE_QNAME))
-            .build();
+    public static ContainerNode emptyContainer() {
+        return ImmutableNodes.newContainerBuilder().withNodeIdentifier(new NodeIdentifier(BASE_QNAME)).build();
     }
 
-    public static NormalizedNode<?, ?> newPersonMapNode() {
-        return ImmutableNodes.mapNodeBuilder(PERSON_QNAME).build();
+    public static SystemMapNode newPersonMapNode() {
+        return ImmutableNodes.newSystemMapBuilder().withNodeIdentifier(new NodeIdentifier(PERSON_QNAME)).build();
     }
 
     public static MapEntryNode newPersonEntry(final String name) {
-        return ImmutableNodes.mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, name)
-                .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, name)).build();
+        return mapEntryBuilder(PERSON_QNAME, PERSON_NAME_QNAME, name)
+            .withChild(ImmutableNodes.leafNode(PERSON_NAME_QNAME, name))
+            .build();
     }
 
     public static YangInstanceIdentifier newPersonPath(final String name) {
         return YangInstanceIdentifier.builder(PERSON_LIST_PATH)
-                .nodeWithKey(PERSON_QNAME, PERSON_NAME_QNAME, name).build();
+            .nodeWithKey(PERSON_QNAME, PERSON_NAME_QNAME, name)
+            .build();
     }
 }
index 7f8b7483a42a62013a38fad7b1a2208373bba5b2..378c9089b70a40b1cd5d7eb2b3677d231249a43f 100644 (file)
@@ -5,12 +5,10 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.md.cluster.datastore.model;
 
-import java.io.File;
 import java.io.InputStream;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
 public final class SchemaContextHelper {
@@ -19,6 +17,8 @@ public final class SchemaContextHelper {
     public static final String PEOPLE_YANG = "/people.yang";
     public static final String CARS_YANG = "/cars.yang";
 
+    private static volatile EffectiveModelContext FULL;
+
     private SchemaContextHelper() {
 
     }
@@ -27,22 +27,21 @@ public final class SchemaContextHelper {
         return SchemaContextHelper.class.getResourceAsStream(yangFileName);
     }
 
-    public static SchemaContext full() {
-        return select(ODL_DATASTORE_TEST_YANG, PEOPLE_YANG, CARS_YANG);
+    public static EffectiveModelContext full() {
+        EffectiveModelContext ret = FULL;
+        if (ret == null) {
+            synchronized (SchemaContextHelper.class) {
+                ret = FULL;
+                if (ret == null) {
+                    ret = FULL = select(ODL_DATASTORE_TEST_YANG, PEOPLE_YANG, CARS_YANG);
+                }
+            }
+        }
+
+        return ret;
     }
 
-    public static SchemaContext select(final String... schemaFiles) {
+    public static EffectiveModelContext select(final String... schemaFiles) {
         return YangParserTestUtils.parseYangResources(SchemaContextHelper.class, schemaFiles);
     }
-
-    public static SchemaContext distributedShardedDOMDataTreeSchemaContext() {
-        // we need prefix-shard-configuration and odl-datastore-test models
-        // for DistributedShardedDOMDataTree tests
-        return YangParserTestUtils.parseYangResources(SchemaContextHelper.class, ODL_DATASTORE_TEST_YANG,
-            "/META-INF/yang/prefix-shard-configuration@2017-01-10.yang");
-    }
-
-    public static SchemaContext entityOwners() {
-        return YangParserTestUtils.parseYangFiles(new File("src/main/yang/entity-owners.yang"));
-    }
 }
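
SchemaContextHelper.full() above now caches the parsed EffectiveModelContext in a volatile FULL field using double-checked locking, so the expensive YANG parse in select(...) runs at most once per JVM. A generic sketch of the same idiom with placeholder names that are not part of the patch:

    final class LazyCacheExample {
        private static volatile Object cached;

        private LazyCacheExample() {
            // utility class
        }

        static Object get() {
            // Fast path: a single volatile read, no locking
            Object ret = cached;
            if (ret == null) {
                synchronized (LazyCacheExample.class) {
                    // Re-check under the lock: another thread may have initialized it first
                    ret = cached;
                    if (ret == null) {
                        ret = cached = expensiveCompute();
                    }
                }
            }
            return ret;
        }

        private static Object expensiveCompute() {
            // Stand-in for the one-time YANG resource parse
            return new Object();
        }
    }
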
index 1f7c3fd67fdc9cd5231b010f557c582d2f7dc79e..31a928256162e10e91b0479752b2279425bbf532 100644 (file)
@@ -9,15 +9,14 @@ package org.opendaylight.controller.md.cluster.datastore.model;
 
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
 public final class TestModel {
@@ -56,16 +55,16 @@ public final class TestModel {
 
     }
 
-    public static SchemaContext createTestContext() {
+    public static EffectiveModelContext createTestContext() {
         return YangParserTestUtils.parseYangResource(DATASTORE_TEST_YANG);
     }
 
-    public static DataContainerChild<?, ?> outerMapNode() {
+    public static DataContainerChild outerMapNode() {
         return ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME).build();
     }
 
-    public static DataContainerChild<?, ?> outerNode(final int... ids) {
-        CollectionNodeBuilder<MapEntryNode, MapNode> outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
+    public static DataContainerChild outerNode(final int... ids) {
+        var outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
         for (int id: ids) {
             outer.addChild(ImmutableNodes.mapEntry(OUTER_LIST_QNAME, ID_QNAME, id));
         }
@@ -73,8 +72,8 @@ public final class TestModel {
         return outer.build();
     }
 
-    public static DataContainerChild<?, ?> outerNode(final MapEntryNode... entries) {
-        CollectionNodeBuilder<MapEntryNode, MapNode> outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
+    public static DataContainerChild outerNode(final MapEntryNode... entries) {
+        var outer = ImmutableNodes.mapNodeBuilder(OUTER_LIST_QNAME);
         for (MapEntryNode e: entries) {
             outer.addChild(e);
         }
@@ -82,8 +81,8 @@ public final class TestModel {
         return outer.build();
     }
 
-    public static DataContainerChild<?, ?> innerNode(final String... names) {
-        CollectionNodeBuilder<MapEntryNode, MapNode> outer = ImmutableNodes.mapNodeBuilder(INNER_LIST_QNAME);
+    public static DataContainerChild innerNode(final String... names) {
+        var outer = ImmutableNodes.mapNodeBuilder(INNER_LIST_QNAME);
         for (String name: names) {
             outer.addChild(ImmutableNodes.mapEntry(INNER_LIST_QNAME, NAME_QNAME, name));
         }
@@ -91,21 +90,23 @@ public final class TestModel {
         return outer.build();
     }
 
-    public static MapEntryNode outerNodeEntry(final int id, final DataContainerChild<?, ?> inner) {
+    public static MapEntryNode outerNodeEntry(final int id, final DataContainerChild inner) {
         return ImmutableNodes.mapEntryBuilder(OUTER_LIST_QNAME, ID_QNAME, id).addChild(inner).build();
     }
 
-    public static NormalizedNode<?, ?> testNodeWithOuter(final int... ids) {
+    public static ContainerNode testNodeWithOuter(final int... ids) {
         return testNodeWithOuter(outerNode(ids));
     }
 
-    public static NormalizedNode<?, ?> testNodeWithOuter(final DataContainerChild<?, ?> outer) {
-        return ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-                new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME)).withChild(outer).build();
+    public static ContainerNode testNodeWithOuter(final DataContainerChild outer) {
+        return Builders.containerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_QNAME))
+            .withChild(outer)
+            .build();
     }
 
     public static NodeIdentifierWithPredicates outerEntryKey(final int id) {
-        return new NodeIdentifierWithPredicates(OUTER_LIST_QNAME, ID_QNAME, id);
+        return NodeIdentifierWithPredicates.of(OUTER_LIST_QNAME, ID_QNAME, id);
     }
 
     public static YangInstanceIdentifier outerEntryPath(final int id) {
@@ -113,7 +114,7 @@ public final class TestModel {
     }
 
     public static NodeIdentifierWithPredicates innerEntryKey(final String name) {
-        return new NodeIdentifierWithPredicates(INNER_LIST_QNAME, NAME_QNAME, name);
+        return NodeIdentifierWithPredicates.of(INNER_LIST_QNAME, NAME_QNAME, name);
     }
 
     public static YangInstanceIdentifier innerEntryPath(final int id, final String name) {
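
The TestModel hunks above also move from direct NodeIdentifierWithPredicates construction to the static NodeIdentifierWithPredicates.of(...) factory. A small hedged sketch of building a keyed list entry path with that factory; the QNames here are hypothetical stand-ins, not the model's:

    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;

    final class EntryKeyExample {
        // Hypothetical list and key QNames, for illustration only
        private static final QName LIST_QNAME = QName.create("urn:example:test", "outer-list");
        private static final QName ID_QNAME = QName.create(LIST_QNAME, "id");

        private EntryKeyExample() {
            // utility class
        }

        static YangInstanceIdentifier entryPath(final int id) {
            // of(listQName, keyQName, keyValue) replaces the former constructor call
            return YangInstanceIdentifier.of(LIST_QNAME)
                .node(NodeIdentifierWithPredicates.of(LIST_QNAME, ID_QNAME, id));
        }
    }
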
index abefdf0ce94006297264a7ed25d126240a7514b4..a24085e13440a6cdc01224e0ae9098372aec7d36 100644 (file)
@@ -1,6 +1,10 @@
 akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
 
@@ -51,7 +55,11 @@ test-config {
   akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
 
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
     loglevel = "INFO"
 
     actor {
@@ -68,11 +76,19 @@ test-config {
       warn-about-java-serializer-usage = false
     }
     remote {
+      classic {
+        netty.tcp {
+          hostname = "127.0.0.1"
+          port = 2565
+        }
+      }
+
       log-remote-lifecycle-events = off
       artery {
         enabled = on
         canonical.hostname = "127.0.0.1"
         canonical.port = 2565
+        transport = tcp
       }
 
       netty.tcp {
@@ -82,7 +98,6 @@ test-config {
     }
 
     cluster {
-      auto-down-unreachable-after = 100s
       retry-unsuccessful-join-after = 100ms
 
       roles = [
@@ -117,6 +132,11 @@ Member1 {
   akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
+
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     loglevel = "INFO"
 
@@ -134,11 +154,19 @@ Member1 {
       warn-about-java-serializer-usage = false
     }
     remote {
+      classic {
+        netty.tcp {
+          hostname = "127.0.0.1"
+          port = 2558
+        }
+      }
+
       log-remote-lifecycle-events = off
       artery {
         enabled = on
         canonical.hostname = "127.0.0.1"
         canonical.port = 2558
+        transport = tcp
       }
 
       netty.tcp {
@@ -182,6 +210,11 @@ Member2 {
   akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
+
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     actor {
       provider = "akka.cluster.ClusterActorRefProvider"
@@ -197,11 +230,19 @@ Member2 {
       warn-about-java-serializer-usage = false
     }
     remote {
+      classic {
+        netty.tcp {
+          hostname = "127.0.0.1"
+          port = 2559
+        }
+      }
+
       log-remote-lifecycle-events = off
       artery {
         enabled = on
         canonical.hostname = "127.0.0.1"
         canonical.port = 2559
+        transport = tcp
       }
 
       netty.tcp {
@@ -245,6 +286,11 @@ Member3 {
   akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
+
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     loglevel = "INFO"
 
@@ -262,11 +308,19 @@ Member3 {
       warn-about-java-serializer-usage = false
     }
     remote {
+      classic {
+        netty.tcp {
+          hostname = "127.0.0.1"
+          port = 2557
+        }
+      }
+
       log-remote-lifecycle-events = off
       artery {
         enabled = on
         canonical.hostname = "127.0.0.1"
         canonical.port = 2557
+        transport = tcp
       }
 
       netty.tcp {
@@ -310,6 +364,11 @@ Member4 {
   akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
+
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     loglevel = "INFO"
 
@@ -327,11 +386,19 @@ Member4 {
       warn-about-java-serializer-usage = false
     }
     remote {
+      classic {
+        netty.tcp {
+          hostname = "127.0.0.1"
+          port = 2560
+        }
+      }
+
       log-remote-lifecycle-events = off
       artery {
         enabled = on
         canonical.hostname = "127.0.0.1"
         canonical.port = 2560
+        transport = tcp
       }
 
       netty.tcp {
@@ -375,6 +442,11 @@ Member5 {
   akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
+
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     loglevel = "INFO"
 
@@ -392,11 +464,19 @@ Member5 {
       warn-about-java-serializer-usage = false
     }
     remote {
+      classic {
+        netty.tcp {
+          hostname = "127.0.0.1"
+          port = 2561
+        }
+      }
+
       log-remote-lifecycle-events = off
       artery {
         enabled = on
         canonical.hostname = "127.0.0.1"
         canonical.port = 2561
+        transport = tcp
       }
 
       netty.tcp {
@@ -440,6 +520,11 @@ Member256 {
   akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
     persistence.journal.plugin = "in-memory-journal"
+    coordinated-shutdown.run-by-actor-system-terminate = off
+
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     loglevel = "INFO"
 
@@ -457,11 +542,19 @@ Member256 {
       warn-about-java-serializer-usage = false
     }
     remote {
+      classic {
+        netty.tcp {
+          hostname = "127.0.0.1"
+          port = 2562
+        }
+      }
+
       log-remote-lifecycle-events = off
       artery {
         enabled = on
         canonical.hostname = "127.0.0.1"
         canonical.port = 2562
+        transport = tcp
       }
 
       netty.tcp {
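
The configuration hunks above disable coordinated-shutdown.run-by-actor-system-terminate, point persistence.non-persistent.journal at InMemoryJournal, and pin artery to TCP while adding a classic netty.tcp block per member. A hedged sketch of how a test might pick up one of those member blocks; the actor-system name and the assumption that the file is resolved as the default test configuration are mine, not the patch's:

    import akka.actor.ActorSystem;
    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    final class MemberSystemExample {
        private MemberSystemExample() {
            // utility class
        }

        static ActorSystem startMember1() {
            // "Member1" matches the block name above; load() resolves the test classpath configuration
            Config member1 = ConfigFactory.load().getConfig("Member1");
            return ActorSystem.create("cluster-test", member1);
        }
    }
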
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/expectedJournalExport.json b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/expectedJournalExport.json
new file mode 100644 (file)
index 0000000..d534372
--- /dev/null
@@ -0,0 +1 @@
+{"Entries":[{"Entry":[{"Node":[{"Path":"/"},{"ModificationType":"UNMODIFIED"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=1}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=2}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=3}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=4}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=5}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=6}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=7}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=8}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=9}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=10}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, 
body=11}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=12}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=13}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=14}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=15}]"}]}]},{"Entry":[{"Node":[{"Path":"/(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)test/outer-list"},{"ModificationType":"WRITE"},{"Data":"[ImmutableLeafNode{name=(urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test?revision=2014-03-13)id, body=16}]"}]}]}]}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/expectedSnapshotExport.json b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/expectedSnapshotExport.json
new file mode 100644 (file)
index 0000000..e8be65b
--- /dev/null
@@ -0,0 +1 @@
+{"odl-datastore-test:test":{}}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/module-shards-default-cars-member1-and-2.conf b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/module-shards-default-cars-member1-and-2.conf
new file mode 100644 (file)
index 0000000..efc3dd4
--- /dev/null
@@ -0,0 +1,26 @@
+module-shards = [
+    {
+        name = "default"
+        shards = [
+            {
+                name="default",
+                replicas = [
+                    "member-1",
+                    "member-2"
+                ]
+            }
+        ]
+    },
+    {
+        name = "cars"
+        shards = [
+            {
+                name="cars"
+                replicas = [
+                    "member-1",
+                    "member-2"
+                ]
+            }
+        ]
+    }
+]
\ No newline at end of file
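The module-shards file above maps each module ("default", "cars") to its shards and the member replicas hosting them. As a rough sketch of how such a HOCON file can be inspected, the following uses the Typesafe Config library; the resource name and the surrounding class are illustrative assumptions, not part of this change:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    public final class ModuleShardsDump {
        public static void main(final String[] args) {
            // Assumed to be on the test classpath (src/test/resources)
            final Config root = ConfigFactory.parseResources("module-shards-default-cars-member1-and-2.conf");
            for (Config module : root.getConfigList("module-shards")) {
                for (Config shard : module.getConfigList("shards")) {
                    // Prints e.g. "module default shard default replicas [member-1, member-2]"
                    System.out.println("module " + module.getString("name")
                        + " shard " + shard.getString("name")
                        + " replicas " + shard.getStringList("replicas"));
                }
            }
        }
    }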
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/module-shards-default-cars-member1.conf b/opendaylight/md-sal/sal-distributed-datastore/src/test/resources/module-shards-default-cars-member1.conf
new file mode 100644 (file)
index 0000000..109ff58
--- /dev/null
@@ -0,0 +1,24 @@
+module-shards = [
+    {
+        name = "default"
+        shards = [
+            {
+                name="default",
+                replicas = [
+                    "member-1",
+                ]
+            }
+        ]
+    },
+    {
+        name = "cars"
+        shards = [
+            {
+                name="cars"
+                replicas = [
+                    "member-1"
+                ]
+            }
+        ]
+    }
+]
\ No newline at end of file
index c667887b54d2b3e8df4c11fe9051057a383cb15b..7a0b83212f669b33b7092f0fead633064b065bb4 100644 (file)
@@ -32,6 +32,9 @@ Member1 {
         }
       }
     }
+    persistence.non-persistent.journal {
+      class = "org.opendaylight.controller.cluster.raft.utils.InMemoryJournal"
+    }
 
     loglevel = "INFO"
 
@@ -70,4 +73,4 @@ Member1 {
       ]
     }
   }
-}
\ No newline at end of file
+}
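The hunk above points the test actor system at the in-memory journal plugin. Below is a minimal sketch of building the same overlay programmatically with the Typesafe Config library; the base resource name "Member1.conf" is an assumption, and the nesting under the member/akka sections is elided for brevity:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    final class JournalOverride {
        static Config withInMemoryJournal() {
            // Overlay mirroring the configuration block added above
            final Config overlay = ConfigFactory.parseString(
                "persistence.non-persistent.journal.class = "
                    + "\"org.opendaylight.controller.cluster.raft.utils.InMemoryJournal\"");
            // Fall back to the base member configuration for everything else
            return overlay.withFallback(ConfigFactory.parseResources("Member1.conf"));
        }
    }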
diff --git a/opendaylight/md-sal/sal-dom-api/.gitignore b/opendaylight/md-sal/sal-dom-api/.gitignore
deleted file mode 100644 (file)
index ea8c4bf..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/target
diff --git a/opendaylight/md-sal/sal-dom-api/pom.xml b/opendaylight/md-sal/sal-dom-api/pom.xml
deleted file mode 100644 (file)
index f178428..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-core-api</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-model-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-test-model</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/ClusteredDOMDataTreeChangeListener.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/ClusteredDOMDataTreeChangeListener.java
deleted file mode 100644 (file)
index 3fdc1b4..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-/**
- * ClusteredDOMDataTreeChangeListener is a marker interface to enable data tree change notifications on all
- * instances in a cluster where this listener is registered.
- *
- * <p>
- * Applications should implement ClusteredDOMDataTreeChangeListener instead of {@link DOMDataTreeChangeListener},
- * if they want to listen for data tree change notifications on any node of a clustered data store.
- * {@link DOMDataTreeChangeListener} enables notifications only at the leader of the data store.
- *
- * @author Thomas Pantelis
- */
-public interface ClusteredDOMDataTreeChangeListener extends DOMDataTreeChangeListener {
-
-}
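For context, the removed marker interface only changes where notifications fire. An illustrative implementation (the class name and logging are assumptions) would have looked like this:

    import java.util.Collection;
    import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
    import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;

    // Implementing the marker interface makes the listener fire on every cluster
    // member, not just the shard leader.
    final class AllMembersListener implements ClusteredDOMDataTreeChangeListener {
        @Override
        public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
            changes.forEach(change -> System.out.println("changed: " + change.getRootPath()));
        }
    }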
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMActionProviderService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMActionProviderService.java
deleted file mode 100644 (file)
index 7678d4d..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Bridge to action registration.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMActionProviderService} instead
- */
-@Deprecated
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface DOMActionProviderService extends DOMService, org.opendaylight.mdsal.dom.api.DOMActionProviderService {
-
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMActionService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMActionService.java
deleted file mode 100644 (file)
index 838e71f..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Bridge to action invocation.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMActionService} instead
- */
-@Deprecated
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface DOMActionService extends DOMService, org.opendaylight.mdsal.dom.api.DOMActionService {
-
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataBroker.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataBroker.java
deleted file mode 100644 (file)
index 2f123cd..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainFactory;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Data Broker which provides data transaction and data change listener functionality
- * using {@link NormalizedNode} data format.
- *
- * <p>
- * This interface is type capture of generic interfaces and returns type captures
- * of results for client-code convenience.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMDataBroker} instead
- */
-@Deprecated
-public interface DOMDataBroker extends
-        AsyncDataBroker<YangInstanceIdentifier, NormalizedNode<?, ?>>,
-        TransactionChainFactory<YangInstanceIdentifier, NormalizedNode<?, ?>>,
-            DOMExtensibleService<DOMDataBroker, DOMDataBrokerExtension> {
-
-    @Override
-    DOMDataReadOnlyTransaction newReadOnlyTransaction();
-
-    @Override
-    DOMDataReadWriteTransaction newReadWriteTransaction();
-
-    @Override
-    DOMDataWriteTransaction newWriteOnlyTransaction();
-
-    @Override
-    DOMTransactionChain createTransactionChain(TransactionChainListener listener);
-}
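A brief sketch of how the removed broker's transaction-factory surface was typically used; the broker instance is assumed to be injected (for example via Blueprint), and transaction submission lives on the inherited AsyncWriteTransaction, which is not part of this diff:

    import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;

    final class BrokerUsage {
        void example(final DOMDataBroker broker) {
            final DOMDataReadOnlyTransaction readTx = broker.newReadOnlyTransaction();
            // ... issue read()/exists() calls, then release the transaction
            readTx.close();

            final DOMDataWriteTransaction writeTx = broker.newWriteOnlyTransaction();
            // ... put()/merge()/delete() against writeTx, then submit it
        }
    }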
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataBrokerExtension.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataBrokerExtension.java
deleted file mode 100644 (file)
index abbcc40..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-/**
- * Type capture of a {@link DOMServiceExtension} applicable to {@link DOMDataBroker}
- * implementations.
- */
-public interface DOMDataBrokerExtension extends DOMServiceExtension<DOMDataBroker, DOMDataBrokerExtension> {
-
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadOnlyTransaction.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadOnlyTransaction.java
deleted file mode 100644 (file)
index 53af455..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncReadOnlyTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public interface DOMDataReadOnlyTransaction extends DOMDataReadTransaction,
-        AsyncReadOnlyTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadTransaction.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadTransaction.java
deleted file mode 100644 (file)
index 0517ad6..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncReadTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * A transaction that provides read access to a logical data store.
- *
- * <p>
- * For more information on usage and examples, please see the documentation in {@link AsyncReadTransaction}.
- */
-public interface DOMDataReadTransaction extends AsyncReadTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
-    /**
-     * Reads data from provided logical data store located at the provided path.
-     *
-     * <p>
-     * If the target is a subtree, then the whole subtree is read (and will be
-     * accessible from the returned data object).
-     *
-     * @param store
-     *            Logical data store from which read should occur.
-     * @param path
-     *            Path which uniquely identifies the subtree which the client wants to
-     *            read
-     * @return a CheckedFuture containing the result of the read. The Future blocks until the
-     *         read operation is complete. Once complete:
-     *         <ul>
-     *         <li>If the data at the supplied path exists, the Future returns an Optional object
-     *         containing the data.</li>
-     *         <li>If the data at the supplied path does not exist, the Future returns
-     *         Optional#absent().</li>
-     *         <li>If the read of the data fails, the Future will fail with a
-     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
-     *         </ul>
-     */
-    CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
-            LogicalDatastoreType store, YangInstanceIdentifier path);
-
-    /**
-     * Checks if data is available in the logical data store located at provided path.
-     *
-     * <p>
-     * Note: a successful result from this method makes no guarantee that a subsequent call to {@link #read}
-     * will succeed. It is possible that the data resides in a data store on a remote node and, if that
-     * node goes down or a network failure occurs, a subsequent read would fail. Another scenario is if
-     * the data is deleted in between the calls to <code>exists</code> and <code>read</code>.
-     *
-     * @param store
-     *            Logical data store from which read should occur.
-     * @param path
-     *            Path which uniquely identifies the subtree whose existence the client wants
-     *            to check
-     * @return a CheckedFuture containing the result of the check.
-     *         <ul>
-     *         <li>If the data at the supplied path exists, the Future returns a Boolean
-     *         whose value is true, false otherwise</li>
-     *         <li>If checking for the data fails, the Future will fail with a
-     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
-     *         </ul>
-     */
-    CheckedFuture<Boolean, ReadFailedException> exists(LogicalDatastoreType store, YangInstanceIdentifier path);
-}
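A minimal sketch of the read/exists contract described above, using the Guava Optional and CheckedFuture types from the removed signatures; the transaction and path are assumed inputs:

    import com.google.common.base.Optional;
    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class ReadExample {
        void dump(final DOMDataReadTransaction tx, final YangInstanceIdentifier path) throws ReadFailedException {
            if (tx.exists(LogicalDatastoreType.OPERATIONAL, path).checkedGet()) {
                final Optional<NormalizedNode<?, ?>> data =
                    tx.read(LogicalDatastoreType.OPERATIONAL, path).checkedGet();
                // Optional.absent() is returned when the node does not exist
                if (data.isPresent()) {
                    System.out.println("read: " + data.get());
                }
            }
        }
    }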
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadWriteTransaction.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadWriteTransaction.java
deleted file mode 100644 (file)
index 39e2ad3..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public interface DOMDataReadWriteTransaction extends DOMDataReadTransaction, DOMDataWriteTransaction,
-        AsyncReadWriteTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeChangeListener.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeChangeListener.java
deleted file mode 100644 (file)
index 53ced96..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import java.util.Collection;
-import java.util.EventListener;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-
-/**
- * Interface implemented by classes interested in receiving notifications about
- * data tree changes. This interface provides a cursor-based view of the change, which has potentially
- * lower overhead.
- *
- * <p>
- * Note: this interface enables notifications only at the leader of the data store, if clustered. If you want
- * notifications on all instances in a cluster, use the {@link ClusteredDOMDataTreeChangeListener}.
- */
-public interface DOMDataTreeChangeListener extends EventListener {
-    /**
-     * Invoked when there was data change for the supplied path, which was used
-     * to register this listener.
-     *
-     * <p>
-     * This method may be also invoked during registration of the listener if
-     * there is any pre-existing data in the conceptual data tree for supplied
-     * path. This initial event will contain all pre-existing data as created.
-     *
-     * <p>
-     * A data change event may be triggered spuriously, e.g. such that data before
-     * and after compare as equal. Implementations of this interface are expected
-     * to recover from such events. Event producers are expected to exert reasonable
-     * effort to suppress such events.
-     *
-     * <p>
-     * In other words, it is completely acceptable to observe
-     * a {@link org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode},
-     * which reports a {@link org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType}
-     * other than UNMODIFIED, while the before- and after- data items compare as
-     * equal.
-     *
-     * @param changes Collection of change events, may not be null or empty.
-     */
-    void onDataTreeChanged(@NonNull Collection<DataTreeCandidate> changes);
-}
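An illustrative listener matching the contract above: spurious events are tolerated by skipping candidates whose root reports UNMODIFIED. The class name and logging are assumptions:

    import java.util.Collection;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
    import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
    import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;

    final class LoggingListener implements DOMDataTreeChangeListener {
        @Override
        public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
            for (DataTreeCandidate change : changes) {
                // Spurious events may compare equal before/after; skip UNMODIFIED roots
                if (change.getRootNode().getModificationType() != ModificationType.UNMODIFIED) {
                    System.out.println(change.getRootPath() + ": "
                        + change.getRootNode().getModificationType());
                }
            }
        }
    }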
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeChangeService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeChangeService.java
deleted file mode 100644 (file)
index 28590fc..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-/**
- * A {@link DOMServiceExtension} which allows users to register for changes to a
- * subtree.
- */
-public interface DOMDataTreeChangeService extends DOMDataBrokerExtension {
-    /**
-     * Registers a {@link DOMDataTreeChangeListener} to receive
-     * notifications when data changes under a given path in the conceptual data
-     * tree.
-     *
-     * <p>
-     * You are able to register for notifications for any node or subtree
-     * which can be represented using {@link DOMDataTreeIdentifier}.
-     *
-     * <p>
-     * You are able to register for data change notifications for a subtree or leaf
-     * even if it does not exist. You will receive notification once that node is
-     * created.
-     *
-     * <p>
-     * If there is any pre-existing data in the data tree for the path for which you are
-     * registering, you will receive an initial data change event, which will
-     * contain all pre-existing data, marked as created.
-     *
-     * <p>
-     * This method returns a {@link ListenerRegistration} object. To
-     * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
-     * method on the returned object.
-     *
-     * <p>
-     * You MUST explicitly unregister your listener when you no longer want to receive
-     * notifications. This is especially true in OSGi environments, where failure to
-     * do so during bundle shutdown can lead to stale listeners being still registered.
-     *
-     * @param treeId
-     *            Data tree identifier of the subtree which should be watched for
-     *            changes.
-     * @param listener
-     *            Listener instance which is being registered
-     * @return Listener registration object, which may be used to unregister
-     *         your listener using {@link ListenerRegistration#close()} to stop
-     *         delivery of change events.
-     */
-    @NonNull <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(
-            @NonNull DOMDataTreeIdentifier treeId, @NonNull L listener);
-}
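A sketch of the registration/close cycle the Javadoc above insists on; the service and listener are assumed to be provided by the caller, and the root-of-datastore identifier is only an example:

    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
    import org.opendaylight.yangtools.concepts.ListenerRegistration;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    final class RegistrationExample {
        <L extends DOMDataTreeChangeListener> void watchRoot(final DOMDataTreeChangeService service,
                final L listener) {
            final ListenerRegistration<L> reg = service.registerDataTreeChangeListener(
                new DOMDataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.EMPTY),
                listener);
            // Always close the registration once notifications are no longer needed
            reg.close();
        }
    }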
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeCommitCohortRegistry.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeCommitCohortRegistry.java
deleted file mode 100644 (file)
index d3858dc..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2017 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Interface for a DOM commit cohort registry.
- *
- * @author Thomas Pantelis
- */
-@SuppressFBWarnings("NM_SAME_SIMPLE_NAME_AS_INTERFACE")
-public interface DOMDataTreeCommitCohortRegistry extends DOMDataBrokerExtension,
-        org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry {
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeIdentifier.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeIdentifier.java
deleted file mode 100644 (file)
index 9ed435a..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects;
-import java.io.Serializable;
-import java.util.Iterator;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.yangtools.concepts.Immutable;
-import org.opendaylight.yangtools.concepts.Path;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-
-/**
- * A unique identifier for a particular subtree. It is composed of the logical
- * data store type and the instance identifier of the root node.
- */
-public final class DOMDataTreeIdentifier implements Immutable,
-        Path<DOMDataTreeIdentifier>, Serializable, Comparable<DOMDataTreeIdentifier> {
-    private static final long serialVersionUID = 1L;
-    private final @NonNull YangInstanceIdentifier rootIdentifier;
-    private final @NonNull LogicalDatastoreType datastoreType;
-
-    public DOMDataTreeIdentifier(final LogicalDatastoreType datastoreType,
-            final YangInstanceIdentifier rootIdentifier) {
-        this.datastoreType = requireNonNull(datastoreType);
-        this.rootIdentifier = requireNonNull(rootIdentifier);
-    }
-
-    /**
-     * Return a counterpart to an MD-SAL data tree identifier.
-     *
-     * @return Controller data tree identifier.
-     */
-    public static DOMDataTreeIdentifier fromMdsal(final org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier mdsal) {
-        return new DOMDataTreeIdentifier(LogicalDatastoreType.fromMdsal(mdsal.getDatastoreType()),
-            mdsal.getRootIdentifier());
-    }
-
-    /**
-     * Return MD-SAL counterpart of this object.
-     *
-     * @return MD-SAL data tree identifier.
-     */
-    public org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier toMdsal() {
-        return new org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier(datastoreType.toMdsal(), rootIdentifier);
-    }
-
-    /**
-     * Return the logical data store type.
-     *
-     * @return Logical data store type. Guaranteed to be non-null.
-     */
-    public @NonNull LogicalDatastoreType getDatastoreType() {
-        return datastoreType;
-    }
-
-    /**
-     * Return the {@link YangInstanceIdentifier} of the root node.
-     *
-     * @return Instance identifier corresponding to the root node.
-     */
-    public @NonNull YangInstanceIdentifier getRootIdentifier() {
-        return rootIdentifier;
-    }
-
-    @Override
-    public boolean contains(final DOMDataTreeIdentifier other) {
-        return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
-    }
-
-    @Override
-    public int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + datastoreType.hashCode();
-        result = prime * result + rootIdentifier.hashCode();
-        return result;
-    }
-
-    @Override
-    public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof DOMDataTreeIdentifier)) {
-            return false;
-        }
-        DOMDataTreeIdentifier other = (DOMDataTreeIdentifier) obj;
-        if (datastoreType != other.datastoreType) {
-            return false;
-        }
-        return rootIdentifier.equals(other.rootIdentifier);
-    }
-
-    @Override
-    public int compareTo(final DOMDataTreeIdentifier obj) {
-        int cmp = datastoreType.compareTo(obj.datastoreType);
-        if (cmp != 0) {
-            return cmp;
-        }
-
-        final Iterator<PathArgument> mi = rootIdentifier.getPathArguments().iterator();
-        final Iterator<PathArgument> oi = obj.rootIdentifier.getPathArguments().iterator();
-
-        while (mi.hasNext()) {
-            if (!oi.hasNext()) {
-                return 1;
-            }
-
-            final PathArgument ma = mi.next();
-            final PathArgument oa = oi.next();
-            cmp = ma.compareTo(oa);
-            if (cmp != 0) {
-                return cmp;
-            }
-        }
-
-        return oi.hasNext() ? -1 : 0;
-    }
-
-    @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this).add("datastore", datastoreType).add("root", rootIdentifier).toString();
-    }
-}
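A small sketch of the contains()/compareTo() semantics implemented above; the QName is a made-up example, not part of this change:

    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    final class TreeIdExample {
        public static void main(final String[] args) {
            final DOMDataTreeIdentifier root = new DOMDataTreeIdentifier(
                LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
            final DOMDataTreeIdentifier subtree = new DOMDataTreeIdentifier(
                LogicalDatastoreType.CONFIGURATION,
                YangInstanceIdentifier.of(QName.create("urn:example:demo", "2015-01-01", "top")));

            System.out.println(root.contains(subtree));  // true: same datastore, ancestor path
            System.out.println(subtree.contains(root));  // false
            System.out.println(root.compareTo(subtree)); // negative: the shorter path orders first
        }
    }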
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataWriteTransaction.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataWriteTransaction.java
deleted file mode 100644 (file)
index 236e925..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.AsyncWriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * A transaction that provides mutation capabilities on a data tree.
- *
- * <p>
- * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
- */
-public interface DOMDataWriteTransaction extends AsyncWriteTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
-
-    @Override
-    void delete(LogicalDatastoreType store, YangInstanceIdentifier path);
-
-    /**
-     * Stores a piece of data at the specified path. This acts as an add / replace
-     * operation, which is to say that the whole subtree will be replaced by the specified data.
-     *
-     * <p>
-     * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
-     *
-     * <p>
-     * If you need to make sure that a parent object exists but you do not want to modify
-     * its pre-existing state by using put, consider using {@link #merge} instead.
-     *
-     * @param store
-     *            the logical data store which should be modified
-     * @param path
-     *            the data object path
-     * @param data
-     *            the data object to be written to the specified path
-     * @throws IllegalStateException
-     *             if the transaction has already been submitted
-     */
-    void put(LogicalDatastoreType store, YangInstanceIdentifier path, NormalizedNode<?, ?> data);
-
-    /**
-     * Merges a piece of data with the existing data at a specified path. Any pre-existing data
-     * which is not explicitly overwritten will be preserved. This means that if you store a container,
-     * its child lists will be merged.
-     *
-     * <p>
-     * For more information on usage and examples, please see the documentation in {@link AsyncWriteTransaction}.
-     *
-     *<p>
-     * If you require an explicit replace operation, use {@link #put} instead.
-     *
-     * @param store
-     *            the logical data store which should be modified
-     * @param path
-     *            the data object path
-     * @param data
-     *            the data object to be merged to the specified path
-     * @throws IllegalStateException
-     *             if the transaction has already been submitted
-     */
-    void merge(LogicalDatastoreType store, YangInstanceIdentifier path, NormalizedNode<?, ?> data);
-}
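A minimal sketch contrasting the put and merge semantics described above; the path and data node are assumed inputs:

    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class WriteExample {
        void replaceThenMerge(final DOMDataWriteTransaction tx, final YangInstanceIdentifier path,
                final NormalizedNode<?, ?> data) {
            // put(): add/replace, the whole subtree at "path" is overwritten
            tx.put(LogicalDatastoreType.CONFIGURATION, path, data);
            // merge(): pre-existing children not mentioned by "data" are preserved
            tx.merge(LogicalDatastoreType.CONFIGURATION, path, data);
        }
    }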
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMEvent.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMEvent.java
deleted file mode 100644 (file)
index 312de34..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.time.Instant;
-import java.util.Date;
-
-/**
- * Generic event interface.
- */
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface DOMEvent extends org.opendaylight.mdsal.dom.api.DOMEvent {
-
-    @Override
-    default Instant getEventInstant() {
-        final Date eventTime = getEventTime();
-        return eventTime != null ? eventTime.toInstant() : null;
-    }
-
-    /**
-     * Get the time of the event occurrence.
-     *
-     * @return the event time
-     */
-    Date getEventTime();
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMExtensibleService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMExtensibleService.java
deleted file mode 100644 (file)
index 1f2c496..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.annotations.Beta;
-import java.util.Map;
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * Marker interface for services which can support {@link DOMServiceExtension}.
- * Aside from marking these, they also provide a runtime query to detect whether
- * a particular trait is in fact available.
- *
- * @param <T> Base {@link DOMService}
- * @param <E> Extension type
- */
-@Beta
-public interface DOMExtensibleService<T extends DOMExtensibleService<T, E>,
-        E extends DOMServiceExtension<T, E>> extends DOMService {
-    /**
-     * Return a map of currently-supported extensions, along with accessor services
-     * which provide access to the specific functionality bound to this service.
-     *
-     * @return A map of supported functionality.
-     */
-    @NonNull Map<Class<? extends E>, E> getSupportedExtensions();
-}
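A sketch of the runtime extension query, using DOMDataBroker and DOMDataTreeChangeService from elsewhere in this diff; the broker is an assumed input:

    import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;

    final class ExtensionLookup {
        DOMDataTreeChangeService changeService(final DOMDataBroker broker) {
            final DOMDataBrokerExtension ext =
                broker.getSupportedExtensions().get(DOMDataTreeChangeService.class);
            // May be null when the broker implementation does not support this extension
            return (DOMDataTreeChangeService) ext;
        }
    }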
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPoint.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPoint.java
deleted file mode 100644 (file)
index b679fc0..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.base.Optional;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public interface DOMMountPoint extends Identifiable<YangInstanceIdentifier> {
-
-    <T extends DOMService> Optional<T> getService(Class<T> cls);
-
-    SchemaContext getSchemaContext();
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPointService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMMountPointService.java
deleted file mode 100644 (file)
index 0c72b86..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.base.Optional;
-import org.opendaylight.mdsal.dom.api.DOMMountPointListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * Deprecated.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMMountPointService} instead
- */
-@Deprecated
-public interface DOMMountPointService extends DOMService {
-
-    Optional<DOMMountPoint> getMountPoint(YangInstanceIdentifier path);
-
-    DOMMountPointBuilder createMountPoint(YangInstanceIdentifier path);
-
-    ListenerRegistration<DOMMountPointListener> registerProvisionListener(DOMMountPointListener listener);
-
-    interface DOMMountPointBuilder {
-
-        <T extends DOMService> DOMMountPointBuilder addService(Class<T> type,T impl);
-
-        DOMMountPointBuilder addInitialSchemaContext(SchemaContext ctx);
-
-        ObjectRegistration<DOMMountPoint> register();
-    }
-}
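A sketch of the builder flow declared above; all inputs are assumed to be supplied by the caller:

    import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
    import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
    import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
    import org.opendaylight.yangtools.concepts.ObjectRegistration;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.model.api.SchemaContext;

    final class MountExample {
        ObjectRegistration<DOMMountPoint> mount(final DOMMountPointService service,
                final YangInstanceIdentifier path, final DOMDataBroker broker, final SchemaContext ctx) {
            return service.createMountPoint(path)
                .addService(DOMDataBroker.class, broker)
                .addInitialSchemaContext(ctx)
                .register();
        }
    }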
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotification.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotification.java
deleted file mode 100644 (file)
index d30e0f2..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * A single YANG notification.
- */
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface DOMNotification extends org.opendaylight.mdsal.dom.api.DOMNotification {
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationListener.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationListener.java
deleted file mode 100644 (file)
index bd5355f..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import java.util.EventListener;
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * Interface implemented by listeners interested in {@link DOMNotification}s.
- */
-public interface DOMNotificationListener extends EventListener {
-    /**
-     * Invoked whenever a {@link DOMNotification} matching the subscription
-     * criteria is received.
-     *
-     * @param notification Received notification
-     */
-    void onNotification(@NonNull DOMNotification notification);
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationPublishService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationPublishService.java
deleted file mode 100644 (file)
index 08cb25b..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.concurrent.TimeUnit;
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * A {@link DOMService} which allows its user to send {@link DOMNotification}s. It
- * provides two styles of initiating the notification delivery, similar to
- * {@link java.util.concurrent.BlockingQueue}:
- * - a put-style method which waits until the implementation can accept the notification
- *   for delivery, and
- * - an offer-style method, which attempts to enqueue the notification, but allows
- *   the caller to specify that it should never wait, or put an upper bound on how
- *   long it is going to wait.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMNotificationPublishService} instead
- */
-@Deprecated
-public interface DOMNotificationPublishService extends DOMService {
-    /**
-     * Well-known value indicating that the implementation is currently not
-     * able to accept a notification.
-     */
-    ListenableFuture<Object> REJECTED = Futures.immediateFailedFuture(
-            new DOMNotificationRejectedException("Unacceptable blocking conditions encountered"));
-
-    /**
-     * Publish a notification. The result of this method is a {@link ListenableFuture}
-     * which will complete once the notification has been delivered to all immediate
-     * registrants. The type of the object resulting from the future is not defined
-     * and implementations may use it to convey additional information related to the
-     * publishing process.
-     *
-     * <p>
-     * Abstract subclasses can refine the return type as returning a promise of a
-     * more specific type, e.g.:
-     *
-     * {@code
-     *     public interface DeliveryStatus { int getListenerCount(); }
-     *     ListenableFuture<? extends DeliveryStatus> putNotification(DOMNotification notification);
-     * }
-     *
-     * <p>
-     * Once the Future succeeds, the resulting object can be queried for traits using
-     * instanceof, e.g:
-     *
-     * {@code
-     *     // Can block when (for example) the implementation's ThreadPool queue is full
-     *     Object o = service.putNotification(notif).get();
-     *     if (o instanceof DeliveryStatus) {
-     *         DeliveryStatus ds = (DeliveryStatus)o;
-     *         LOG.debug("Notification was received by {} listeners", ds.getListenerCount(););
-     *     }
-     * }
-     *
-     * <p>
-     * In case an implementation is running out of resources, it can block the calling
-     * thread until enough resources become available to accept the notification for
-     * processing, or it is interrupted.
-     *
-     * <p>
-     * Caution: completion here means that the implementation has completed processing
-     *          of the notification. This does not mean that all existing registrants
-     *          have seen the notification. Most importantly, the delivery process at
-     *          other cluster nodes may have not begun yet.
-     *
-     * @param notification Notification to be published.
-     * @return A listenable future which will report completion when the service
-     *         has finished propagating the notification to its immediate registrants.
-     * @throws InterruptedException if interrupted while waiting
-     * @throws NullPointerException if notification is null.
-     */
-    @NonNull ListenableFuture<?> putNotification(@NonNull DOMNotification notification) throws InterruptedException;
-
-    /**
-     * Attempt to publish a notification. The result of this method is a {@link ListenableFuture}
-     * which will complete once the notification has been delivered to all immediate
-     * registrants. The type of the object resulting from the future is not defined
-     * and implementations may use it to convey additional information related to the
-     * publishing process. Unlike {@link #putNotification(DOMNotification)}, this method
-     * is guaranteed not to block if the underlying implementation encounters contention.
-     *
-     * @param notification Notification to be published.
-     * @return A listenable future which will report completion when the service
-     *         has finished propagating the notification to its immediate registrants,
-     *         or {@link #REJECTED} if resource constraints prevent
-     *         the implementation from accepting the notification for delivery.
-     * @throws NullPointerException if notification is null.
-     */
-    @NonNull ListenableFuture<?> offerNotification(@NonNull DOMNotification notification);
-
-    /**
-     * Attempt to publish a notification. The result of this method is a {@link ListenableFuture}
-     * which will complete once the notification has been delivered to all immediate
-     * registrants. The type of the object resulting from the future is not defined
-     * and implementations may use it to convey additional information related to the
-     * publishing process. Unlike {@link #putNotification(DOMNotification)}, this method
-     * is guaranteed not to block for more than the specified timeout.
-     *
-     * @param notification Notification to be published.
-     * @param timeout how long to wait before giving up, in units of unit, must not be negative
-     * @param unit a TimeUnit determining how to interpret the timeout parameter
-     * @return A listenable future which will report completion when the service
-     *         has finished propagating the notification to its immediate registrants,
-     *         or {@link #REJECTED} if resource constraints prevent
-     *         the implementation from accepting the notification for delivery.
-     * @throws InterruptedException if interrupted while waiting
-     * @throws NullPointerException if notification or unit is null.
-     * @throws IllegalArgumentException if timeout is negative.
-     */
-    @NonNull ListenableFuture<?> offerNotification(@NonNull DOMNotification notification,
-        long timeout, @NonNull TimeUnit unit) throws InterruptedException;
-}
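A sketch of the offer-style delivery with a bounded wait, checking for the REJECTED sentinel defined above; the notification is an assumed input:

    import com.google.common.util.concurrent.ListenableFuture;
    import java.util.concurrent.TimeUnit;
    import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
    import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;

    final class PublishExample {
        boolean tryPublish(final DOMNotificationPublishService service, final DOMNotification notification)
                throws InterruptedException {
            final ListenableFuture<?> future =
                service.offerNotification(notification, 500, TimeUnit.MILLISECONDS);
            // REJECTED is the well-known sentinel meaning the notification was not accepted
            return future != DOMNotificationPublishService.REJECTED;
        }
    }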
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationRejectedException.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationRejectedException.java
deleted file mode 100644 (file)
index 6c03e9b..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-/**
- * This exception indicates that a given notification cannot be processed by the corresponding mechanism.
- * More information can be provided in the message.
- *
- * <p>
- * Expected use: {@link DOMNotificationPublishService}
- */
-public class DOMNotificationRejectedException extends Exception {
-    private static final long serialVersionUID = 1L;
-
-    public DOMNotificationRejectedException(final String message) {
-        super(message);
-    }
-
-    public DOMNotificationRejectedException(final String message, final Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMNotificationService.java
deleted file mode 100644 (file)
index 1e4625b..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import java.util.Collection;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * A {@link DOMService} which allows its users to subscribe to receive
- * {@link DOMNotification}s.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMNotificationService} instead
- */
-@Deprecated
-public interface DOMNotificationService extends DOMService {
-    /**
-     * Register a {@link DOMNotificationListener} to receive a set of notifications. As with
-     * other ListenerRegistration-based interfaces, registering an instance multiple times
-     * results in notifications being delivered for each registration.
-     *
-     * @param listener Notification instance to register
-     * @param types Notification types which should be delivered to the listener. Duplicate
-     *              entries are processed only once, null entries are ignored.
-     * @return Registration handle. Invoking {@link ListenerRegistration#close()}
-     *         will stop the delivery of notifications to the listener
-     * @throws IllegalArgumentException if types is empty or contains an invalid element, such as
-     *         null or a SchemaPath which does not represent a valid {@link DOMNotification} type.
-     * @throws NullPointerException if either of the arguments is null
-     */
-    <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(
-            @NonNull T listener, @NonNull Collection<SchemaPath> types);
-
-    /**
-     * Register a {@link DOMNotificationListener} to receive a set of notifications. As with
-     * other ListenerRegistration-based interfaces, registering an instance multiple times
-     * results in notifications being delivered for each registration.
-     *
-     * @param listener Notification instance to register
-     * @param types Notification types which should be delivered to the listener. Duplicate
-     *              entries are processed only once, null entries are ignored.
-     * @return Registration handle. Invoking {@link ListenerRegistration#close()}
-     *         will stop the delivery of notifications to the listener
-     * @throws IllegalArgumentException if types is empty or contains an invalid element, such as
-     *         null or a SchemaPath which does not represent a valid {@link DOMNotification} type.
-     * @throws NullPointerException if listener is null
-     */
-    // FIXME: Java 8: provide a default implementation of this method.
-    <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(
-            @NonNull T listener, SchemaPath... types);
-}
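A sketch of subscribing with the varargs overload; the notification QName is a made-up example, and SchemaPath.create is assumed to be available from yang-model-api:

    import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
    import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
    import org.opendaylight.yangtools.concepts.ListenerRegistration;
    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.model.api.SchemaPath;

    final class SubscribeExample {
        <T extends DOMNotificationListener> ListenerRegistration<T> subscribe(
                final DOMNotificationService service, final T listener) {
            final SchemaPath type = SchemaPath.create(true,
                QName.create("urn:example:notifications", "2015-01-01", "example-notification"));
            return service.registerNotificationListener(listener, type);
        }
    }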
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcAvailabilityListener.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcAvailabilityListener.java
deleted file mode 100644 (file)
index 2131cf1..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import java.util.Collection;
-import java.util.EventListener;
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * An {@link EventListener} used to track RPC implementations becoming (un)available to a {@link DOMRpcService}. Note that
- * the reported {@link DOMRpcIdentifier}s form an identifier space shared between RFC7950 actions and RFC6020 RPCs,
- * the former being also known as 'Routed RPCs'.
- *
- * <p>
- * Interpretation of DOMRpcIdentifiers has to be evaluated in the context of one of these types, which need to be
- * determined by matching {@link DOMRpcIdentifier#getType()} against a
- * {@link org.opendaylight.yangtools.yang.model.api.SchemaContext}, which determines actual semantics of
- * {@link DOMRpcIdentifier#getContextReference()}. Corresponding SchemaNode is required to be a known sub-interface
- * of {@link org.opendaylight.yangtools.yang.model.api.OperationDefinition}.
- *
- * <p>
- * For RFC6020 RPCs, reported context reference is always non-null and empty. It indicates an RPC implementation has
- * been registered and invocations can be reasonably (with obvious distributed system caveats coming from asynchronous
- * events) expected to succeed.
- *
- * <p>
- * For RFC7950 actions with a non-empty context-reference, the indication is the same as for RFC6020 RPCs.
- *
- * <p>
- * For RFC7950 actions with an empty context-reference, the indication is that the corresponding actions are
- * potentially available, but are subject to dynamic lifecycle of their context references. This includes two primary
- * use cases:
- * <ul>
- *     <li>dynamic action instantiation (when a device connects)</li>
- *     <li>dynamic action translation, such as transforming one action into another</li>
- * </ul>
- * First use case will provide further availability events with non-empty context references as they become available,
- * which can be safely ignored if the listener is interested in pure invocation-type integration.
- *
- * <p>
- * Second use case will not be providing further events, but rather will attempt to map any incoming invocation onto
- * some other RPC or action, or similar, which can separately fail. If a sub-request fails, such implementations are
- * required to report {@link DOMRpcImplementationNotAvailableException} as the invocation result, with the underlying
- * failure being linked as a cause.
- */
-public interface DOMRpcAvailabilityListener extends EventListener {
-    /**
-     * Method invoked whenever an RPC type becomes available.
-     *
-     * @param rpcs RPC types newly available
-     */
-    void onRpcAvailable(@NonNull Collection<DOMRpcIdentifier> rpcs);
-
-    /**
-     * Method invoked whenever an RPC type becomes unavailable.
-     *
-     * @param rpcs RPC types which became unavailable
-     */
-    void onRpcUnavailable(@NonNull Collection<DOMRpcIdentifier> rpcs);
-
-    /**
-     * Implementation filtering method. This method is useful for forwarding RPC implementations,
-     * which need to ensure they do not re-announce their own implementations. Without this method
-     * a forwarder which registers an implementation would be notified of its own implementation,
-     * potentially re-exporting it as local -- hence creating a forwarding loop.
-     *
-     * @param impl RPC implementation being registered
-     * @return False if the implementation should not be reported, defaults to true.
-     */
-    default boolean acceptsImplementation(final DOMRpcImplementation impl) {
-        return true;
-    }
-}
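An illustrative implementation of the availability callbacks above; the class name and logging are assumptions:

    import java.util.Collection;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;

    final class AvailabilityLogger implements DOMRpcAvailabilityListener {
        @Override
        public void onRpcAvailable(final Collection<DOMRpcIdentifier> rpcs) {
            System.out.println("RPCs available: " + rpcs);
        }

        @Override
        public void onRpcUnavailable(final Collection<DOMRpcIdentifier> rpcs) {
            System.out.println("RPCs unavailable: " + rpcs);
        }
    }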
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcException.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcException.java
deleted file mode 100644 (file)
index e4c30aa..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Base class for failures that can occur during RPC invocation. This covers
- * transport and protocol-level failures.
- */
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_SUPERCLASS", justification = "Migration")
-public abstract class DOMRpcException extends org.opendaylight.mdsal.dom.api.DOMRpcException {
-    private static final long serialVersionUID = 1L;
-
-    /**
-     * Construct a new instance with a message and an empty cause.
-     *
-     * @param message Exception message
-     */
-    protected DOMRpcException(final String message) {
-        super(message);
-    }
-
-    /**
-     * Construct a new instance with a message and a cause.
-     *
-     * @param message Exception message
-     * @param cause Chained cause
-     */
-    protected DOMRpcException(final String message, final Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcIdentifier.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcIdentifier.java
deleted file mode 100644 (file)
index a574564..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.MoreObjects;
-import java.util.Objects;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Identifier of an RPC context. This is an extension of the YANG RPC, which
- * always has a global context. It allows an RPC to have an instance identifier
- * attached, so that there can be multiple implementations bound to different
- * contexts concurrently.
- */
-public abstract class DOMRpcIdentifier {
-
-    private static final class Global extends DOMRpcIdentifier {
-        private Global(final @NonNull SchemaPath type) {
-            super(type);
-        }
-
-        @Override
-        public YangInstanceIdentifier getContextReference() {
-            return YangInstanceIdentifier.EMPTY;
-        }
-    }
-
-    private static final class Local extends DOMRpcIdentifier {
-        private final YangInstanceIdentifier contextReference;
-
-        private Local(final @NonNull SchemaPath type, final @NonNull YangInstanceIdentifier contextReference) {
-            super(type);
-            this.contextReference = requireNonNull(contextReference);
-        }
-
-        @Override
-        public YangInstanceIdentifier getContextReference() {
-            return contextReference;
-        }
-    }
-
-    private final SchemaPath type;
-
-    private DOMRpcIdentifier(final SchemaPath type) {
-        this.type = requireNonNull(type);
-    }
-
-    /**
-     * Create a global RPC identifier.
-     *
-     * @param type RPC type, SchemaPath of its definition, may not be null
-     * @return A global RPC identifier, guaranteed to be non-null.
-     */
-    public static @NonNull DOMRpcIdentifier create(final @NonNull SchemaPath type) {
-        return new Global(type);
-    }
-
-    /**
-     * Create an RPC identifier with a particular context reference.
-     *
-     * @param type RPC type, SchemaPath of its definition, may not be null
-     * @param contextReference Context reference, null means a global RPC identifier.
-     * @return An RPC identifier, guaranteed to be non-null. A global identifier is returned for a null or empty
-     *         context reference.
-     */
-    public static @NonNull DOMRpcIdentifier create(final @NonNull SchemaPath type,
-            final @Nullable YangInstanceIdentifier contextReference) {
-        if (contextReference == null || contextReference.isEmpty()) {
-            return new Global(type);
-        }
-        return new Local(type, contextReference);
-    }
-
-    public static DOMRpcIdentifier fromMdsal(final org.opendaylight.mdsal.dom.api.DOMRpcIdentifier mdsal) {
-        return create(mdsal.getType(), mdsal.getContextReference());
-    }
-
-    public org.opendaylight.mdsal.dom.api.DOMRpcIdentifier toMdsal() {
-        return org.opendaylight.mdsal.dom.api.DOMRpcIdentifier.create(type, getContextReference());
-    }
-
-    /**
-     * Return the RPC type.
-     *
-     * @return RPC type.
-     */
-    public final @NonNull SchemaPath getType() {
-        return type;
-    }
-
-    /**
-     * Return the RPC context reference. An empty context reference indicates a global RPC.
-     *
-     * @return RPC context reference.
-     */
-    public abstract @NonNull YangInstanceIdentifier getContextReference();
-
-    @Override
-    public final int hashCode() {
-        final int prime = 31;
-        int result = 1;
-        result = prime * result + type.hashCode();
-        result = prime * result + getContextReference().hashCode();
-        return result;
-    }
-
-    @Override
-    public final boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof DOMRpcIdentifier)) {
-            return false;
-        }
-        DOMRpcIdentifier other = (DOMRpcIdentifier) obj;
-        if (!type.equals(other.type)) {
-            return false;
-        }
-        return Objects.equals(getContextReference(), other.getContextReference());
-    }
-
-    @Override
-    public final String toString() {
-        return MoreObjects.toStringHelper(this).omitNullValues().add("type", type).add("contextReference",
-                getContextReference()).toString();
-    }
-}
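
As a rough usage sketch of the two factory methods above, assuming yangtools' SchemaPath.create(boolean, QName...) factory; the helper class below is illustrative only:

    import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.model.api.SchemaPath;

    // Illustrative helpers for building global and routed identifiers.
    final class RpcIdentifiers {
        private RpcIdentifiers() {
            // Utility class
        }

        // Global (RFC6020-style) identifier: empty context reference.
        static DOMRpcIdentifier globalOf(final QName rpcName) {
            return DOMRpcIdentifier.create(SchemaPath.create(true, rpcName));
        }

        // Routed (RFC7950 action-style) identifier: bound to a specific context reference.
        static DOMRpcIdentifier routedOf(final QName rpcName, final YangInstanceIdentifier contextReference) {
            return DOMRpcIdentifier.create(SchemaPath.create(true, rpcName), contextReference);
        }
    }
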
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementation.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementation.java
deleted file mode 100644 (file)
index 540f50b..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Interface implemented by an individual RPC implementation. This API allows for dispatch
- * implementations, e.g. an individual object handling a multitude of RPCs.
- */
-public interface DOMRpcImplementation {
-    /**
-     * Initiate invocation of the RPC. Implementations of this method are
-     * expected to not block on external resources.
-     *
-     * @param rpc RPC identifier which was invoked
-     * @param input Input arguments, null if the RPC does not take any.
-     * @return A {@link CheckedFuture} which will return either a result structure,
-     *         or report a subclass of {@link DOMRpcException} reporting a transport
-     *         error.
-     */
-    @NonNull CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@NonNull DOMRpcIdentifier rpc,
-            @Nullable NormalizedNode<?, ?> input);
-
-    /**
-     * Return the relative invocation cost of this implementation. The default implementation returns 0.
-     *
-     * @return Non-negative cost of invoking this implementation.
-     */
-    default long invocationCost() {
-        return 0;
-    }
-}
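
A sketch of a trivial implementation honouring the non-blocking contract above; it assumes a Guava version that still provides Futures.immediateCheckedFuture (consistent with the CheckedFuture-based signature), and the class itself is illustrative only:

    import com.google.common.util.concurrent.CheckedFuture;
    import com.google.common.util.concurrent.Futures;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    // Illustrative implementation which answers every invocation with a precomputed result.
    final class CannedRpcImplementation implements DOMRpcImplementation {
        private final DOMRpcResult cannedResult;

        CannedRpcImplementation(final DOMRpcResult cannedResult) {
            this.cannedResult = cannedResult;
        }

        @Override
        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final DOMRpcIdentifier rpc,
                final NormalizedNode<?, ?> input) {
            // Never blocks: hand back an already-completed future.
            return Futures.immediateCheckedFuture(cannedResult);
        }

        @Override
        public long invocationCost() {
            // A local, in-memory answer is as cheap as it gets.
            return 0;
        }
    }
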
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementationNotAvailableException.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementationNotAvailableException.java
deleted file mode 100644 (file)
index a856607..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import static java.util.Objects.requireNonNull;
-
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * Exception indicating that no implementation of the requested RPC service is available.
- */
-public class DOMRpcImplementationNotAvailableException extends DOMRpcException {
-    private static final long serialVersionUID = 1L;
-
-    public DOMRpcImplementationNotAvailableException(final @NonNull String format, final Object... args) {
-        super(String.format(format, args));
-    }
-
-    public DOMRpcImplementationNotAvailableException(final @NonNull Throwable cause, final @NonNull String format,
-            final Object... args) {
-        super(String.format(format, args), requireNonNull(cause));
-    }
-
-    public DOMRpcImplementationNotAvailableException(final String message, final Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementationRegistration.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcImplementationRegistration.java
deleted file mode 100644 (file)
index 0b8dff5..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-
-/**
- * A registration of a {@link DOMRpcImplementation}. Used to track and revoke a registration
- * with a {@link DOMRpcProviderService}.
- *
- * @param <T> RPC implementation type
- */
-public interface DOMRpcImplementationRegistration<T extends DOMRpcImplementation> extends ObjectRegistration<T> {
-    @Override
-    void close();
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcProviderService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcProviderService.java
deleted file mode 100644 (file)
index 2d0c136..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import java.util.Set;
-import org.eclipse.jdt.annotation.NonNull;
-
-/**
- * A {@link DOMService} which allows registration of RPC implementations with a conceptual router. The client
- * counterpart of this service is {@link DOMRpcService}.
- *
- * <p>
- * This interface supports both RFC6020 RPCs and RFC7950 actions (formerly known as 'Routed RPCs'). Invocation for
- * RFC6020 RPCs is always based on an empty context reference. Invocation of actions requires a non-empty context
- * reference and is matched against registered implementations as follows:
- * <ul>
- *     <li>First, attempt to look up the implementation based on an exact match. If a match is found, the invocation
- *         is routed to that implementation, returning its result.</li>
- *     <li>Second, attempt to look up the implementation which registered for the empty context reference. If such an
- *         implementation exists, invoke that implementation, returning its result.</li>
- *     <li>Otherwise, throw {@link DOMRpcImplementationNotAvailableException}.</li>
- * </ul>
- *
- * <p>
- * All implementations are required to perform these steps as specified above.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMRpcProviderService} instead
- */
-@Deprecated
-public interface DOMRpcProviderService extends DOMService {
-    /**
-     * Register an {@link DOMRpcImplementation} object with this service.
-     *
-     * @param implementation RPC implementation, must not be null
-     * @param rpcs Array of supported RPC identifiers. Must not be null, empty, or contain a null element.
-     *             Each identifier is added exactly once, no matter how many times it occurs.
-     * @return A {@link DOMRpcImplementationRegistration} object, guaranteed to be non-null.
-     * @throws NullPointerException if implementation or types is null
-     * @throws IllegalArgumentException if types is empty or contains a null element.
-     */
-    @NonNull <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(
-            @NonNull T implementation, @NonNull DOMRpcIdentifier... rpcs);
-
-    /**
-     * Register an {@link DOMRpcImplementation} object with this service.
-     *
-     * @param implementation RPC implementation, must not be null
-     * @param rpcs Set of supported RPC identifiers. Must not be null, empty, or contain a null element.
-     * @return A {@link DOMRpcImplementationRegistration} object, guaranteed to be non-null.
-     * @throws NullPointerException if implementation or types is null
-     * @throws IllegalArgumentException if types is empty or contains a null element.
-     */
-    @NonNull <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(
-            @NonNull T implementation, @NonNull Set<DOMRpcIdentifier> rpcs);
-}
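
A minimal registration sketch against the contract above; the CannedRpcProvider wrapper is illustrative only:

    import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;

    // Illustrative lifecycle wrapper: register on construction, withdraw on close().
    final class CannedRpcProvider implements AutoCloseable {
        private final DOMRpcImplementationRegistration<?> registration;

        CannedRpcProvider(final DOMRpcProviderService rpcProviderService, final DOMRpcImplementation implementation,
                final DOMRpcIdentifier... rpcs) {
            // Each identifier is added exactly once, no matter how many times it occurs.
            registration = rpcProviderService.registerRpcImplementation(implementation, rpcs);
        }

        @Override
        public void close() {
            // Withdraws the implementation from the conceptual router.
            registration.close();
        }
    }
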
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcResult.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcResult.java
deleted file mode 100644 (file)
index ef7c670..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Interface defining a result of an RPC call.
- */
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface DOMRpcResult extends org.opendaylight.mdsal.dom.api.DOMRpcResult {
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMRpcService.java
deleted file mode 100644 (file)
index 870fe82..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * A {@link DOMService} which allows clients to invoke RPCs. The conceptual model of this
- * service is that of a dynamic router, where the set of available RPC services can change
- * dynamically. The service allows users to add a listener to track the process of
- * RPCs becoming available.
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.api.DOMRpcService} instead
- */
-@Deprecated
-public interface DOMRpcService extends DOMService {
-    /**
-     * Initiate invocation of an RPC. This method is guaranteed to not block on any external
-     * resources.
-     *
-     * @param type SchemaPath of the RPC to be invoked
-     * @param input Input arguments, null if the RPC does not take any.
-     * @return A {@link CheckedFuture} which will return either a result structure,
-     *         or report a subclass of {@link DOMRpcException} reporting a transport
-     *         error.
-     */
-    @NonNull CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@NonNull SchemaPath type,
-            @Nullable NormalizedNode<?, ?> input);
-
-    /**
-     * Register a {@link DOMRpcAvailabilityListener} with this service to receive notifications
-     * about RPC implementations becoming (un)available. The listener will be invoked with the
-     * current implementations reported and will be kept up to date as implementations come and go.
-     *
-     * <p>
-     * Users should note that using a listener does not necessarily mean that
-     * {@link #invokeRpc(SchemaPath, NormalizedNode)} will not report a failure due to
-     * {@link DOMRpcImplementationNotAvailableException}, and that they need to be ready to handle it.
-     *
-     * <p>
-     * Implementations of this interface are encouraged to take reasonable precautions to prevent this scenario from
-     * occurring.
-     *
-     * @param listener {@link DOMRpcAvailabilityListener} instance to register
-     * @return A {@link ListenerRegistration} representing this registration. Performing
-     *         a {@link ListenerRegistration#close()} will cancel it. Returned object
-     *         is guaranteed to be non-null.
-     */
-    @NonNull <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@NonNull T listener);
-}
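
A blocking invocation sketch, purely to illustrate the failure mode described above (production code should stay asynchronous); the RpcInvoker class is illustrative only:

    import com.google.common.util.concurrent.CheckedFuture;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
    import org.opendaylight.yangtools.yang.model.api.SchemaPath;

    // Illustrative blocking wrapper around the asynchronous invokeRpc() contract.
    final class RpcInvoker {
        private final DOMRpcService rpcService;

        RpcInvoker(final DOMRpcService rpcService) {
            this.rpcService = rpcService;
        }

        DOMRpcResult invokeBlocking(final SchemaPath type, final NormalizedNode<?, ?> input) throws DOMRpcException {
            final CheckedFuture<DOMRpcResult, DOMRpcException> future = rpcService.invokeRpc(type, input);
            // checkedGet() rethrows transport-level failures as DOMRpcException subclasses,
            // including DOMRpcImplementationNotAvailableException.
            return future.checkedGet();
        }
    }
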
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMService.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMService.java
deleted file mode 100644 (file)
index 740cea8..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Marker interface for services which can be obtained from a {@link DOMMountPoint} instance. No further semantics are
- * implied.
- */
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface DOMService extends org.opendaylight.mdsal.dom.api.DOMService {
-
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMServiceExtension.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMServiceExtension.java
deleted file mode 100644 (file)
index 552b289..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import com.google.common.annotations.Beta;
-
-/**
- * Marker interface for services which expose additional functionality on top
- * of some base {@link DOMService}.
- */
-@Beta
-public interface DOMServiceExtension<T extends DOMExtensibleService<T, E>, E extends DOMServiceExtension<T, E>> {
-
-}
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMTransactionChain.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMTransactionChain.java
deleted file mode 100644 (file)
index 446960c..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * A chain of DOM Data transactions.
- *
- * <p>
- * Transactions in a chain need to be committed in sequence and each
- * transaction should see the effects of previous transactions as if they had already happened. A chain
- * makes no guarantees of atomicity; in fact, transactions are committed as soon as possible.
- *
- * <p>
- * This interface is a type capture of {@link TransactionChain} for DOM Data Contracts.
- */
-public interface DOMTransactionChain extends TransactionChain<YangInstanceIdentifier, NormalizedNode<?, ?>> {
-
-    @Override
-    DOMDataReadOnlyTransaction newReadOnlyTransaction();
-
-    @Override
-    DOMDataReadWriteTransaction newReadWriteTransaction();
-
-    @Override
-    DOMDataWriteTransaction newWriteOnlyTransaction();
-}
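
A short sketch of the sequencing guarantee: each write is its own transaction, yet the second one observes the first. It assumes the write transaction's commit() returns a ListenableFuture-compatible future; the ChainedWriter class is illustrative only:

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
    import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    // Illustrative writer issuing two chained, sequentially committed transactions.
    final class ChainedWriter {
        private final DOMTransactionChain chain;

        ChainedWriter(final DOMTransactionChain chain) {
            this.chain = chain;
        }

        ListenableFuture<?> writeTwice(final YangInstanceIdentifier path, final NormalizedNode<?, ?> first,
                final NormalizedNode<?, ?> second) {
            DOMDataWriteTransaction tx = chain.newWriteOnlyTransaction();
            tx.put(LogicalDatastoreType.OPERATIONAL, path, first);
            tx.commit();

            // The chain guarantees this transaction sees the effects of the previous one.
            tx = chain.newWriteOnlyTransaction();
            tx.put(LogicalDatastoreType.OPERATIONAL, path, second);
            return tx.commit();
        }
    }
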
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DefaultDOMRpcException.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DefaultDOMRpcException.java
deleted file mode 100644 (file)
index c07b31a..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.api;
-
-/**
- * Default implementation of DOMRpcException.
- *
- * @author Thomas Pantelis
- */
-public class DefaultDOMRpcException extends DOMRpcException {
-    private static final long serialVersionUID = 1L;
-
-    public DefaultDOMRpcException(String message, Throwable cause) {
-        super(message, cause);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/pom.xml b/opendaylight/md-sal/sal-dom-broker/pom.xml
deleted file mode 100644 (file)
index 2f66629..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-broker-impl</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>com.lmax</groupId>
-      <artifactId>disruptor</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-eos-dom-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-broker</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-impl</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-spi</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>mockito-configuration</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-inmemory-datastore</artifactId>
-      <!-- FIXME: this should be scope=test
-      <scope>test</scope-->
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.felix</groupId>
-        <artifactId>maven-bundle-plugin</artifactId>
-        <extensions>true</extensions>
-        <configuration>
-          <instructions>
-            <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
-            <Export-Package>
-                            <!--  Legacy code -->
-                            org.opendaylight.controller.sal.dom.broker,
-                            org.opendaylight.controller.sal.dom.broker.impl,
-                            org.opendaylight.controller.sal.dom.broker.util,
-                            org.opendaylight.controller.sal.dom.broker.spi,
-                            <!--sal.broker.impl is exported for sal-netconf-connector to use SchemaAwareRpcRegistry.-->
-                            <!-- TODO Remove sal.broker.impl from export when SchemaAwareRpcRegistry is not used in connector anymore -->
-                            org.opendaylight.controller.md.sal.dom.broker.impl,
-                            org.opendaylight.controller.md.sal.dom.broker.impl.*,
-                            org.opendaylight.controller.md.sal.dom.clustering.impl,
-            </Export-Package>
-          </instructions>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMDataBroker.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMDataBroker.java
deleted file mode 100755 (executable)
index 28e4d9b..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import com.google.common.collect.ImmutableMap;
-import java.util.Collections;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public abstract class AbstractDOMDataBroker extends AbstractDOMForwardedTransactionFactory<DOMStore>
-       implements DOMDataBroker {
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMDataBroker.class);
-
-    private final AtomicLong txNum = new AtomicLong();
-    private final AtomicLong chainNum = new AtomicLong();
-    private final Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> extensions;
-    private volatile AutoCloseable closeable;
-
-    protected AbstractDOMDataBroker(final Map<LogicalDatastoreType, DOMStore> datastores) {
-        super(datastores);
-
-        boolean treeChange = true;
-        for (DOMStore ds : datastores.values()) {
-            if (!(ds instanceof DOMStoreTreeChangePublisher)) {
-                treeChange = false;
-                break;
-            }
-        }
-
-        if (treeChange) {
-            extensions = ImmutableMap.<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension>of(
-                    DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
-                        @Override
-                        public <L extends DOMDataTreeChangeListener> ListenerRegistration<L>
-                            registerDataTreeChangeListener(final DOMDataTreeIdentifier treeId, final L listener) {
-                                DOMStore publisher = getTxFactories().get(treeId.getDatastoreType());
-                                checkState(publisher != null,
-                                           "Requested logical data store is not available.");
-
-                                return ((DOMStoreTreeChangePublisher) publisher)
-                                        .registerTreeChangeListener(treeId.getRootIdentifier(), listener);
-                            }
-                    });
-        } else {
-            extensions = Collections.emptyMap();
-        }
-    }
-
-    public void setCloseable(final AutoCloseable closeable) {
-        this.closeable = closeable;
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void close() {
-        super.close();
-
-        if (closeable != null) {
-            try {
-                closeable.close();
-            } catch (Exception e) {
-                LOG.debug("Error closing instance", e);
-            }
-        }
-    }
-
-    @Override
-    protected Object newTransactionIdentifier() {
-        return "DOM-" + txNum.getAndIncrement();
-    }
-
-    @Override
-    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
-        return extensions;
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
-        checkNotClosed();
-
-        final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(
-                LogicalDatastoreType.class);
-        for (Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
-            backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
-        }
-
-        final long chainId = chainNum.getAndIncrement();
-        LOG.debug("Transactoin chain {} created with listener {}, backing store chains {}", chainId, listener,
-                  backingChains);
-        return new DOMDataBrokerTransactionChainImpl(chainId, backingChains, this, listener);
-    }
-}
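
A sketch of how a client could consume the extension wired up above; it assumes the legacy DOMDataTreeIdentifier(LogicalDatastoreType, YangInstanceIdentifier) constructor and is illustrative only:

    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
    import org.opendaylight.yangtools.concepts.ListenerRegistration;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    // Illustrative helper: register a tree-change listener only when the broker supports it.
    final class TreeChangeSupport {
        private TreeChangeSupport() {
            // Utility class
        }

        static <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerIfSupported(
                final DOMDataBroker broker, final YangInstanceIdentifier subtree, final L listener) {
            final DOMDataBrokerExtension ext = broker.getSupportedExtensions().get(DOMDataTreeChangeService.class);
            if (!(ext instanceof DOMDataTreeChangeService)) {
                // At least one backing store does not publish tree changes.
                return null;
            }
            return ((DOMDataTreeChangeService) ext).registerDataTreeChangeListener(
                new DOMDataTreeIdentifier(LogicalDatastoreType.OPERATIONAL, subtree), listener);
        }
    }
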
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java
deleted file mode 100644 (file)
index ad2b2f0..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Composite DOM Transaction backed by {@link DOMStoreTransaction}.
- *
- * <p>
- * Abstract base for composite transactions, which provides access only to common
- * functionality, such as retrieval of a subtransaction, the close method and retrieval of the
- * identifier.
- *
- * @param <K>
- *            Subtransaction distinguisher
- * @param <T>
- *            Subtransaction type
- */
-abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTransaction> implements
-        AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
-
-    private final Map<K, T> backingTxs;
-    private final Object identifier;
-
-    /**
-     * Creates a new composite transaction.
-     *
-     * @param identifier
-     *            Identifier of transaction.
-     * @param backingTxs
-     *            Key,value map of backing transactions.
-     */
-    protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final Map<K, T> backingTxs) {
-        this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
-        this.backingTxs = Preconditions.checkNotNull(backingTxs, "Backing transactions should not be null");
-    }
-
-    /**
-     * Returns subtransaction associated with supplied key.
-     *
-     * @param key key
-     * @return subtransaction
-     * @throws NullPointerException
-     *             if key is null
-     * @throws IllegalArgumentException
-     *             if no subtransaction is associated with key.
-     */
-    protected final T getSubtransaction(final K key) {
-        Preconditions.checkNotNull(key, "key must not be null.");
-
-        final T ret = backingTxs.get(key);
-        Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
-        return ret;
-    }
-
-    /**
-     * Returns an immutable Iterable of all subtransactions.
-     *
-     */
-    protected Collection<T> getSubtransactions() {
-        return backingTxs.values();
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return identifier;
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    protected void closeSubtransactions() {
-        /*
-         * We share one exception for all failures, which are added
-         * to it as suppressed exceptions.
-         */
-        IllegalStateException failure = null;
-        for (T subtransaction : backingTxs.values()) {
-            try {
-                subtransaction.close();
-            } catch (Exception e) {
-                // If we have not allocated the failure yet, allocate it now
-                if (failure == null) {
-                    failure = new IllegalStateException("Uncaught exception occured during closing transaction", e);
-                } else {
-                    // Otherwise update it with the additional exceptions which occurred.
-                    failure.addSuppressed(e);
-                }
-            }
-        }
-        // If we have a failure, throw it after all attempts to close have been made.
-        if (failure != null) {
-            throw failure;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedTransactionFactory.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedTransactionFactory.java
deleted file mode 100644 (file)
index e309996..0000000
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collection;
-import java.util.EnumMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.Path;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Abstract composite transaction factory.
- *
- * <p>
- * Provides a convenient common implementation for composite DOM Transactions,
- * where each subtransaction is identified by a {@link LogicalDatastoreType} and the
- * implementation of a subtransaction is provided by a
- * {@link DOMStoreTransactionFactory}.
- *
- * <b>Note:</b> This class does not have a thread-safe implementation of {@link #close()};
- *   implementations may allow accessing and allocating new transactions while this
- *   instance is being closed.
- *
- * @param <T>
- *            Type of {@link DOMStoreTransactionFactory} factory.
- */
-abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements AutoCloseable {
-    @SuppressWarnings("rawtypes")
-    private static final AtomicIntegerFieldUpdater<AbstractDOMForwardedTransactionFactory> UPDATER =
-            AtomicIntegerFieldUpdater.newUpdater(AbstractDOMForwardedTransactionFactory.class, "closed");
-    private final Map<LogicalDatastoreType, T> storeTxFactories;
-    private volatile int closed = 0;
-
-    protected AbstractDOMForwardedTransactionFactory(final Map<LogicalDatastoreType, ? extends T> txFactories) {
-        this.storeTxFactories = new EnumMap<>(txFactories);
-    }
-
-    /**
-     * Implementations must return a unique identifier for each and every call of
-     * this method.
-     *
-     * @return A new, unique transaction identifier.
-     */
-    protected abstract Object newTransactionIdentifier();
-
-    /**
-     * User-supplied implementation of {@link DOMDataWriteTransaction#commit()}
-     * for transaction.
-     *
-     * <p>
-     * Callback invoked when {@link DOMDataWriteTransaction#commit()} is invoked
-     * on transaction created by this factory.
-     *
-     * @param transaction
-     *            Transaction on which {@link DOMDataWriteTransaction#commit()}
-     *            was invoked.
-     * @param cohorts
-     *            Iterable of cohorts for subtransactions associated with
-     *            the transaction being committed.
-     * @return a ListenableFuture. If commit coordination on the cohorts finishes successfully,
-     *         the value obtained from futureValueSupplier is returned from the Future; on failure,
-     *         the Future fails with a {@link TransactionCommitFailedException}.
-     */
-    protected abstract <T> ListenableFuture<T> commit(DOMDataWriteTransaction transaction,
-            Collection<DOMStoreThreePhaseCommitCohort> cohorts, Supplier<T> futureValueSupplier);
-
-    /**
-     * Creates a new composite read-only transaction.
-     *
-     * <p>
-     * Creates a new composite read-only transaction backed by one transaction
-     * per factory in {@link #getTxFactories()}.
-     *
-     * <p>
-     * Subtransaction for reading is selected by supplied
-     * {@link LogicalDatastoreType} as parameter for
-     * {@link DOMDataReadOnlyTransaction#read(LogicalDatastoreType,
-     * org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier)}.
-     *
-     * <p>
-     * The identifier of the returned transaction is retrieved via
-     * {@link #newTransactionIdentifier()}.
-     *
-     * @return New composite read-only transaction.
-     */
-    public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        checkNotClosed();
-
-        final Map<LogicalDatastoreType, DOMStoreReadTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
-        for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
-            txns.put(store.getKey(), store.getValue().newReadOnlyTransaction());
-        }
-        return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), txns);
-    }
-
-    /**
-     * Creates a new composite write-only transaction.
-     *
-     * <p>
-     * Creates a new composite write-only transaction backed by one write-only
-     * transaction per factory in {@link #getTxFactories()}.
-     *
-     * <p>
-     * The implementation of the composite write-only transaction is as follows:
-     *
-     * <p>
-     * <ul><li>
-     * {@link DOMDataWriteTransaction#put(LogicalDatastoreType, YangInstanceIdentifier, NormalizedNode)}
-     * - backing subtransaction is selected by {@link LogicalDatastoreType},
-     * {@link DOMStoreWriteTransaction#write(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier,
-     * org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode)}
-     * is invoked on selected subtransaction.
-     * </li><li>
-     * {@link DOMDataWriteTransaction#merge(LogicalDatastoreType, YangInstanceIdentifier, NormalizedNode)}
-     * - backing subtransaction is selected by {@link LogicalDatastoreType},
-     * {@link DOMStoreWriteTransaction#merge(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier,
-     * org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode)}
-     * is invoked on selected subtransaction.
-     * </li><li>
-     * {@link DOMDataWriteTransaction#delete(LogicalDatastoreType, Path)}
-     * {@link DOMStoreWriteTransaction#delete(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier)} is
-     * invoked on
-     * selected subtransaction.
-     * </li><li> {@link DOMDataWriteTransaction#commit()} - results in invoking
-     * {@link DOMStoreWriteTransaction#ready()}, gathering all resulting cohorts
-     * and then invoking the finalized implementation callback
-     * {@link #commit} with the transaction which was committed and the gathered results.
-     * </li>
-     * </ul>
-     *
-     * <p>
-     * The identifier of the returned transaction is generated via
-     * {@link #newTransactionIdentifier()}.
-     *
-     * @return New composite write-only transaction associated with this factory.
-     */
-    public final DOMDataWriteTransaction newWriteOnlyTransaction() {
-        checkNotClosed();
-
-        final Map<LogicalDatastoreType, DOMStoreWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
-        for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
-            txns.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
-        }
-        return new DOMForwardedWriteTransaction<>(newTransactionIdentifier(), txns, this);
-    }
-
-    /**
-     * Creates a new composite read-write transaction.
-     *
-     * <p>
-     * Creates a new composite read-write transaction backed by one read-write transaction per factory in
-     * {@link #getTxFactories()}.
-     *
-     * <p>
-     * The implementation of the composite read-write transaction is as follows:
-     *
-     * <p>
-     * <ul>
-     * <li>
-     * {@link org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction#read(LogicalDatastoreType,
-     * YangInstanceIdentifier)}
-     * - backing subtransaction is selected by {@link LogicalDatastoreType},
-     * {@link DOMStoreReadTransaction#read(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier)} is invoked
-     * on selected subtransaction.
-     * <li>
-     * {@link DOMDataWriteTransaction#put(LogicalDatastoreType,
-     * org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier,
-     * org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode)}
-     * - backing subtransaction is selected by {@link LogicalDatastoreType},
-     * {@link DOMStoreWriteTransaction#write(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier,
-     * org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode)}
-     * is invoked on selected subtransaction.
-     * <li>
-     * {@link DOMDataWriteTransaction#merge(LogicalDatastoreType,
-     * org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier,
-     * org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode)}
-     * - backing subtransaction is selected by {@link LogicalDatastoreType},
-     * {@link DOMStoreWriteTransaction#merge(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier,
-     * org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode)}
-     * is invoked on selected subtransaction.
-     * <li>
-     * {@link DOMDataWriteTransaction#delete(LogicalDatastoreType,
-     * org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier)}
-     * - backing subtransaction is selected by {@link LogicalDatastoreType},
-     * {@link DOMStoreWriteTransaction#delete(org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier)} is
-     * invoked on
-     * selected subtransaction.
-     * <li> {@link DOMDataWriteTransaction#commit()} - results in invoking
-     * {@link DOMStoreWriteTransaction#ready()}, gathering all resulting cohorts
-     * and then invoking the finalized implementation callback
-     * {@link #commit} with the transaction which was committed and the gathered results.
-     * </li>
-     * </ul>
-     *
-     * <p>
-     * The identifier of the returned transaction is generated via
-     * {@link #newTransactionIdentifier()}.
-     *
-     * @return New composite read-write transaction associated with this factory.
-     */
-    public final DOMDataReadWriteTransaction newReadWriteTransaction() {
-        checkNotClosed();
-
-        final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
-        for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
-            txns.put(store.getKey(), store.getValue().newReadWriteTransaction());
-        }
-        return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), txns, this);
-    }
-
-    /**
-     * Convenience accessor of backing factories intended to be used only by
-     * finalization of this class.
-     *
-     * <b>Note:</b>
-     * Finalization of this class may want to access other functionality of
-     * supplied Transaction factories.
-     *
-     * @return Map of backing transaction factories.
-     */
-    protected final Map<LogicalDatastoreType, T> getTxFactories() {
-        return storeTxFactories;
-    }
-
-    /**
-     * Checks if instance is not closed.
-     *
-     * @throws IllegalStateException If instance of this class was closed.
-     *
-     */
-    protected final void checkNotClosed() {
-        Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
-    }
-
-    @Override
-    public void close() {
-        final boolean success = UPDATER.compareAndSet(this, 0, 1);
-        Preconditions.checkState(success, "Transaction factory was already closed");
-    }
-}
-
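
A sketch of the routing described above: two writes on a single composite transaction, each dispatched to the backing store selected by its LogicalDatastoreType. The helper assumes it lives in the same impl package (the factory class is package-private) and is illustrative only:

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    // Illustrative composite write: each put() is forwarded to the store matching its datastore type.
    final class CompositeWriteExample {
        private CompositeWriteExample() {
            // Utility class
        }

        static ListenableFuture<?> writeToBothStores(final AbstractDOMForwardedTransactionFactory<?> factory,
                final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
            final DOMDataWriteTransaction tx = factory.newWriteOnlyTransaction();
            tx.put(LogicalDatastoreType.CONFIGURATION, path, data);
            tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
            // Readies all backing subtransactions and hands their cohorts to the commit() callback.
            return tx.commit();
        }
    }
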
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMRpcRoutingTableEntry.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMRpcRoutingTableEntry.java
deleted file mode 100644 (file)
index e3cf76d..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.CheckedFuture;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-abstract class AbstractDOMRpcRoutingTableEntry {
-    private final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> implementations;
-    private final SchemaPath schemaPath;
-
-    AbstractDOMRpcRoutingTableEntry(final SchemaPath schemaPath,
-        final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> implementations) {
-        this.schemaPath = Preconditions.checkNotNull(schemaPath);
-        this.implementations = Preconditions.checkNotNull(implementations);
-    }
-
-    final SchemaPath getSchemaPath() {
-        return schemaPath;
-    }
-
-    final List<DOMRpcImplementation> getImplementations(final YangInstanceIdentifier context) {
-        return implementations.get(context);
-    }
-
-    final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> getImplementations() {
-        return implementations;
-    }
-
-    final boolean containsContext(final YangInstanceIdentifier contextReference) {
-        return implementations.containsKey(contextReference);
-    }
-
-    final Set<YangInstanceIdentifier> registeredIdentifiers(final DOMRpcAvailabilityListener listener) {
-        return Maps.filterValues(implementations, list -> list.stream().anyMatch(listener::acceptsImplementation))
-                .keySet();
-    }
-
-    /**
-     * Returns a new routing table entry with the specified implementation added.
-     *
-     * @param implementation RPC implementation
-     * @param newRpcs List of new RPCs, must be mutable
-     */
-    final AbstractDOMRpcRoutingTableEntry add(final DOMRpcImplementation implementation,
-            final List<YangInstanceIdentifier> newRpcs) {
-        final Builder<YangInstanceIdentifier, List<DOMRpcImplementation>> vb = ImmutableMap.builder();
-        for (final Entry<YangInstanceIdentifier, List<DOMRpcImplementation>> ve : implementations.entrySet()) {
-            if (newRpcs.remove(ve.getKey())) {
-                final List<DOMRpcImplementation> i = new ArrayList<>(ve.getValue().size() + 1);
-                i.addAll(ve.getValue());
-                i.add(implementation);
-
-                // New implementation is at the end, this will move it to be the last among implementations
-                // with equal cost -- relying on sort() being stable.
-                i.sort(Comparator.comparingLong(DOMRpcImplementation::invocationCost));
-                vb.put(ve.getKey(), i);
-            } else {
-                vb.put(ve);
-            }
-        }
-        for (final YangInstanceIdentifier ii : newRpcs) {
-            final List<DOMRpcImplementation> impl = new ArrayList<>(1);
-            impl.add(implementation);
-            vb.put(ii, impl);
-        }
-
-        return newInstance(vb.build());
-    }
-
-    final AbstractDOMRpcRoutingTableEntry remove(final DOMRpcImplementation implementation,
-            final List<YangInstanceIdentifier> removed) {
-        final Builder<YangInstanceIdentifier, List<DOMRpcImplementation>> vb = ImmutableMap.builder();
-        for (final Entry<YangInstanceIdentifier, List<DOMRpcImplementation>> ve : implementations.entrySet()) {
-            if (removed.remove(ve.getKey())) {
-                final List<DOMRpcImplementation> i = new ArrayList<>(ve.getValue());
-                i.remove(implementation);
-                // We could trimToSize(), but that may perform another copy just to get rid
-                // of a single element. That is probably not worth the trouble.
-                if (!i.isEmpty()) {
-                    vb.put(ve.getKey(), i);
-                }
-            } else {
-                vb.put(ve);
-            }
-        }
-
-        final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> v = vb.build();
-        return v.isEmpty() ? null : newInstance(v);
-    }
-
-    protected abstract CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(NormalizedNode<?, ?> input);
-
-    protected abstract AbstractDOMRpcRoutingTableEntry newInstance(
-            Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls);
-}
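
The stable cost-ordered insertion used by add() can be seen in isolation in the following sketch, which merely mirrors the code above (the CostOrdering class is illustrative only):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;

    // Illustrative reproduction of the insertion ordering used in add().
    final class CostOrdering {
        private CostOrdering() {
            // Utility class
        }

        static List<DOMRpcImplementation> withImplementation(final List<DOMRpcImplementation> current,
                final DOMRpcImplementation newImpl) {
            final List<DOMRpcImplementation> result = new ArrayList<>(current.size() + 1);
            result.addAll(current);
            result.add(newImpl);
            // sort() is stable, so the newcomer stays last among implementations of equal cost.
            result.sort(Comparator.comparingLong(DOMRpcImplementation::invocationCost));
            return result;
        }
    }
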
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/CommitCoordinationTask.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/CommitCoordinationTask.java
deleted file mode 100644 (file)
index 810f09a..0000000
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collection;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.util.DurationStatisticsTracker;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of a blocking three-phase commit-coordination task without
- * support for cancellation.
- */
-final class CommitCoordinationTask<T> implements Callable<T> {
-    private enum Phase {
-        canCommit,
-        preCommit,
-        doCommit,
-    }
-
-    private static final Logger LOG = LoggerFactory.getLogger(CommitCoordinationTask.class);
-    private final Collection<DOMStoreThreePhaseCommitCohort> cohorts;
-    private final DurationStatisticsTracker commitStatTracker;
-    private final DOMDataWriteTransaction tx;
-    private final Supplier<T> futureValueSupplier;
-
-    CommitCoordinationTask(final DOMDataWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
-            final DurationStatisticsTracker commitStatTracker,
-            final Supplier<T> futureValueSupplier) {
-        this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
-        this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
-        this.commitStatTracker = commitStatTracker;
-        this.futureValueSupplier = futureValueSupplier;
-    }
-
-    @Override
-    public T call() throws TransactionCommitFailedException {
-        final long startTime = commitStatTracker != null ? System.nanoTime() : 0;
-
-        Phase phase = Phase.canCommit;
-
-        try {
-            LOG.debug("Transaction {}: canCommit Started", tx.getIdentifier());
-            canCommitBlocking();
-
-            phase = Phase.preCommit;
-            LOG.debug("Transaction {}: preCommit Started", tx.getIdentifier());
-            preCommitBlocking();
-
-            phase = Phase.doCommit;
-            LOG.debug("Transaction {}: doCommit Started", tx.getIdentifier());
-            commitBlocking();
-
-            LOG.debug("Transaction {}: doCommit completed", tx.getIdentifier());
-            return futureValueSupplier.get();
-        } catch (final TransactionCommitFailedException e) {
-            LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, e);
-            abortBlocking(e);
-            throw e;
-        } finally {
-            if (commitStatTracker != null) {
-                commitStatTracker.addDuration(System.nanoTime() - startTime);
-            }
-        }
-    }
-
-    /**
-     * Invokes canCommit on the underlying cohorts and blocks until
-     * all results have been returned.
-     *
-     * <p>
-     * The valid state transition is from SUBMITTED to CAN_COMMIT; if the
-     * current phase is not SUBMITTED, an IllegalStateException is thrown.
-     *
-     * @throws TransactionCommitFailedException
-     *             if any of the cohorts failed canCommit
-     *
-     */
-    private void canCommitBlocking() throws TransactionCommitFailedException {
-        for (final ListenableFuture<?> canCommit : canCommitAll()) {
-            try {
-                final Boolean result = (Boolean)canCommit.get();
-                if (result == null || !result) {
-                    throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
-                }
-            } catch (InterruptedException | ExecutionException e) {
-                throw TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e);
-            }
-        }
-    }
-
-    /**
-     * Invokes canCommit on the underlying cohorts and returns their futures;
-     * the overall canCommit succeeds only if every future yields
-     * {@link Boolean#TRUE}.
-     *
-     * <p>
-     * The valid state transition is from SUBMITTED to CAN_COMMIT; if the
-     * current phase is not SUBMITTED, an IllegalStateException is thrown.
-     *
-     * @return Array of futures, one per cohort, from the canCommit phase.
-     *
-     */
-    private ListenableFuture<?>[] canCommitAll() {
-        final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
-        int index = 0;
-        for (final DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-            ops[index++] = cohort.canCommit();
-        }
-        return ops;
-    }
-
-    /**
-     * Invokes preCommit on the underlying cohorts and blocks until
-     * all results have been returned.
-     *
-     * <p>
-     * The valid state transition is from CAN_COMMIT to PRE_COMMIT; if the
-     * current state is not CAN_COMMIT, an IllegalStateException is thrown.
-     *
-     * @throws TransactionCommitFailedException
-     *             if any of the cohorts failed preCommit
-     *
-     */
-    private void preCommitBlocking() throws TransactionCommitFailedException {
-        final ListenableFuture<?>[] preCommitFutures = preCommitAll();
-        try {
-            for (final ListenableFuture<?> future : preCommitFutures) {
-                future.get();
-            }
-        } catch (InterruptedException | ExecutionException e) {
-            throw TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER.apply(e);
-        }
-    }
-
-    /**
-     * Invokes preCommit on the underlying cohorts and returns their futures,
-     * which complete once preCommit on each cohort has completed or failed.
-     *
-     * <p>
-     * The valid state transition is from CAN_COMMIT to PRE_COMMIT; if the
-     * current state is not CAN_COMMIT, an IllegalStateException is thrown.
-     *
-     * @return Array of futures, one per cohort, from the preCommit phase.
-     *
-     */
-    private ListenableFuture<?>[] preCommitAll() {
-        final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
-        int index = 0;
-        for (final DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-            ops[index++] = cohort.preCommit();
-        }
-        return ops;
-    }
-
-    /**
-     * Invokes commit on the underlying cohorts and blocks until
-     * all results have been returned.
-     *
-     * <p>
-     * The valid state transition is from PRE_COMMIT to COMMIT; otherwise an
-     * IllegalStateException is thrown.
-     *
-     * @throws TransactionCommitFailedException
-     *             if any of the cohorts failed commit
-     *
-     */
-    private void commitBlocking() throws TransactionCommitFailedException {
-        final ListenableFuture<?>[] commitFutures = commitAll();
-        try {
-            for (final ListenableFuture<?> future : commitFutures) {
-                future.get();
-            }
-        } catch (InterruptedException | ExecutionException e) {
-            throw TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e);
-        }
-    }
-
-    /**
-     * Invokes commit on the underlying cohorts and returns their futures,
-     * which complete once commit on each cohort has completed.
-     *
-     * <p>
-     * The valid state transition is from PRE_COMMIT to COMMIT; otherwise an
-     * IllegalStateException is thrown.
-     *
-     * @return Array of futures, one per cohort, from the commit phase.
-     */
-    private ListenableFuture<?>[] commitAll() {
-        final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
-        int index = 0;
-        for (final DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-            ops[index++] = cohort.commit();
-        }
-        return ops;
-    }
-
-    /**
-     * Aborts the transaction.
-     *
-     * <p>
-     * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all cohorts
-     * and blocks for all results. If any abort fails, an IllegalStateException
-     * carrying the abort failure is thrown instead.
-     *
-     * <p>
-     * If all aborts were successful, the supplied exception is rethrown.
-     *
-     * @param originalCause
-     *            Exception which should be used to fail the transaction for
-     *            consumers of the transaction future and listeners of
-     *            transaction failure.
-     * @throws TransactionCommitFailedException
-     *             always, rethrowing the supplied originalCause once all
-     *             aborts have completed.
-     * @throws IllegalStateException
-     *             if an abort failed.
-     */
-    private void abortBlocking(
-            final TransactionCommitFailedException originalCause) throws TransactionCommitFailedException {
-        Exception cause = originalCause;
-        try {
-            abortAsyncAll().get();
-        } catch (InterruptedException | ExecutionException e) {
-            LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
-            cause = new IllegalStateException("Abort failed.", e);
-            cause.addSuppressed(e);
-        }
-        Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
-    }
-
-    /**
-     * Invokes abort on the underlying cohorts and returns a future which
-     * completes once all cohort aborts have completed.
-     *
-     * @return Future which completes once all cohorts have completed
-     *         abort.
-     */
-    @SuppressWarnings({"unchecked", "rawtypes"})
-    private ListenableFuture<Void> abortAsyncAll() {
-
-        final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohorts.size()];
-        int index = 0;
-        for (final DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-            ops[index++] = cohort.abort();
-        }
-
-        /*
-         * We return all futures as a list, not only the successful ones, so
-         * that the composite future fails if any of them failed.
-         * See Futures.allAsList for details.
-         */
-        return (ListenableFuture) Futures.allAsList(ops);
-    }
-}
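The removed CommitCoordinationTask walks every cohort through canCommit, preCommit and commit, and aborts all cohorts if any phase fails. A minimal sketch of that blocking three-phase sequence, assuming a simplified hypothetical Cohort interface rather than the real DOMStoreThreePhaseCommitCohort contract:

    import java.util.List;
    import java.util.concurrent.CompletableFuture;

    final class ThreePhaseCommitSketch {
        // Hypothetical, simplified cohort contract used only for illustration.
        interface Cohort {
            CompletableFuture<Boolean> canCommit();
            CompletableFuture<Void> preCommit();
            CompletableFuture<Void> commit();
            CompletableFuture<Void> abort();
        }

        static void commitBlocking(final List<Cohort> cohorts) throws Exception {
            try {
                // Phase 1: every cohort must vote yes before we may proceed.
                for (Cohort cohort : cohorts) {
                    if (!Boolean.TRUE.equals(cohort.canCommit().get())) {
                        throw new IllegalStateException("canCommit refused");
                    }
                }
                // Phase 2: prepare all cohorts.
                for (Cohort cohort : cohorts) {
                    cohort.preCommit().get();
                }
                // Phase 3: commit all cohorts.
                for (Cohort cohort : cohorts) {
                    cohort.commit().get();
                }
            } catch (Exception e) {
                // Any failure triggers abort on every cohort before the exception propagates.
                for (Cohort cohort : cohorts) {
                    cohort.abort();
                }
                throw e;
            }
        }
    }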
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java
deleted file mode 100755 (executable)
index 4c131e1..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * NormalizedNode implementation of {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain} which
- * is backed by several {@link DOMStoreTransactionChain}s, differentiated by the provided
- * {@link org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType}.
- */
-final class DOMDataBrokerTransactionChainImpl extends
-        AbstractDOMForwardedTransactionFactory<DOMStoreTransactionChain> implements DOMTransactionChain {
-    private enum State {
-        RUNNING, CLOSING, CLOSED, FAILED,
-    }
-
-    private static final AtomicIntegerFieldUpdater<DOMDataBrokerTransactionChainImpl> COUNTER_UPDATER
-            = AtomicIntegerFieldUpdater.newUpdater(DOMDataBrokerTransactionChainImpl.class, "counter");
-    private static final AtomicReferenceFieldUpdater<DOMDataBrokerTransactionChainImpl, State> STATE_UPDATER
-            = AtomicReferenceFieldUpdater.newUpdater(DOMDataBrokerTransactionChainImpl.class, State.class, "state");
-    private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
-    private final AtomicLong txNum = new AtomicLong();
-    private final AbstractDOMDataBroker broker;
-    private final TransactionChainListener listener;
-    private final long chainId;
-
-    private volatile State state = State.RUNNING;
-    private volatile int counter = 0;
-
-    /**
-     * DOMDataBrokerTransactionChainImpl constructor.
-     *
-     * @param chainId  ID of the transaction chain
-     * @param chains   Backing {@link DOMStoreTransactionChain}s.
-     * @param broker   Commit coordinator which should be used to coordinate commits
-     *                 of transactions produced by this chain.
-     * @param listener Listener which listens on transaction chain events.
-     * @throws NullPointerException if any of the arguments is null.
-     */
-    DOMDataBrokerTransactionChainImpl(final long chainId,
-                                             final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
-                                             final AbstractDOMDataBroker broker,
-                                             final TransactionChainListener listener) {
-        super(chains);
-        this.chainId = chainId;
-        this.broker = Preconditions.checkNotNull(broker);
-        this.listener = Preconditions.checkNotNull(listener);
-    }
-
-    private void checkNotFailed() {
-        Preconditions.checkState(state != State.FAILED, "Transaction chain has failed");
-    }
-
-    @Override
-    protected Object newTransactionIdentifier() {
-        return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
-    }
-
-    @Override
-    public <T> ListenableFuture<T> commit(final DOMDataWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts, final Supplier<T> futureValueSupplier) {
-        checkNotFailed();
-        checkNotClosed();
-
-        final ListenableFuture<T> ret = broker.commit(transaction, cohorts, futureValueSupplier);
-
-        COUNTER_UPDATER.incrementAndGet(this);
-        Futures.addCallback(ret, new FutureCallback<T>() {
-            @Override
-            public void onSuccess(final T result) {
-                transactionCompleted();
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                transactionFailed(transaction, throwable);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return ret;
-    }
-
-    @Override
-    public void close() {
-        final boolean success = STATE_UPDATER.compareAndSet(this, State.RUNNING, State.CLOSING);
-        if (!success) {
-            LOG.debug("Chain {} is no longer running", this);
-            return;
-        }
-
-        super.close();
-        for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
-            subChain.close();
-        }
-
-        if (counter == 0) {
-            finishClose();
-        }
-    }
-
-    private void finishClose() {
-        state = State.CLOSED;
-        listener.onTransactionChainSuccessful(this);
-    }
-
-    private void transactionCompleted() {
-        if (COUNTER_UPDATER.decrementAndGet(this) == 0 && state == State.CLOSING) {
-            finishClose();
-        }
-    }
-
-    private void transactionFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
-        state = State.FAILED;
-        LOG.debug("Transaction chain {} failed.", this, cause);
-        listener.onTransactionChainFailed(this, tx, cause);
-    }
-}
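The removed transaction chain only moves to CLOSING in close(); the final transition to CLOSED is performed by whichever transaction drains the outstanding counter to zero. A minimal sketch of that close-when-drained pattern with atomic field updaters; the names are illustrative, not the production class:

    import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    final class DrainingCloser {
        enum State { RUNNING, CLOSING, CLOSED }

        private static final AtomicIntegerFieldUpdater<DrainingCloser> COUNTER =
                AtomicIntegerFieldUpdater.newUpdater(DrainingCloser.class, "outstanding");
        private static final AtomicReferenceFieldUpdater<DrainingCloser, State> STATE =
                AtomicReferenceFieldUpdater.newUpdater(DrainingCloser.class, State.class, "state");

        private volatile int outstanding;
        private volatile State state = State.RUNNING;

        void taskStarted() {
            COUNTER.incrementAndGet(this);
        }

        void taskCompleted() {
            // The completion that drops the counter to zero while we are CLOSING
            // performs the final transition.
            if (COUNTER.decrementAndGet(this) == 0 && state == State.CLOSING) {
                state = State.CLOSED;
            }
        }

        void close() {
            // Only the RUNNING -> CLOSING transition may succeed here; if nothing is
            // outstanding we finish immediately, otherwise taskCompleted() will.
            if (STATE.compareAndSet(this, State.RUNNING, State.CLOSING) && outstanding == 0) {
                state = State.CLOSED;
            }
        }
    }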
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java
deleted file mode 100644 (file)
index 124bf9f..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Read-only transaction composed of several
- * {@link DOMStoreReadTransaction} transactions. The subtransaction is selected by the
- * {@link LogicalDatastoreType} parameter of
- * {@link #read(LogicalDatastoreType, YangInstanceIdentifier)}.
- */
-class DOMForwardedReadOnlyTransaction extends
-        AbstractDOMForwardedCompositeTransaction<LogicalDatastoreType, DOMStoreReadTransaction> implements
-        DOMDataReadOnlyTransaction {
-
-    protected DOMForwardedReadOnlyTransaction(final Object identifier,
-            final Map<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
-        super(identifier, backingTxs);
-    }
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
-            final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return getSubtransaction(store).read(path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(
-        final LogicalDatastoreType store,
-        final YangInstanceIdentifier path) {
-        return getSubtransaction(store).exists(path);
-    }
-
-    @Override
-    public void close() {
-        closeSubtransactions();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java
deleted file mode 100644 (file)
index 3b5dfeb..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Read-write transaction composed of several
- * {@link DOMStoreReadWriteTransaction} transactions. The subtransaction is selected by the
- * {@link LogicalDatastoreType} parameter of:
- *
- * <ul>
- * <li>{@link #read(LogicalDatastoreType, YangInstanceIdentifier)}
- * <li>{@link #put(LogicalDatastoreType, YangInstanceIdentifier, NormalizedNode)}
- * <li>{@link #delete(LogicalDatastoreType, YangInstanceIdentifier)}
- * <li>{@link #merge(LogicalDatastoreType, YangInstanceIdentifier, NormalizedNode)}
- * </ul>
- * {@link #commit()} results in the invocation of
- * {@link DOMDataCommitImplementation#submit(org.opendaylight.controller.md.sal.dom
- * .api.DOMDataWriteTransaction, Iterable)}
- * with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}s
- * for the underlying transactions.
- *
- */
-final class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction>
-        implements DOMDataReadWriteTransaction {
-    protected DOMForwardedReadWriteTransaction(final Object identifier,
-            final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
-            final AbstractDOMForwardedTransactionFactory<?> commitImpl) {
-        super(identifier, backingTxs, commitImpl);
-    }
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
-            final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return getSubtransaction(store).read(path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(
-        final LogicalDatastoreType store,
-        final YangInstanceIdentifier path) {
-        return getSubtransaction(store).exists(path);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java
deleted file mode 100644 (file)
index 145bfe0..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Write transaction composed of several
- * {@link DOMStoreWriteTransaction} transactions. A subtransaction is selected by the
- * {@link LogicalDatastoreType} parameter of:
- *
- * <p>
- * <ul>
- * <li>{@link #put(LogicalDatastoreType, YangInstanceIdentifier, NormalizedNode)}
- * <li>{@link #delete(LogicalDatastoreType, YangInstanceIdentifier)}
- * <li>{@link #merge(LogicalDatastoreType, YangInstanceIdentifier, NormalizedNode)}
- * </ul>
- *
- * <p>
- * {@link #commit()} results in the invocation of
- * {@link DOMDataCommitImplementation#submit(org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction,
- * Iterable)} with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}s
- * for the underlying transactions.
- *
- * @param <T> Subtype of {@link DOMStoreWriteTransaction} which is used as
- *            subtransaction.
- */
-class DOMForwardedWriteTransaction<T extends DOMStoreWriteTransaction> extends
-        AbstractDOMForwardedCompositeTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction,
-            AbstractDOMForwardedTransactionFactory>
-            IMPL_UPDATER = AtomicReferenceFieldUpdater
-            .newUpdater(DOMForwardedWriteTransaction.class, AbstractDOMForwardedTransactionFactory.class, "commitImpl");
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, Future> FUTURE_UPDATER
-            = AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, Future.class, "commitFuture");
-    private static final Logger LOG = LoggerFactory.getLogger(DOMForwardedWriteTransaction.class);
-    private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
-
-    /**
-     * Implementation of real commit. It also acts as an indication that
-     * the transaction is running -- which we flip atomically using
-     * {@link #IMPL_UPDATER}.
-     */
-    private volatile AbstractDOMForwardedTransactionFactory<?> commitImpl;
-
-    /**
-     * Future task of transaction commit. It starts off as null, but is
-     * set appropriately on {@link #commit()} and {@link #cancel()} via
-     * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
-     *
-     * <p>
-     * Lazy set is safe for use because it is only referenced in the
-     * {@link #cancel()} slow path, where we will busy-wait for it. The
-     * fast path gets the benefit of a store-store barrier instead of the
-     * usual store-load barrier.
-     */
-    private volatile Future<?> commitFuture;
-
-    protected DOMForwardedWriteTransaction(final Object identifier, final Map<LogicalDatastoreType, T> backingTxs,
-                                           final AbstractDOMForwardedTransactionFactory<?> commitImpl) {
-        super(identifier, backingTxs);
-        this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                    final NormalizedNode<?, ?> data) {
-        checkRunning(commitImpl);
-        getSubtransaction(store).write(path, data);
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        checkRunning(commitImpl);
-        getSubtransaction(store).delete(path);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                      final NormalizedNode<?, ?> data) {
-        checkRunning(commitImpl);
-        getSubtransaction(store).merge(path, data);
-    }
-
-    @Override
-    public boolean cancel() {
-        final AbstractDOMForwardedTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
-        if (impl != null) {
-            LOG.trace("Transaction {} cancelled before submit", getIdentifier());
-            FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
-            closeSubtransactions();
-            return true;
-        }
-
-        // The transaction is in process of being submitted or cancelled. Busy-wait
-        // for the corresponding future.
-        Future<?> future;
-        do {
-            future = commitFuture;
-        } while (future == null);
-
-        return future.cancel(false);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return FluentFuture.from(doCommit(CommitInfo::empty));
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private <V> ListenableFuture<V> doCommit(Supplier<V> futureValueSupplier) {
-        final AbstractDOMForwardedTransactionFactory<?> impl = IMPL_UPDATER.getAndSet(this, null);
-        checkRunning(impl);
-
-        final Collection<T> txns = getSubtransactions();
-        final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
-
-        ListenableFuture<V> ret;
-        try {
-            for (DOMStoreWriteTransaction txn : txns) {
-                cohorts.add(txn.ready());
-            }
-
-            ret = impl.commit(this, cohorts, futureValueSupplier);
-        } catch (RuntimeException e) {
-            ret = FluentFuture.from(Futures.immediateFailedFuture(
-                    TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e)));
-        }
-        FUTURE_UPDATER.lazySet(this, ret);
-        return ret;
-    }
-
-    private void checkRunning(final AbstractDOMForwardedTransactionFactory<?> impl) {
-        Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
-    }
-}
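The removed DOMForwardedWriteTransaction marks itself as no longer running by atomically swapping its commitImpl reference to null, so only one of commit() and cancel() can win the race. A minimal sketch of that getAndSet-to-null guard, with illustrative names:

    import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

    final class SingleSubmitGuard {
        private static final AtomicReferenceFieldUpdater<SingleSubmitGuard, Runnable> IMPL_UPDATER =
                AtomicReferenceFieldUpdater.newUpdater(SingleSubmitGuard.class, Runnable.class, "impl");

        // A non-null value doubles as the "still running" indicator.
        private volatile Runnable impl;

        SingleSubmitGuard(final Runnable impl) {
            this.impl = impl;
        }

        boolean submit() {
            // getAndSet(null) succeeds for exactly one caller; every later caller
            // observes null and knows the transaction is no longer running.
            final Runnable current = IMPL_UPDATER.getAndSet(this, null);
            if (current == null) {
                return false;
            }
            current.run();
            return true;
        }
    }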
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouter.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouter.java
deleted file mode 100644 (file)
index c2cf65c..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListener;
-import org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListenerRegistry;
-import org.opendaylight.controller.sal.core.compat.LegacyDOMNotificationServiceAdapter;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Joint implementation of {@link DOMNotificationPublishService} and {@link DOMNotificationService}. Provides
- * routing of notifications from publishers to subscribers.
- *
- * <p>
- * Internal implementation works by allocating a two-handler Disruptor. The first handler delivers notifications
- * to subscribed listeners and the second one notifies whoever may be listening on the returned future. Registration
- * state tracking is performed by a simple immutable multimap -- when a registration or unregistration occurs we
- * re-generate the entire map from scratch and set it atomically. While registrations/unregistrations synchronize
- * on this instance, notifications do not take any locks here.
- *
- * <p>
- * The fully-blocking {@link #putNotification(DOMNotification)} and the non-blocking
- * {@link #offerNotification(DOMNotification)} are realized using the Disruptor's native operations.
- * The bounded-blocking {@link #offerNotification(DOMNotification, long, TimeUnit)}
- * is realized by arming a background wakeup interrupt.
- */
-@SuppressFBWarnings(value = "NP_NONNULL_PARAM_VIOLATION", justification = "Void is the only allowed value")
-public final class DOMNotificationRouter extends LegacyDOMNotificationServiceAdapter implements AutoCloseable,
-        DOMNotificationPublishService, DOMNotificationSubscriptionListenerRegistry {
-
-    private final org.opendaylight.mdsal.dom.api.DOMNotificationPublishService delegateNotificationPublishService;
-    private final org.opendaylight.mdsal.dom.spi.DOMNotificationSubscriptionListenerRegistry delegateListenerRegistry;
-
-    private DOMNotificationRouter(
-            final org.opendaylight.mdsal.dom.api.DOMNotificationService delegateNotificationService,
-            final org.opendaylight.mdsal.dom.api.DOMNotificationPublishService delegateNotificationPublishService,
-            final org.opendaylight.mdsal.dom.spi.DOMNotificationSubscriptionListenerRegistry delegateListenerRegistry) {
-        super(delegateNotificationService);
-        this.delegateNotificationPublishService = delegateNotificationPublishService;
-        this.delegateListenerRegistry = delegateListenerRegistry;
-    }
-
-    public static DOMNotificationRouter create(final int queueDepth) {
-        final org.opendaylight.mdsal.dom.broker.DOMNotificationRouter delegate =
-                org.opendaylight.mdsal.dom.broker.DOMNotificationRouter.create(queueDepth);
-        return create(delegate, delegate, delegate);
-    }
-
-    public static DOMNotificationRouter create(final int queueDepth, final long spinTime, final long parkTime,
-                                               final TimeUnit unit) {
-        final org.opendaylight.mdsal.dom.broker.DOMNotificationRouter delegate =
-                org.opendaylight.mdsal.dom.broker.DOMNotificationRouter.create(queueDepth, spinTime, parkTime, unit);
-        return create(delegate, delegate, delegate);
-    }
-
-    public static DOMNotificationRouter create(
-            final org.opendaylight.mdsal.dom.api.DOMNotificationService delegateNotificationService,
-            final org.opendaylight.mdsal.dom.api.DOMNotificationPublishService delegateNotificationPublishService,
-            final org.opendaylight.mdsal.dom.spi.DOMNotificationSubscriptionListenerRegistry delegateListenerRegistry) {
-        return new DOMNotificationRouter(delegateNotificationService, delegateNotificationPublishService,
-                delegateListenerRegistry);
-    }
-
-    @Override
-    public synchronized <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(
-            final T listener, final Collection<SchemaPath> types) {
-        return super.registerNotificationListener(listener, types);
-    }
-
-    @Override
-    public <L extends DOMNotificationSubscriptionListener> ListenerRegistration<L> registerSubscriptionListener(
-            final L listener) {
-        return delegateListenerRegistry.registerSubscriptionListener(listener);
-    }
-
-    @Override
-    public ListenableFuture<?> putNotification(final DOMNotification notification) throws InterruptedException {
-        return delegateNotificationPublishService.putNotification(notification);
-    }
-
-    @Override
-    public ListenableFuture<?> offerNotification(final DOMNotification notification) {
-        return delegateNotificationPublishService.offerNotification(notification);
-    }
-
-    @Override
-    public ListenableFuture<?> offerNotification(final DOMNotification notification, final long timeout,
-                                                 final TimeUnit unit) throws InterruptedException {
-        return delegateNotificationPublishService.offerNotification(notification, timeout, unit);
-    }
-
-    @Override
-    public void close() {
-    }
-}
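The Javadoc above describes registration tracking as an immutable snapshot that is rebuilt under synchronization on every (un)registration and read without locks during delivery. A minimal sketch of that scheme, assuming a plain Consumer-based listener type instead of the real DOM listener interfaces:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Consumer;

    final class SnapshotListenerRegistry<T> {
        // The current immutable snapshot of subscribers; replaced wholesale on change.
        private volatile List<Consumer<T>> listeners = List.of();

        synchronized void register(final Consumer<T> listener) {
            final List<Consumer<T>> next = new ArrayList<>(listeners);
            next.add(listener);
            listeners = List.copyOf(next);
        }

        synchronized void unregister(final Consumer<T> listener) {
            final List<Consumer<T>> next = new ArrayList<>(listeners);
            next.remove(listener);
            listeners = List.copyOf(next);
        }

        void deliver(final T notification) {
            // Lock-free read of whatever snapshot is current at delivery time.
            for (Consumer<T> listener : listeners) {
                listener.accept(notification);
            }
        }
    }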
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouterEvent.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouterEvent.java
deleted file mode 100644 (file)
index 1aa52ea..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import com.lmax.disruptor.EventFactory;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * A single notification event in the disruptor ringbuffer. These objects are reused,
- * so they do have mutable state.
- */
-final class DOMNotificationRouterEvent {
-    private static final Logger LOG = LoggerFactory.getLogger(DOMNotificationRouterEvent.class);
-    public static final EventFactory<DOMNotificationRouterEvent> FACTORY = DOMNotificationRouterEvent::new;
-
-    private Collection<ListenerRegistration<? extends DOMNotificationListener>> subscribers;
-    private DOMNotification notification;
-    private SettableFuture<Void> future;
-
-    private DOMNotificationRouterEvent() {
-        // Hidden on purpose, initialized in initialize()
-    }
-
-    @SuppressWarnings("checkstyle:hiddenField")
-    ListenableFuture<Void> initialize(final DOMNotification notification,
-                                      final Collection<ListenerRegistration<? extends DOMNotificationListener>>
-                                              subscribers) {
-        this.notification = Preconditions.checkNotNull(notification);
-        this.subscribers = Preconditions.checkNotNull(subscribers);
-        this.future = SettableFuture.create();
-        return this.future;
-    }
-
-    void deliverNotification() {
-        LOG.trace("Start delivery of notification {}", notification);
-        for (ListenerRegistration<? extends DOMNotificationListener> r : subscribers) {
-            final DOMNotificationListener listener = r.getInstance();
-            LOG.trace("Notifying listener {}", listener);
-            listener.onNotification(notification);
-            LOG.trace("Listener notification completed");
-        }
-        LOG.trace("Delivery completed");
-    }
-
-    @SuppressFBWarnings(value = "NP_NONNULL_PARAM_VIOLATION", justification = "Void is the only allowed value")
-    void setFuture() {
-        future.set(null);
-        notification = null;
-        subscribers = null;
-        future = null;
-    }
-}
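The removed DOMNotificationRouterEvent is a reusable, mutable ring-buffer slot: initialize() arms it, deliverNotification() fans the notification out, and setFuture() completes the future and drops all references. A minimal sketch of that reusable-slot pattern, using CompletableFuture and placeholder types:

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.function.Consumer;

    final class ReusableEventSlot<T> {
        private List<Consumer<T>> subscribers;
        private T payload;
        private CompletableFuture<Void> future;

        CompletableFuture<Void> initialize(final T newPayload, final List<Consumer<T>> newSubscribers) {
            payload = newPayload;
            subscribers = newSubscribers;
            future = new CompletableFuture<>();
            return future;
        }

        void deliver() {
            for (Consumer<T> subscriber : subscribers) {
                subscriber.accept(payload);
            }
        }

        void complete() {
            future.complete(null);
            // Drop references so a recycled slot does not pin delivered objects in memory.
            payload = null;
            subscribers = null;
            future = null;
        }
    }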
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRouter.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRouter.java
deleted file mode 100644 (file)
index b660423..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-import java.util.WeakHashMap;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMRpcImplementationRegistration;
-import org.opendaylight.controller.sal.core.compat.DOMRpcServiceAdapter;
-import org.opendaylight.controller.sal.core.compat.LegacyDOMRpcResultFutureAdapter;
-import org.opendaylight.controller.sal.core.compat.MdsalDOMRpcResultFutureAdapter;
-import org.opendaylight.controller.sal.core.compat.RpcAvailabilityListenerAdapter;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public final class DOMRpcRouter implements AutoCloseable, DOMRpcService, DOMRpcProviderService, SchemaContextListener {
-    // This mapping is used to translate mdsal DOMRpcImplementations to their corresponding legacy
-    // DOMRpcImplementations registered through this interface when invoking a DOMRpcAvailabilityListener.
-    private final Map<org.opendaylight.mdsal.dom.api.DOMRpcImplementation, DOMRpcImplementation> implMapping =
-            Collections.synchronizedMap(new WeakHashMap<>());
-
-    private final org.opendaylight.mdsal.dom.api.DOMRpcService delegateRpcService;
-    private final org.opendaylight.mdsal.dom.api.DOMRpcProviderService delegateRpcProviderService;
-
-    // Note - this is only used for backward compatibility for UTs that use the empty constructor which creates
-    // a local mdsal DOMRpcRouter that needs to be updated with the SchemaContext. In production, the mdsal API
-    // services are passed via the constructor and are set up externally with the SchemaContext.
-    private final SchemaContextListener delegateSchemaContextListener;
-
-    @VisibleForTesting
-    public DOMRpcRouter() {
-        org.opendaylight.mdsal.dom.broker.DOMRpcRouter delegate = new org.opendaylight.mdsal.dom.broker.DOMRpcRouter();
-        this.delegateRpcService = delegate.getRpcService();
-        this.delegateRpcProviderService = delegate.getRpcProviderService();
-        this.delegateSchemaContextListener = delegate;
-    }
-
-    public DOMRpcRouter(final org.opendaylight.mdsal.dom.api.DOMRpcService delegateRpcService,
-            final org.opendaylight.mdsal.dom.api.DOMRpcProviderService delegateRpcProviderService) {
-        this.delegateRpcService = delegateRpcService;
-        this.delegateRpcProviderService = delegateRpcProviderService;
-        this.delegateSchemaContextListener = null;
-    }
-
-    @Override
-    public <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(
-            final T implementation, final DOMRpcIdentifier... rpcs) {
-        return registerRpcImplementation(implementation, ImmutableSet.copyOf(rpcs));
-    }
-
-    @Override
-    public synchronized <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(
-            final T implementation, final Set<DOMRpcIdentifier> rpcs) {
-        org.opendaylight.mdsal.dom.api.DOMRpcImplementation delegateImpl =
-            new org.opendaylight.mdsal.dom.api.DOMRpcImplementation() {
-                @Override
-                public FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> invokeRpc(
-                        final org.opendaylight.mdsal.dom.api.DOMRpcIdentifier rpc, final NormalizedNode<?, ?> input) {
-                    return new MdsalDOMRpcResultFutureAdapter(implementation.invokeRpc(DOMRpcIdentifier.fromMdsal(rpc),
-                        input));
-                }
-
-
-                @Override
-                public long invocationCost() {
-                    return implementation.invocationCost();
-                }
-            };
-
-        implMapping.put(delegateImpl, implementation);
-
-        final org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration
-            <org.opendaylight.mdsal.dom.api.DOMRpcImplementation> reg = delegateRpcProviderService
-                .registerRpcImplementation(delegateImpl, DOMRpcServiceAdapter.convert(rpcs));
-
-        return new AbstractDOMRpcImplementationRegistration<T>(implementation) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-                implMapping.remove(delegateImpl);
-            }
-        };
-    }
-
-    @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath type,
-                                                                  final NormalizedNode<?, ?> input) {
-        final FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> future =
-                delegateRpcService.invokeRpc(type, input);
-        return future instanceof MdsalDOMRpcResultFutureAdapter ? ((MdsalDOMRpcResultFutureAdapter)future).delegate()
-                : new LegacyDOMRpcResultFutureAdapter(future);
-    }
-
-    @Override
-    public synchronized <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(
-            final T listener) {
-        final ListenerRegistration<org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener> reg =
-            delegateRpcService.registerRpcListener(new RpcAvailabilityListenerAdapter<T>(listener) {
-                @Override
-                public boolean acceptsImplementation(final org.opendaylight.mdsal.dom.api.DOMRpcImplementation impl) {
-                    // If the DOMRpcImplementation wasn't registered through this interface then the mapping won't be
-                    // present - in this case we can't call the listener, so just assume acceptance, which is the
-                    // default behavior. This should be fine since a legacy listener would not be aware of implementation types
-                    // registered via the new mdsal API.
-                    final DOMRpcImplementation legacyImpl = implMapping.get(impl);
-                    return legacyImpl != null ? delegate().acceptsImplementation(legacyImpl) : true;
-                }
-            });
-
-        return new AbstractListenerRegistration<T>(listener) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-            }
-        };
-    }
-
-    @Override
-    public void close() {
-    }
-
-    @Override
-    @VisibleForTesting
-    public void onGlobalContextUpdated(final SchemaContext context) {
-        if (delegateSchemaContextListener != null) {
-            delegateSchemaContextListener.onGlobalContextUpdated(context);
-        }
-    }
-}
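The removed DOMRpcRouter keeps a synchronized WeakHashMap from the mdsal-facing adapter implementations back to the legacy ones, so entries disappear automatically once an adapter is no longer referenced. A minimal sketch of that weak reverse mapping, with placeholder type parameters:

    import java.util.Collections;
    import java.util.Map;
    import java.util.WeakHashMap;

    final class AdapterMapping<A, O> {
        // Weak keys: once an adapter becomes unreachable its entry is collected.
        private final Map<A, O> mapping = Collections.synchronizedMap(new WeakHashMap<>());

        void register(final A adapter, final O original) {
            mapping.put(adapter, original);
        }

        // Returns the original if the adapter was registered here, otherwise null,
        // letting callers fall back to a permissive default.
        O lookup(final A adapter) {
            return mapping.get(adapter);
        }

        void unregister(final A adapter) {
            mapping.remove(adapter);
        }
    }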
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRoutingTable.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRoutingTable.java
deleted file mode 100644 (file)
index 800a35f..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableMap.Builder;
-import com.google.common.collect.LinkedListMultimap;
-import com.google.common.collect.ListMultimap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.broker.spi.rpc.RpcRoutingStrategy;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-final class DOMRpcRoutingTable {
-
-    static final DOMRpcRoutingTable EMPTY = new DOMRpcRoutingTable(ImmutableMap.of(), null);
-
-    private final Map<SchemaPath, AbstractDOMRpcRoutingTableEntry> rpcs;
-    private final SchemaContext schemaContext;
-
-    private DOMRpcRoutingTable(final Map<SchemaPath, AbstractDOMRpcRoutingTableEntry> rpcs,
-                               final SchemaContext schemaContext) {
-        this.rpcs = Preconditions.checkNotNull(rpcs);
-        this.schemaContext = schemaContext;
-    }
-
-    static ListMultimap<SchemaPath, YangInstanceIdentifier> decomposeIdentifiers(final Set<DOMRpcIdentifier> rpcs) {
-        final ListMultimap<SchemaPath, YangInstanceIdentifier> ret = LinkedListMultimap.create();
-        for (DOMRpcIdentifier i : rpcs) {
-            ret.put(i.getType(), i.getContextReference());
-        }
-        return ret;
-    }
-
-    DOMRpcRoutingTable add(final DOMRpcImplementation implementation, final Set<DOMRpcIdentifier> rpcsToAdd) {
-        if (rpcsToAdd.isEmpty()) {
-            return this;
-        }
-
-        // First decompose the identifiers to a multimap
-        final ListMultimap<SchemaPath, YangInstanceIdentifier> toAdd = decomposeIdentifiers(rpcsToAdd);
-
-        // Now iterate over existing entries, modifying them as appropriate...
-        final Builder<SchemaPath, AbstractDOMRpcRoutingTableEntry> mb = ImmutableMap.builder();
-        for (Entry<SchemaPath, AbstractDOMRpcRoutingTableEntry> re : this.rpcs.entrySet()) {
-            List<YangInstanceIdentifier> newRpcs = new ArrayList<>(toAdd.removeAll(re.getKey()));
-            if (!newRpcs.isEmpty()) {
-                final AbstractDOMRpcRoutingTableEntry ne = re.getValue().add(implementation, newRpcs);
-                mb.put(re.getKey(), ne);
-            } else {
-                mb.put(re);
-            }
-        }
-
-        // Finally add whatever is left in the decomposed multimap
-        for (Entry<SchemaPath, Collection<YangInstanceIdentifier>> e : toAdd.asMap().entrySet()) {
-            final Builder<YangInstanceIdentifier, List<DOMRpcImplementation>> vb = ImmutableMap.builder();
-            final List<DOMRpcImplementation> v = Collections.singletonList(implementation);
-            for (YangInstanceIdentifier i : e.getValue()) {
-                vb.put(i, v);
-            }
-
-            mb.put(e.getKey(), createRpcEntry(schemaContext, e.getKey(), vb.build()));
-        }
-
-        return new DOMRpcRoutingTable(mb.build(), schemaContext);
-    }
-
-    DOMRpcRoutingTable remove(final DOMRpcImplementation implementation, final Set<DOMRpcIdentifier> rpcIds) {
-        if (rpcIds.isEmpty()) {
-            return this;
-        }
-
-        // First decompose the identifiers to a multimap
-        final ListMultimap<SchemaPath, YangInstanceIdentifier> toRemove = decomposeIdentifiers(rpcIds);
-
-        // Now iterate over existing entries, modifying them as appropriate...
-        final Builder<SchemaPath, AbstractDOMRpcRoutingTableEntry> b = ImmutableMap.builder();
-        for (Entry<SchemaPath, AbstractDOMRpcRoutingTableEntry> e : this.rpcs.entrySet()) {
-            final List<YangInstanceIdentifier> removed = new ArrayList<>(toRemove.removeAll(e.getKey()));
-            if (!removed.isEmpty()) {
-                final AbstractDOMRpcRoutingTableEntry ne = e.getValue().remove(implementation, removed);
-                if (ne != null) {
-                    b.put(e.getKey(), ne);
-                }
-            } else {
-                b.put(e);
-            }
-        }
-
-        // All done; whatever is left in toRemove was not there in the first place
-        return new DOMRpcRoutingTable(b.build(), schemaContext);
-    }
-
-    boolean contains(final DOMRpcIdentifier input) {
-        final AbstractDOMRpcRoutingTableEntry contexts = rpcs.get(input.getType());
-        return contexts != null && contexts.containsContext(input.getContextReference());
-    }
-
-    Map<SchemaPath, Set<YangInstanceIdentifier>> getRpcs(final DOMRpcAvailabilityListener listener) {
-        final Map<SchemaPath, Set<YangInstanceIdentifier>> ret = new HashMap<>(rpcs.size());
-        for (Entry<SchemaPath, AbstractDOMRpcRoutingTableEntry> e : rpcs.entrySet()) {
-            final Set<YangInstanceIdentifier> ids = e.getValue().registeredIdentifiers(listener);
-            if (!ids.isEmpty()) {
-                ret.put(e.getKey(), ids);
-            }
-        }
-
-        return ret;
-    }
-
-    private static RpcDefinition findRpcDefinition(final SchemaContext context, final SchemaPath schemaPath) {
-        if (context != null) {
-            final QName qname = schemaPath.getPathFromRoot().iterator().next();
-            final Module module = context.findModule(qname.getModule()).orElse(null);
-            if (module != null && module.getRpcs() != null) {
-                for (RpcDefinition rpc : module.getRpcs()) {
-                    if (qname.equals(rpc.getQName())) {
-                        return rpc;
-                    }
-                }
-            }
-        }
-
-        return null;
-    }
-
-    private static AbstractDOMRpcRoutingTableEntry createRpcEntry(final SchemaContext context, final SchemaPath key,
-            final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> implementations) {
-        final RpcDefinition rpcDef = findRpcDefinition(context, key);
-        if (rpcDef == null) {
-            return new UnknownDOMRpcRoutingTableEntry(key, implementations);
-        }
-
-        final RpcRoutingStrategy strategy = RpcRoutingStrategy.from(rpcDef);
-        if (strategy.isContextBasedRouted()) {
-            return new RoutedDOMRpcRoutingTableEntry(rpcDef, YangInstanceIdentifier.of(strategy.getLeaf()),
-                implementations);
-
-        }
-
-        return new GlobalDOMRpcRoutingTableEntry(rpcDef, implementations);
-    }
-
-    CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath type, final NormalizedNode<?, ?> input) {
-        final AbstractDOMRpcRoutingTableEntry entry = rpcs.get(type);
-        if (entry == null) {
-            return Futures.<DOMRpcResult, DOMRpcException>immediateFailedCheckedFuture(
-                new DOMRpcImplementationNotAvailableException("No implementation of RPC %s available", type));
-        }
-
-        return entry.invokeRpc(input);
-    }
-
-    DOMRpcRoutingTable setSchemaContext(final SchemaContext context) {
-        final Builder<SchemaPath, AbstractDOMRpcRoutingTableEntry> b = ImmutableMap.builder();
-
-        for (Entry<SchemaPath, AbstractDOMRpcRoutingTableEntry> e : rpcs.entrySet()) {
-            b.put(e.getKey(), createRpcEntry(context, e.getKey(), e.getValue().getImplementations()));
-        }
-
-        return new DOMRpcRoutingTable(b.build(), context);
-    }
-}
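
The remove() and setSchemaContext() methods above never mutate the routing map in place: each change rebuilds an ImmutableMap and wraps it in a new DOMRpcRoutingTable. A minimal, hypothetical sketch of that copy-on-write rebuild pattern, using only Guava (the class and method names below are illustrative, not part of the deleted code):

    import com.google.common.collect.ImmutableMap;
    import java.util.Map;

    // Illustrative only: a generic copy-on-write rebuild, not the DOMRpcRoutingTable API itself.
    final class CopyOnWriteRebuild {
        private CopyOnWriteRebuild() {
            // Utility class
        }

        // Returns a new immutable map in which the entry for 'key' is replaced by 'newValue' while
        // all other entries are copied verbatim, mirroring how the routing table derives a new
        // instance for every registration change.
        static <K, V> ImmutableMap<K, V> withReplacedEntry(final Map<K, V> source, final K key, final V newValue) {
            final ImmutableMap.Builder<K, V> builder = ImmutableMap.builder();
            for (Map.Entry<K, V> entry : source.entrySet()) {
                if (entry.getKey().equals(key)) {
                    builder.put(key, newValue);
                } else {
                    builder.put(entry);
                }
            }
            return builder.build();
        }
    }
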
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/GlobalDOMRpcRoutingTableEntry.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/GlobalDOMRpcRoutingTableEntry.java
deleted file mode 100644 (file)
index 776e213..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import java.util.List;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-
-final class GlobalDOMRpcRoutingTableEntry extends AbstractDOMRpcRoutingTableEntry {
-    private final DOMRpcIdentifier rpcId;
-
-    private GlobalDOMRpcRoutingTableEntry(final DOMRpcIdentifier rpcId,
-                                          final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        super(rpcId.getType(), impls);
-        this.rpcId = Preconditions.checkNotNull(rpcId);
-    }
-
-    // We do not need the RpcDefinition, but this makes sure we do not
-    // forward something we don't know to be an RPC.
-    GlobalDOMRpcRoutingTableEntry(final RpcDefinition def,
-                                  final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        super(def.getPath(), impls);
-        this.rpcId = DOMRpcIdentifier.create(def.getPath());
-    }
-
-    @Override
-    protected CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final NormalizedNode<?, ?> input) {
-        return getImplementations(YangInstanceIdentifier.EMPTY).get(0).invokeRpc(rpcId, input);
-    }
-
-    @Override
-    protected GlobalDOMRpcRoutingTableEntry newInstance(
-            final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        return new GlobalDOMRpcRoutingTableEntry(rpcId, impls);
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongDataBroker.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongDataBroker.java
deleted file mode 100644 (file)
index de8a754..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static java.util.Objects.requireNonNull;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.controller.md.sal.dom.spi.ForwardingDOMDataBroker;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-/**
- * An implementation of a {@link DOMDataBroker}, which forwards most requests to a delegate.
- *
- * <p>
- * Its interpretation of the API contract is somewhat looser: specifically, it does not
- * guarantee transaction ordering between transactions allocated directly from the broker
- * and its transaction chains.
- */
-public final class PingPongDataBroker extends ForwardingDOMDataBroker implements AutoCloseable,
-        DOMDataTreeChangeService {
-    private final DOMDataBroker delegate;
-
-    /**
-     * Instantiate a new broker, backed by the specified delegate
-     * {@link DOMDataBroker}.
-     *
-     * @param delegate Backend broker, may not be null.
-     */
-    public PingPongDataBroker(final @NonNull DOMDataBroker delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    protected DOMDataBroker delegate() {
-        return delegate;
-    }
-
-    @Override
-    public PingPongTransactionChain createTransactionChain(final TransactionChainListener listener) {
-        return new PingPongTransactionChain(delegate, listener);
-    }
-
-    @Override
-    public void close() {
-        // intentionally NOOP
-    }
-
-    @Override
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(
-            final DOMDataTreeIdentifier treeId, final L listener) {
-        final DOMDataTreeChangeService treeService = (DOMDataTreeChangeService) delegate.getSupportedExtensions()
-                .get(DOMDataTreeChangeService.class);
-        if (treeService != null) {
-            return treeService.registerDataTreeChangeListener(treeId, listener);
-        }
-
-        throw new UnsupportedOperationException("Delegate " + delegate + " does not support required functionality");
-    }
-
-    @Override
-    public String toString() {
-        return "PingPongDataBroker backed by " + delegate;
-    }
-}
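
PingPongDataBroker above resolves the DOMDataTreeChangeService extension from the delegate's getSupportedExtensions() map and throws if it is absent. A hedged sketch of that class-keyed capability lookup in plain Java (the names here are hypothetical, not the deleted API):

    import java.util.Map;
    import java.util.Optional;

    // Illustrative only: a generic class-keyed capability lookup, mirroring how the broker resolves
    // DOMDataTreeChangeService from getSupportedExtensions().
    final class Capabilities<T> {
        private final Map<Class<? extends T>, T> extensions;

        Capabilities(final Map<Class<? extends T>, T> extensions) {
            this.extensions = Map.copyOf(extensions);
        }

        // Returns the extension registered under 'type', if any.
        <E extends T> Optional<E> find(final Class<E> type) {
            return Optional.ofNullable(type.cast(extensions.get(type)));
        }

        // Returns the extension or throws, matching the broker's fail-fast behaviour.
        <E extends T> E require(final Class<E> type) {
            return find(type).orElseThrow(
                () -> new UnsupportedOperationException("Delegate does not support " + type.getSimpleName()));
        }
    }
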
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongTransaction.java
deleted file mode 100644 (file)
index 2275a6b..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-
-/**
- * Transaction context. Tracks the relationship with the backend transaction.
- * We never leak this class to the user and have it implement the {@link FutureCallback}
- * interface so we have a simple way of propagating the result.
- */
-final class PingPongTransaction implements FutureCallback<CommitInfo> {
-    private final DOMDataReadWriteTransaction delegate;
-    private final SettableFuture<CommitInfo> future;
-    private DOMDataReadWriteTransaction frontendTransaction;
-
-    PingPongTransaction(final DOMDataReadWriteTransaction delegate) {
-        this.delegate = Preconditions.checkNotNull(delegate);
-        future = SettableFuture.create();
-    }
-
-    DOMDataReadWriteTransaction getTransaction() {
-        return delegate;
-    }
-
-    DOMDataReadWriteTransaction getFrontendTransaction() {
-        return frontendTransaction;
-    }
-
-    ListenableFuture<CommitInfo> getCommitFuture() {
-        return future;
-    }
-
-    @Override
-    public void onSuccess(final CommitInfo result) {
-        future.set(result);
-    }
-
-    @Override
-    public void onFailure(final Throwable throwable) {
-        future.setException(throwable);
-    }
-
-    void recordFrontendTransaction(final DOMDataReadWriteTransaction tx) {
-        if (frontendTransaction == null) {
-            frontendTransaction = tx;
-        }
-    }
-
-    @Override
-    public String toString() {
-        return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
-    }
-
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("delegate", delegate);
-    }
-}
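
PingPongTransaction above propagates the backend commit result by implementing FutureCallback over a SettableFuture. A standalone sketch of that bridge, using only Guava (class and method names are hypothetical):

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.MoreExecutors;
    import com.google.common.util.concurrent.SettableFuture;

    // Illustrative only: bridges a callback-style completion into a future the caller can hold on to.
    final class ResultBridge<T> implements FutureCallback<T> {
        private final SettableFuture<T> future = SettableFuture.create();

        ListenableFuture<T> getFuture() {
            return future;
        }

        @Override
        public void onSuccess(final T result) {
            future.set(result);
        }

        @Override
        public void onFailure(final Throwable cause) {
            future.setException(cause);
        }

        // Attach the bridge to a backend future and return the frontend view of it.
        static <T> ListenableFuture<T> attach(final ListenableFuture<T> backend) {
            final ResultBridge<T> bridge = new ResultBridge<>();
            Futures.addCallback(backend, bridge, MoreExecutors.directExecutor());
            return bridge.getFuture();
        }
    }
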
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongTransactionChain.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/PingPongTransactionChain.java
deleted file mode 100644 (file)
index af474c5..0000000
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static com.google.common.base.Preconditions.checkState;
-import static com.google.common.base.Verify.verify;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.AbstractMap.SimpleImmutableEntry;
-import java.util.Map.Entry;
-import java.util.concurrent.CancellationException;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import javax.annotation.concurrent.GuardedBy;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.md.sal.dom.spi.ForwardingDOMDataReadWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * An implementation of {@link DOMTransactionChain}, which has a very specific
- * behavior, which some users may find surprising. It keeps the general
- * intent of the contract, but it makes sure there are never more than two
- * transactions allocated at any given time: one of them is being committed,
- * and while that is happening, the other one acts as the scratch pad. Once
- * the committing transaction completes successfully, the scratch transaction
- * is enqueued as soon as it is ready.
- *
- * <p>
- * This mode of operation means that there is no inherent isolation between
- * the front-end transactions and transactions cannot be reasonably cancelled.
- *
- * <p>
- * It furthermore means that the transaction returned by {@link #newReadOnlyTransaction()}
- * counts as an outstanding transaction and the user may not allocate multiple
- * read-only transactions at the same time.
- */
-public final class PingPongTransactionChain implements DOMTransactionChain {
-    private static final Logger LOG = LoggerFactory.getLogger(PingPongTransactionChain.class);
-    private final TransactionChainListener listener;
-    private final DOMTransactionChain delegate;
-
-    @GuardedBy("this")
-    private boolean failed;
-    @GuardedBy("this")
-    private PingPongTransaction shutdownTx;
-    @GuardedBy("this")
-    private Entry<PingPongTransaction, Throwable> deadTx;
-
-    /**
-     * This updater is used to manipulate the "ready" transaction. We perform only atomic
-     * get-and-set on it.
-     */
-    private static final AtomicReferenceFieldUpdater<PingPongTransactionChain, PingPongTransaction> READY_UPDATER
-            = AtomicReferenceFieldUpdater
-            .newUpdater(PingPongTransactionChain.class, PingPongTransaction.class, "readyTx");
-    private volatile PingPongTransaction readyTx;
-
-    /**
-     * This updater is used to manipulate the "locked" transaction. A locked transaction
-     * means we know that the user still holds a transaction and should at some point call
-     * us. We perform a compare-and-swap to ensure we properly detect when a user is
-     * attempting to allocate multiple transactions concurrently.
-     */
-    private static final AtomicReferenceFieldUpdater<PingPongTransactionChain, PingPongTransaction> LOCKED_UPDATER
-            = AtomicReferenceFieldUpdater
-            .newUpdater(PingPongTransactionChain.class, PingPongTransaction.class, "lockedTx");
-    private volatile PingPongTransaction lockedTx;
-
-    /**
-     * This updater is used to manipulate the "inflight" transaction. There can be at most
-     * one of these at any given time. We perform only compare-and-swap on these.
-     */
-    private static final AtomicReferenceFieldUpdater<PingPongTransactionChain, PingPongTransaction> INFLIGHT_UPDATER
-            = AtomicReferenceFieldUpdater
-            .newUpdater(PingPongTransactionChain.class, PingPongTransaction.class, "inflightTx");
-    private volatile PingPongTransaction inflightTx;
-
-    PingPongTransactionChain(final DOMDataBroker broker, final TransactionChainListener listener) {
-        this.listener = requireNonNull(listener);
-        this.delegate = broker.createTransactionChain(new TransactionChainListener() {
-            @Override
-            public void onTransactionChainFailed(final TransactionChain<?, ?> chain,
-                                                 final AsyncTransaction<?, ?> transaction, final Throwable cause) {
-                LOG.debug("Transaction chain {} reported failure in {}", chain, transaction, cause);
-                delegateFailed(chain, cause);
-            }
-
-            @Override
-            public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
-                delegateSuccessful(chain);
-            }
-        });
-    }
-
-    void delegateSuccessful(final TransactionChain<?, ?> chain) {
-        final Entry<PingPongTransaction, Throwable> canceled;
-        synchronized (this) {
-            // This looks weird, but we need not hold the lock while invoking callbacks
-            canceled = deadTx;
-        }
-
-        if (canceled == null) {
-            listener.onTransactionChainSuccessful(this);
-            return;
-        }
-
-        // Backend shutdown successful, but we have a batch of transactions we have to report as dead due to the
-        // user calling cancel().
-        final PingPongTransaction tx = canceled.getKey();
-        final Throwable cause = canceled.getValue();
-        LOG.debug("Transaction chain {} successful, failing cancelled transaction {}", chain, tx, cause);
-
-        listener.onTransactionChainFailed(this, tx.getFrontendTransaction(), cause);
-        tx.onFailure(cause);
-    }
-
-    void delegateFailed(final TransactionChain<?, ?> chain, final Throwable cause) {
-
-        final DOMDataReadWriteTransaction frontend;
-        final PingPongTransaction tx = inflightTx;
-        if (tx == null) {
-            LOG.warn("Transaction chain {} failed with no pending transactions", chain);
-            frontend = null;
-        } else {
-            frontend = tx.getFrontendTransaction();
-        }
-
-        listener.onTransactionChainFailed(this, frontend, cause);
-
-        synchronized (this) {
-            failed = true;
-
-            /*
-             * If we do not have a locked transaction, we need to ensure that
-             * the backend transaction is cancelled. Otherwise we can defer
-             * until the user calls us.
-             */
-            if (lockedTx == null) {
-                processIfReady();
-            }
-        }
-    }
-
-    private synchronized PingPongTransaction slowAllocateTransaction() {
-        checkState(shutdownTx == null, "Transaction chain %s has been shut down", this);
-
-        if (deadTx != null) {
-            throw new IllegalStateException(
-                    String.format("Transaction chain %s has failed due to transaction %s being canceled", this,
-                                  deadTx.getKey()), deadTx.getValue());
-        }
-
-        final DOMDataReadWriteTransaction delegateTx = delegate.newReadWriteTransaction();
-        final PingPongTransaction newTx = new PingPongTransaction(delegateTx);
-
-        if (!LOCKED_UPDATER.compareAndSet(this, null, newTx)) {
-            delegateTx.cancel();
-            throw new IllegalStateException(
-                    String.format("New transaction %s raced with transaction %s", newTx, lockedTx));
-        }
-
-        return newTx;
-    }
-
-    private PingPongTransaction allocateTransaction() {
-        // Step 1: acquire current state
-        final PingPongTransaction oldTx = READY_UPDATER.getAndSet(this, null);
-
-        // Slow path: allocate a delegate transaction
-        if (oldTx == null) {
-            return slowAllocateTransaction();
-        }
-
-        // Fast path: reuse current transaction. We will check failures and similar on commit().
-        if (!LOCKED_UPDATER.compareAndSet(this, null, oldTx)) {
-            // Ouch. Delegate chain has not detected a duplicate transaction allocation. This is the best we can do.
-            oldTx.getTransaction().cancel();
-            throw new IllegalStateException(
-                    String.format("Reusable transaction %s raced with transaction %s", oldTx, lockedTx));
-        }
-
-        return oldTx;
-    }
-
-    /*
-     * This forces allocateTransaction() on a slow path, which has to happen after
-     * this method has completed executing. Also inflightTx may be updated outside
-     * the lock, hence we need to re-check.
-     */
-    @GuardedBy("this")
-    private void processIfReady() {
-        if (inflightTx == null) {
-            final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
-            if (tx != null) {
-                processTransaction(tx);
-            }
-        }
-    }
-
-    /**
-     * Process a ready transaction. The caller needs to ensure that
-     * each transaction is seen only once by this method.
-     *
-     * @param tx Transaction which needs processing.
-     */
-    @GuardedBy("this")
-    private void processTransaction(final @NonNull PingPongTransaction tx) {
-        if (failed) {
-            LOG.debug("Cancelling transaction {}", tx);
-            tx.getTransaction().cancel();
-            return;
-        }
-
-        LOG.debug("Submitting transaction {}", tx);
-        if (!INFLIGHT_UPDATER.compareAndSet(this, null, tx)) {
-            LOG.warn("Submitting transaction {} while {} is still running", tx, inflightTx);
-        }
-
-        tx.getTransaction().commit().addCallback(new FutureCallback<CommitInfo>() {
-            @Override
-            public void onSuccess(final CommitInfo result) {
-                transactionSuccessful(tx, result);
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                transactionFailed(tx, throwable);
-            }
-        }, MoreExecutors.directExecutor());
-    }
-
-    /*
-     * We got invoked from the data store thread. We need to do two things:
-     * 1) release the in-flight transaction
-     * 2) process the potential next transaction
-     *
-     * We have to perform 2) under lock. We could perform 1) without locking, but that means the CAS result may
-     * not be accurate, as a user thread may submit the ready transaction before we acquire the lock -- and checking
-     * for the next transaction is not enough, as that may also have been allocated (as a result of a quick
-     * submit/allocate/submit between 1) and 2)). Hence we'd end up doing the following:
-     * 1) CAS of inflightTx
-     * 2) take lock
-     * 3) volatile read of inflightTx
-     *
-     * Rather than doing that, we keep this method synchronized, hence performing only:
-     * 1) take lock
-     * 2) CAS of inflightTx
-     *
-     * Since the user thread is barred from submitting the transaction (in processIfReady), we can then proceed with
-     * the knowledge that inflightTx is null -- processTransaction() will still do a CAS, but that is only for
-     * correctness.
-     */
-    private synchronized void processNextTransaction(final PingPongTransaction tx) {
-        final boolean success = INFLIGHT_UPDATER.compareAndSet(this, tx, null);
-        checkState(success, "Completed transaction %s while %s was submitted", tx, inflightTx);
-
-        final PingPongTransaction nextTx = READY_UPDATER.getAndSet(this, null);
-        if (nextTx != null) {
-            processTransaction(nextTx);
-        } else if (shutdownTx != null) {
-            processTransaction(shutdownTx);
-            delegate.close();
-            shutdownTx = null;
-        }
-    }
-
-    void transactionSuccessful(final PingPongTransaction tx, final CommitInfo result) {
-        LOG.debug("Transaction {} completed successfully", tx);
-
-        tx.onSuccess(result);
-        processNextTransaction(tx);
-    }
-
-    void transactionFailed(final PingPongTransaction tx, final Throwable throwable) {
-        LOG.debug("Transaction {} failed", tx, throwable);
-
-        tx.onFailure(throwable);
-        processNextTransaction(tx);
-    }
-
-    void readyTransaction(final @NonNull PingPongTransaction tx) {
-        // First mark the transaction as not locked.
-        final boolean lockedMatch = LOCKED_UPDATER.compareAndSet(this, tx, null);
-        checkState(lockedMatch, "Attempted to submit transaction %s while we have %s", tx, lockedTx);
-        LOG.debug("Transaction {} unlocked", tx);
-
-        /*
-         * The transaction is ready. It will then be picked up by either next allocation,
-         * or a background transaction completion callback.
-         */
-        final boolean success = READY_UPDATER.compareAndSet(this, null, tx);
-        checkState(success, "Transaction %s collided on ready state", tx, readyTx);
-        LOG.debug("Transaction {} readied", tx);
-
-        /*
-         * We do not see a transaction being in-flight, so we need to take care of dispatching
-         * the transaction to the backend. Since we are in the ready case, we cannot short-cut
-         * the checking of readyTx, as an in-flight transaction may have completed between us
-         * setting the field above and us checking.
-         */
-        if (inflightTx == null) {
-            synchronized (this) {
-                processIfReady();
-            }
-        }
-    }
-
-    /**
-     * Transaction cancellation is a heavyweight operation. We only support cancellation of a locked transaction
-     * and return false for everything else. Cancelling such a transaction will result in all transactions in the
-     * batch being cancelled.
-     *
-     * @param tx         Backend shared transaction
-     * @param frontendTx Frontend transaction being cancelled
-     */
-    synchronized void cancelTransaction(final PingPongTransaction tx, final DOMDataReadWriteTransaction frontendTx) {
-        // Attempt to unlock the operation.
-        final boolean lockedMatch = LOCKED_UPDATER.compareAndSet(this, tx, null);
-        verify(lockedMatch, "Cancelling transaction %s collided with locked transaction %s", tx, lockedTx);
-
-        // Cancel the backend transaction, so we do not end up leaking it.
-        final boolean backendCancelled = tx.getTransaction().cancel();
-
-        if (failed) {
-            // The transaction has failed; this is probably the user just cleaning up the transaction they had. We
-            // have already cancelled the transaction anyway.
-            return;
-        } else if (!backendCancelled) {
-            LOG.warn("Backend transaction cannot be cancelled during cancellation of {}, attempting to continue", tx);
-        }
-
-        // We have dealt with canceling the backend transaction and have unlocked the transaction. Since we are still
-        // inside the synchronized block, any allocations are blocking on the slow path. Now we have to decide the fate
-        // of this transaction chain.
-        //
-        // If there are no other frontend transactions in this batch we are aligned with backend state and we can
-        // continue processing.
-        if (frontendTx.equals(tx.getFrontendTransaction())) {
-            LOG.debug("Cancelled transaction {} was head of the batch, resuming processing", tx);
-            return;
-        }
-
-        // There are multiple frontend transactions in this batch. We have to report them as failed, which dooms this
-        // transaction chain, too. Since we just came off of a locked transaction, we do not have a ready transaction
-        // at the moment, but there may be some transaction in-flight. So we proceed to shutdown the backend chain
-        // and mark the fact that we should be turning its completion into a failure.
-        deadTx = new SimpleImmutableEntry<>(tx, new CancellationException("Transaction " + frontendTx + " canceled")
-                .fillInStackTrace());
-        delegate.close();
-    }
-
-    @Override
-    public synchronized void close() {
-        final PingPongTransaction notLocked = lockedTx;
-        checkState(notLocked == null, "Attempted to close chain with outstanding transaction %s", notLocked);
-
-        // This is not reliable, but if we observe it to be null and the process has already completed,
-        // the backend transaction chain will throw the appropriate error.
-        checkState(shutdownTx == null, "Attempted to close an already-closed chain");
-
-        // This may be a reaction to our failure callback, in that case the backend is already shutdown
-        if (deadTx != null) {
-            LOG.debug("Delegate {} is already closed due to failure {}", delegate, deadTx);
-            return;
-        }
-
-        // Force allocations on slow path, picking up a potentially-outstanding transaction
-        final PingPongTransaction tx = READY_UPDATER.getAndSet(this, null);
-
-        if (tx != null) {
-            // We have one more transaction, which needs to be processed somewhere. If we do not
-            // have a transaction in-flight, we need to push it down ourselves.
-            // If there is an in-flight transaction we will schedule this last one into a dedicated
-            // slot. Allocation slow path will check its presence and fail, the in-flight path will
-            // pick it up, submit and immediately close the chain.
-            if (inflightTx == null) {
-                processTransaction(tx);
-                delegate.close();
-            } else {
-                shutdownTx = tx;
-            }
-        } else {
-            // Nothing outstanding, we can safely shutdown
-            delegate.close();
-        }
-    }
-
-    @Override
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        final PingPongTransaction tx = allocateTransaction();
-
-        return new DOMDataReadOnlyTransaction() {
-            @Override
-            public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(
-                    final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-                return tx.getTransaction().read(store, path);
-            }
-
-            @Override
-            public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-                                                                      final YangInstanceIdentifier path) {
-                return tx.getTransaction().exists(store, path);
-            }
-
-            @Override
-            public Object getIdentifier() {
-                return tx.getTransaction().getIdentifier();
-            }
-
-            @Override
-            public void close() {
-                readyTransaction(tx);
-            }
-        };
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        final PingPongTransaction tx = allocateTransaction();
-        final DOMDataReadWriteTransaction ret = new ForwardingDOMDataReadWriteTransaction() {
-            private boolean isOpen = true;
-
-            @Override
-            protected DOMDataReadWriteTransaction delegate() {
-                return tx.getTransaction();
-            }
-
-            @Override
-            public FluentFuture<? extends CommitInfo> commit() {
-                readyTransaction(tx);
-                isOpen = false;
-                return FluentFuture.from(tx.getCommitFuture()).transformAsync(
-                    ignored -> CommitInfo.emptyFluentFuture(), MoreExecutors.directExecutor());
-            }
-
-            @Override
-            public boolean cancel() {
-                if (isOpen) {
-                    cancelTransaction(tx, this);
-                    isOpen = false;
-                    return true;
-                } else {
-                    return false;
-                }
-            }
-        };
-
-        tx.recordFrontendTransaction(ret);
-        return ret;
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        return newReadWriteTransaction();
-    }
-}
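
The chain above coordinates three volatile slots (locked, ready, in-flight) through atomic field updaters. A simplified sketch of just the locked/ready hand-off, using AtomicReference instead of field updaters; it omits the in-flight tracking, failure handling and shutdown logic of the real class, and all names are placeholders:

    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Supplier;

    // Illustrative only: a simplified ping-pong hand-off between a "locked" (user-held) slot and a
    // "ready" (submitted, awaiting backend) slot. T stands in for the transaction type.
    final class PingPongSlots<T> {
        private final AtomicReference<T> lockedTx = new AtomicReference<>();
        private final AtomicReference<T> readyTx = new AtomicReference<>();

        // Allocate a transaction: reuse the ready one if present, otherwise create a fresh one.
        T allocate(final Supplier<T> freshTx) {
            T tx = readyTx.getAndSet(null);
            if (tx == null) {
                tx = freshTx.get();
            }
            if (!lockedTx.compareAndSet(null, tx)) {
                throw new IllegalStateException("Concurrent transaction allocation detected");
            }
            return tx;
        }

        // Mark the user-held transaction as ready to be picked up by the backend.
        void ready(final T tx) {
            if (!lockedTx.compareAndSet(tx, null)) {
                throw new IllegalStateException("Transaction is not the currently locked one");
            }
            if (!readyTx.compareAndSet(null, tx)) {
                throw new IllegalStateException("Another transaction is already ready");
            }
        }
    }
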
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/RoutedDOMRpcRoutingTableEntry.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/RoutedDOMRpcRoutingTableEntry.java
deleted file mode 100644 (file)
index 5292118..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNodes;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class RoutedDOMRpcRoutingTableEntry extends AbstractDOMRpcRoutingTableEntry {
-    private static final Logger LOG = LoggerFactory.getLogger(RoutedDOMRpcRoutingTableEntry.class);
-    private final DOMRpcIdentifier globalRpcId;
-    private final YangInstanceIdentifier keyId;
-
-    private RoutedDOMRpcRoutingTableEntry(final DOMRpcIdentifier globalRpcId, final YangInstanceIdentifier keyId,
-                                          final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        super(globalRpcId.getType(), impls);
-        this.keyId = Preconditions.checkNotNull(keyId);
-        this.globalRpcId = Preconditions.checkNotNull(globalRpcId);
-    }
-
-    RoutedDOMRpcRoutingTableEntry(final RpcDefinition def, final YangInstanceIdentifier keyId,
-                                  final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        super(def.getPath(), impls);
-        this.keyId = Preconditions.checkNotNull(keyId);
-        this.globalRpcId = DOMRpcIdentifier.create(def.getPath());
-    }
-
-    @Override
-    protected CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final NormalizedNode<?, ?> input) {
-        final Optional<NormalizedNode<?, ?>> maybeKey = NormalizedNodes.findNode(input, keyId);
-
-        // Routing key is present, attempt to deliver as a routed RPC
-        if (maybeKey.isPresent()) {
-            final NormalizedNode<?, ?> key = maybeKey.get();
-            final Object value = key.getValue();
-            if (value instanceof YangInstanceIdentifier) {
-                final YangInstanceIdentifier iid = (YangInstanceIdentifier) value;
-
-                // Find a DOMRpcImplementation for a specific iid
-                final List<DOMRpcImplementation> specificImpls = getImplementations(iid);
-                if (specificImpls != null) {
-                    return specificImpls.get(0).invokeRpc(DOMRpcIdentifier.create(getSchemaPath(), iid), input);
-                }
-
-                LOG.debug("No implementation for context {} found will now look for wildcard id", iid);
-
-                // Find a DOMRpcImplementation for a wild card. Usually remote-rpc-connector would register an
-                // implementation this way
-                final List<DOMRpcImplementation> mayBeRemoteImpls = getImplementations(YangInstanceIdentifier.EMPTY);
-
-                if (mayBeRemoteImpls != null) {
-                    return mayBeRemoteImpls.get(0).invokeRpc(DOMRpcIdentifier.create(getSchemaPath(), iid), input);
-                }
-
-            } else {
-                LOG.warn("Ignoring wrong context value {}", value);
-            }
-        }
-
-        final List<DOMRpcImplementation> impls = getImplementations(null);
-        if (impls != null) {
-            return impls.get(0).invokeRpc(globalRpcId, input);
-        } else {
-            return Futures.<DOMRpcResult, DOMRpcException>immediateFailedCheckedFuture(
-                    new DOMRpcImplementationNotAvailableException("No implementation of RPC %s available",
-                                                                  getSchemaPath()));
-        }
-    }
-
-    @Override
-    protected RoutedDOMRpcRoutingTableEntry newInstance(
-            final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        return new RoutedDOMRpcRoutingTableEntry(globalRpcId, keyId, impls);
-    }
-}
\ No newline at end of file
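
RoutedDOMRpcRoutingTableEntry above resolves an implementation in a fixed order: the context-specific registration, then the wildcard (empty identifier) registration, then the global fallback. A generic, hypothetical sketch of that ordered lookup (types and names are placeholders, not the deleted API):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.Optional;

    // Illustrative only: the ordered fallback performed by the routed entry, expressed over a plain
    // map of registrations keyed by routing context.
    final class RoutedLookup<K, I> {
        private final Map<K, List<I>> implementations;
        private final K wildcardKey;
        private final K globalKey;

        RoutedLookup(final Map<K, List<I>> implementations, final K wildcardKey, final K globalKey) {
            this.implementations = implementations;
            this.wildcardKey = wildcardKey;
            this.globalKey = globalKey;
        }

        // Context-specific registration first, then the wildcard registration, finally the global fallback.
        Optional<I> resolve(final K context) {
            for (final K key : Arrays.asList(context, wildcardKey, globalKey)) {
                final List<I> impls = implementations.get(key);
                if (impls != null && !impls.isEmpty()) {
                    return Optional.of(impls.get(0));
                }
            }
            return Optional.empty();
        }
    }
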
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/SerializedDOMDataBroker.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/SerializedDOMDataBroker.java
deleted file mode 100644 (file)
index c170944..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others. All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Supplier;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.RejectedExecutionException;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.util.DurationStatisticsTracker;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of a blocking three-phase commit coordinator, which
- * supports coordination on multiple {@link DOMStoreThreePhaseCommitCohort}.
- *
- * <p>
- * This implementation does not support cancellation of commit.
- *
- * <p>
- * In order to advance to the next phase of the three-phase commit, all subtasks of
- * the previous step must be finished.
- *
- * <p>
- * This executor does not have an upper bound on subtask timeout.
- */
-public class SerializedDOMDataBroker extends AbstractDOMDataBroker {
-    private static final Logger LOG = LoggerFactory.getLogger(SerializedDOMDataBroker.class);
-    private final DurationStatisticsTracker commitStatsTracker = DurationStatisticsTracker.createConcurrent();
-    private final ListeningExecutorService executor;
-
-    /**
-     * Construct DOMDataCommitCoordinator which uses supplied executor to
-     * process commit coordinations.
-     *
-     * @param datastores data stores
-     * @param executor executor service
-     */
-    public SerializedDOMDataBroker(final Map<LogicalDatastoreType, DOMStore> datastores,
-                                   final ListeningExecutorService executor) {
-        super(datastores);
-        this.executor = Preconditions.checkNotNull(executor, "executor must not be null.");
-    }
-
-    public DurationStatisticsTracker getCommitStatsTracker() {
-        return commitStatsTracker;
-    }
-
-    @Override
-    protected <T> ListenableFuture<T> commit(final DOMDataWriteTransaction transaction,
-            final Collection<DOMStoreThreePhaseCommitCohort> cohorts, final Supplier<T> futureValueSupplier) {
-        Preconditions.checkArgument(transaction != null, "Transaction must not be null.");
-        Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
-        LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
-
-        ListenableFuture<T> commitFuture;
-        try {
-            commitFuture = executor.submit(new CommitCoordinationTask<>(transaction, cohorts, commitStatsTracker,
-                    futureValueSupplier));
-        } catch (RejectedExecutionException e) {
-            LOG.error("The commit executor {} queue is full - submit task was rejected. \n", executor, e);
-            commitFuture = Futures.immediateFailedFuture(new TransactionCommitFailedException(
-                    "Could not submit the commit task - the commit queue capacity has been exceeded.", e));
-        }
-
-        return commitFuture;
-    }
-}
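
SerializedDOMDataBroker above guards the executor submission so that a saturated commit queue surfaces as a failed future rather than a RejectedExecutionException thrown at the caller. A minimal sketch of that guard; the real code wraps the failure in TransactionCommitFailedException, while IllegalStateException is used here only to keep the sketch self-contained:

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import java.util.concurrent.Callable;
    import java.util.concurrent.RejectedExecutionException;

    // Illustrative only: submit a commit task, converting executor saturation into a failed future.
    final class GuardedSubmit {
        private GuardedSubmit() {
            // Utility class
        }

        static <T> ListenableFuture<T> submitOrFail(final ListeningExecutorService executor, final Callable<T> task) {
            try {
                return executor.submit(task);
            } catch (RejectedExecutionException e) {
                return Futures.immediateFailedFuture(
                    new IllegalStateException("Commit queue capacity has been exceeded", e));
            }
        }
    }
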
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/TransactionCommitFailedExceptionMapper.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/TransactionCommitFailedExceptionMapper.java
deleted file mode 100644 (file)
index 8f04d09..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-
-/**
- * Utility exception mapper which translates Exception to {@link TransactionCommitFailedException}.
- *
- * @see ExceptionMapper
- */
-public final class TransactionCommitFailedExceptionMapper extends ExceptionMapper<TransactionCommitFailedException> {
-
-    public static final TransactionCommitFailedExceptionMapper PRE_COMMIT_MAPPER = create("preCommit");
-
-    public static final TransactionCommitFailedExceptionMapper CAN_COMMIT_ERROR_MAPPER = create("canCommit");
-
-    public static final TransactionCommitFailedExceptionMapper COMMIT_ERROR_MAPPER = create("commit");
-
-    private TransactionCommitFailedExceptionMapper(final String opName) {
-        super(opName, TransactionCommitFailedException.class);
-    }
-
-    public static TransactionCommitFailedExceptionMapper create(final String opName) {
-        return new TransactionCommitFailedExceptionMapper(opName);
-    }
-
-    @Override
-    protected TransactionCommitFailedException newWithCause(final String message, final Throwable cause) {
-        return new TransactionCommitFailedException(message, cause);
-    }
-
-    @Override
-    @SuppressFBWarnings("BC_UNCONFIRMED_CAST_OF_RETURN_VALUE")
-    public TransactionCommitFailedException apply(Exception input) {
-        return super.apply(input);
-    }
-}
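
A mapper like the one above is typically combined with Guava's Futures.makeChecked() so that whatever exception the commit future fails with is reported uniformly as TransactionCommitFailedException. A hedged usage sketch; the helper class below is illustrative, not part of the deleted code:

    import com.google.common.util.concurrent.CheckedFuture;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
    import org.opendaylight.controller.md.sal.dom.broker.impl.TransactionCommitFailedExceptionMapper;

    // Illustrative only: adapting an untyped ListenableFuture into a CheckedFuture whose failures
    // are uniformly reported as TransactionCommitFailedException through the mapper.
    final class CommitFutures {
        private CommitFutures() {
            // Utility class
        }

        static <T> CheckedFuture<T, TransactionCommitFailedException> asChecked(final ListenableFuture<T> commitFuture) {
            return Futures.makeChecked(commitFuture, TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
        }
    }
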
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/UnknownDOMRpcRoutingTableEntry.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/UnknownDOMRpcRoutingTableEntry.java
deleted file mode 100644 (file)
index 4e7560e..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import java.util.List;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-final class UnknownDOMRpcRoutingTableEntry extends AbstractDOMRpcRoutingTableEntry {
-    private final CheckedFuture<DOMRpcResult, DOMRpcException> unknownRpc;
-
-    UnknownDOMRpcRoutingTableEntry(final SchemaPath schemaPath,
-                                   final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        super(schemaPath, impls);
-        unknownRpc = Futures.<DOMRpcResult, DOMRpcException>immediateFailedCheckedFuture(
-                new DOMRpcImplementationNotAvailableException("SchemaPath %s is not resolved to an RPC", schemaPath));
-    }
-
-    @Override
-    protected CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final NormalizedNode<?, ?> input) {
-        return unknownRpc;
-    }
-
-    @Override
-    protected UnknownDOMRpcRoutingTableEntry newInstance(
-            final Map<YangInstanceIdentifier, List<DOMRpcImplementation>> impls) {
-        return new UnknownDOMRpcRoutingTableEntry(getSchemaPath(), impls);
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/LegacyShardedDOMDataBrokerAdapter.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/LegacyShardedDOMDataBrokerAdapter.java
deleted file mode 100644 (file)
index 8691992..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.dom.broker.ShardedDOMDataBrokerAdapter;
-
-/**
- * DOMDataBroker implementation that forwards calls to
- * {@link org.opendaylight.mdsal.dom.broker.ShardedDOMDataBrokerAdapter},
- * which in turn translates calls to the shard-aware implementation of
- * {@link org.opendaylight.mdsal.dom.api.DOMDataTreeService}.
- *
- * <p>
- * The incompatibility between the former and the latter APIs puts restrictions on {@link DOMDataReadWriteTransaction}
- * and {@link DOMDataReadOnlyTransaction} provided by this data broker. See
- * {@link ShardedDOMDataBrokerDelegatingReadWriteTransaction}
- * and {@link ShardedDOMDataBrokerDelegatingReadTransaction} respectively.
- */
-// FIXME try to refactor some of the implementation to abstract class for better reusability
-public class LegacyShardedDOMDataBrokerAdapter implements DOMDataBroker {
-
-    private final org.opendaylight.mdsal.dom.api.DOMDataBroker delegateDataBroker;
-    private final DOMSchemaService schemaService;
-    private final AtomicLong txNum = new AtomicLong();
-    private final AtomicLong chainNum = new AtomicLong();
-
-    public LegacyShardedDOMDataBrokerAdapter(final ShardedDOMDataBrokerAdapter delegateDataBroker,
-                                             final DOMSchemaService schemaService) {
-        this.delegateDataBroker = checkNotNull(delegateDataBroker);
-        this.schemaService = checkNotNull(schemaService);
-    }
-
-    @Override
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        return new ShardedDOMDataBrokerDelegatingReadTransaction(newTransactionIdentifier(),
-                                                                 delegateDataBroker.newReadOnlyTransaction());
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        return new ShardedDOMDataBrokerDelegatingReadWriteTransaction(newTransactionIdentifier(),
-                                                                      schemaService.getGlobalContext(),
-                                                                      newReadOnlyTransaction(),
-                                                                      newWriteOnlyTransaction());
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        return new ShardedDOMDataBrokerDelegatingWriteTransaction(newTransactionIdentifier(),
-                                                                  delegateDataBroker.newWriteOnlyTransaction());
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
-        return new ShardedDOMDataBrokerDelegatingTransactionChain(chainNum.getAndIncrement(),
-                                                                  schemaService.getGlobalContext(), delegateDataBroker,
-                                                                  listener);
-    }
-
-    @Override
-    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
-        return Collections.emptyMap();
-    }
-
-    private Object newTransactionIdentifier() {
-        return "DOM-" + txNum.getAndIncrement();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadTransaction.java
deleted file mode 100644 (file)
index 98c236a..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Read transaction that delegates calls to {@link org.opendaylight.mdsal.dom.broker.ShardedDOMReadTransactionAdapter},
- * which in turn translates calls to the shard-aware implementation of
- * {@link org.opendaylight.mdsal.dom.api.DOMDataTreeService}.
- *
- * <p>
- * Since reading data distributed on different subshards is not guaranteed to
- * return all relevant data, we cannot guarantee it either. The best effort is to
- * return all data we get from the first initial data change event received.
- */
-class ShardedDOMDataBrokerDelegatingReadTransaction implements DOMDataReadOnlyTransaction {
-    private final DOMDataTreeReadTransaction delegateTx;
-    private final Object txIdentifier;
-
-    ShardedDOMDataBrokerDelegatingReadTransaction(final Object txIdentifier,
-                                                         final DOMDataTreeReadTransaction delegateTx) {
-        this.delegateTx = checkNotNull(delegateTx);
-        this.txIdentifier = checkNotNull(txIdentifier);
-    }
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store,
-                                                                                   final YangInstanceIdentifier path) {
-        return Futures.makeChecked(delegateTx.read(store.toMdsal(), path).transform(
-            Optional::fromJavaUtil, MoreExecutors.directExecutor()), ReadFailedException.MAPPER);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-                                                              final YangInstanceIdentifier path) {
-        return Futures.makeChecked(delegateTx.exists(store.toMdsal(), path), ReadFailedException.MAPPER);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return txIdentifier;
-    }
-
-    @Override
-    public void close() {
-        delegateTx.close();
-    }
-}
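
The read adapter above bridges the mdsal API, which reports java.util.Optional, back to the legacy API's Guava Optional by transforming the future's result. A standalone sketch of that bridge (the helper name is hypothetical):

    import com.google.common.base.Optional;
    import com.google.common.util.concurrent.FluentFuture;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.MoreExecutors;

    // Illustrative only: the legacy read API exposes Guava's Optional while mdsal reports
    // java.util.Optional, so the adapter converts the future's result between the two.
    final class OptionalBridge {
        private OptionalBridge() {
            // Utility class
        }

        static <T> ListenableFuture<Optional<T>> toGuavaOptional(final FluentFuture<java.util.Optional<T>> mdsalRead) {
            return mdsalRead.transform(Optional::fromJavaUtil, MoreExecutors.directExecutor());
        }
    }
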
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadWriteTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadWriteTransaction.java
deleted file mode 100644 (file)
index fd8ed94..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter;
-
-import static com.google.common.base.Preconditions.checkState;
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Map;
-import java.util.Queue;
-import javax.annotation.concurrent.NotThreadSafe;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeConfiguration;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeFactory;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * Read/write transaction that delegates write and initial read to
- * {@link org.opendaylight.mdsal.dom.broker.ShardedDOMWriteTransactionAdapter}
- * and {@link org.opendaylight.mdsal.dom.broker.ShardedDOMReadTransactionAdapter}
- * respectively. These two in turn rely on the shard-aware implementation of
- * {@link org.opendaylight.mdsal.dom.api.DOMDataTreeService}.
- *
- * <p>
- * Since reading data distributed on different subshards is not guaranteed to
- * return all relevant data, the best effort is to try to operate only on a single
- * subtree of the conceptual data tree. We define this subtree by the first write
- * operation performed on the transaction. All subsequent read and write operations
- * should be performed only within this initial subtree.
- */
-// FIXME explicitly enforce just one subtree requirement
-@NotThreadSafe
-class ShardedDOMDataBrokerDelegatingReadWriteTransaction implements DOMDataReadWriteTransaction {
-    private final DOMDataReadOnlyTransaction readTxDelegate;
-    private final DOMDataWriteTransaction writeTxDelegate;
-    private final Object txIdentifier;
-    private final ImmutableMap<LogicalDatastoreType, Queue<Modification>> modificationHistoryMap;
-    private final ImmutableMap<LogicalDatastoreType, DataTreeSnapshot> snapshotMap;
-    private final Map<LogicalDatastoreType, ListenableFuture<Optional<NormalizedNode<?, ?>>>> initialReadMap;
-    private YangInstanceIdentifier root = null;
-
-    ShardedDOMDataBrokerDelegatingReadWriteTransaction(final Object readWriteTxId, final SchemaContext ctx,
-                                                              final DOMDataReadOnlyTransaction readTxDelegate,
-                                                              final DOMDataWriteTransaction writeTxDelegate) {
-        this.readTxDelegate = requireNonNull(readTxDelegate);
-        this.writeTxDelegate = requireNonNull(writeTxDelegate);
-        this.txIdentifier = requireNonNull(readWriteTxId);
-        this.initialReadMap = Maps.newEnumMap(LogicalDatastoreType.class);
-
-        final InMemoryDataTreeFactory treeFactory = new InMemoryDataTreeFactory();
-        final ImmutableMap.Builder<LogicalDatastoreType, DataTreeSnapshot> snapshotMapBuilder = ImmutableMap.builder();
-        final ImmutableMap.Builder<LogicalDatastoreType, Queue<Modification>> modificationHistoryMapBuilder
-                = ImmutableMap.builder();
-        for (final LogicalDatastoreType store : LogicalDatastoreType.values()) {
-            final DataTree tree = treeFactory.create(treeConfigForStore(store));
-            tree.setSchemaContext(ctx);
-            snapshotMapBuilder.put(store, tree.takeSnapshot());
-
-            modificationHistoryMapBuilder.put(store, Lists.newLinkedList());
-        }
-
-        modificationHistoryMap = modificationHistoryMapBuilder.build();
-        snapshotMap = snapshotMapBuilder.build();
-    }
-
-    @Override
-    public boolean cancel() {
-        readTxDelegate.close();
-        return writeTxDelegate.cancel();
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        if (root == null) {
-            initialRead(path);
-        }
-
-        modificationHistoryMap.get(store).add(new Modification(Modification.Operation.DELETE, path, null));
-        writeTxDelegate.delete(store, path);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return writeTxDelegate.commit();
-    }
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store,
-                                                                                   final YangInstanceIdentifier path) {
-        checkState(root != null,
-                   "A modify operation (put, merge or delete) must be performed prior to a read operation");
-        final SettableFuture<Optional<NormalizedNode<?, ?>>> readResult = SettableFuture.create();
-        final Queue<Modification> currentHistory = Lists.newLinkedList(modificationHistoryMap.get(store));
-        Futures.addCallback(initialReadMap.get(store), new FutureCallback<Optional<NormalizedNode<?, ?>>>() {
-            @Override
-            public void onSuccess(final Optional<NormalizedNode<?, ?>> result) {
-                final DataTreeModification mod = snapshotMap.get(store).newModification();
-                if (result.isPresent()) {
-                    mod.write(path, result.get());
-                }
-                applyModificationHistoryToSnapshot(mod, currentHistory);
-                readResult.set(Optional.fromJavaUtil(mod.readNode(path)));
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                readResult.setException(throwable);
-            }
-        }, MoreExecutors.directExecutor());
-
-        return Futures.makeChecked(readResult, ReadFailedException.MAPPER);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-                                                              final YangInstanceIdentifier path) {
-        checkState(root != null,
-                   "A modify operation (put, merge or delete) must be performed prior to an exists operation");
-        return Futures.makeChecked(Futures.transform(read(store, path), Optional::isPresent,
-            MoreExecutors.directExecutor()), ReadFailedException.MAPPER);
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                    final NormalizedNode<?, ?> data) {
-        if (root == null) {
-            initialRead(path);
-        }
-
-        modificationHistoryMap.get(store).add(new Modification(Modification.Operation.WRITE, path, data));
-        writeTxDelegate.put(store, path, data);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                      final NormalizedNode<?, ?> data) {
-        if (root == null) {
-            initialRead(path);
-        }
-
-        modificationHistoryMap.get(store).add(new Modification(Modification.Operation.MERGE, path, data));
-        writeTxDelegate.merge(store, path, data);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return txIdentifier;
-    }
-
-    private void initialRead(final YangInstanceIdentifier path) {
-        root = path;
-
-        for (final LogicalDatastoreType store : LogicalDatastoreType.values()) {
-            initialReadMap.put(store, readTxDelegate.read(store, path));
-        }
-    }
-
-    private static DataTreeConfiguration treeConfigForStore(final LogicalDatastoreType store) {
-        return store == LogicalDatastoreType.CONFIGURATION ? DataTreeConfiguration.DEFAULT_CONFIGURATION
-                : DataTreeConfiguration.DEFAULT_OPERATIONAL;
-    }
-
-    private static void applyModificationHistoryToSnapshot(final DataTreeModification dataTreeModification,
-                                                    final Queue<Modification> modificationHistory) {
-        while (!modificationHistory.isEmpty()) {
-            final Modification modification = modificationHistory.poll();
-            switch (modification.getOperation()) {
-                case WRITE:
-                    dataTreeModification.write(modification.getPath(), modification.getData());
-                    break;
-                case MERGE:
-                    dataTreeModification.merge(modification.getPath(), modification.getData());
-                    break;
-                case DELETE:
-                    dataTreeModification.delete(modification.getPath());
-                    break;
-                default:
-                    // NOOP
-            }
-        }
-    }
-
-    static class Modification {
-
-        enum Operation {
-            WRITE, MERGE, DELETE
-        }
-
-        private final NormalizedNode<?, ?> data;
-        private final YangInstanceIdentifier path;
-        private final Operation operation;
-
-        Modification(final Operation operation, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-            this.data = data;
-            this.path = requireNonNull(path);
-            this.operation = requireNonNull(operation);
-        }
-
-        Operation getOperation() {
-            return operation;
-        }
-
-        YangInstanceIdentifier getPath() {
-            return path;
-        }
-
-        NormalizedNode<?, ?> getData() {
-            return data;
-        }
-    }
-}
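Note: the read path of the class removed above answers reads by replaying the transaction's own modification history on top of an initial read snapshot (see applyModificationHistoryToSnapshot). Below is a minimal, self-contained sketch of that replay pattern; the class and method names (ReplaySketch, Modification) are made up for illustration, and a plain path-to-value map stands in for a DataTreeSnapshot, so WRITE and MERGE collapse to the same operation.

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;

final class ReplaySketch {
    enum Op { WRITE, MERGE, DELETE }

    record Modification(Op op, String path, String data) { }

    // Replay queued modifications onto a copy of the initial snapshot, then
    // answer the read from the result (mirrors applyModificationHistoryToSnapshot).
    static Optional<String> read(final Map<String, String> initialSnapshot,
            final Queue<Modification> history, final String path) {
        final Map<String, String> view = new HashMap<>(initialSnapshot);
        for (Modification mod = history.poll(); mod != null; mod = history.poll()) {
            switch (mod.op()) {
                // A flat map cannot distinguish write from merge; both become a put here.
                case WRITE, MERGE -> view.put(mod.path(), mod.data());
                case DELETE -> view.remove(mod.path());
            }
        }
        return Optional.ofNullable(view.get(path));
    }

    public static void main(final String[] args) {
        final Queue<Modification> history = new ArrayDeque<>();
        history.add(new Modification(Op.WRITE, "/test/outer-list/1", "foo"));
        history.add(new Modification(Op.DELETE, "/test/outer-list/1", null));
        // Prints Optional.empty: the delete is replayed after the write.
        System.out.println(read(Map.of(), history, "/test/outer-list/1"));
    }
}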
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingTransactionChain.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingTransactionChain.java
deleted file mode 100644 (file)
index ef77bf1..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-class ShardedDOMDataBrokerDelegatingTransactionChain implements DOMTransactionChain, DOMTransactionChainListener {
-    private final org.opendaylight.mdsal.dom.api.DOMTransactionChain txChainDelegate;
-    private final SchemaContext schemaContext;
-    private final TransactionChainListener txChainListener;
-    private final Object txChainIdentifier;
-    private final AtomicLong txNum = new AtomicLong();
-
-    private final Map<Object, AsyncTransaction<?, ?>> transactionMap;
-
-    ShardedDOMDataBrokerDelegatingTransactionChain(final Object txChainIdentifier,
-                                                          final SchemaContext schemaContext,
-                                                          final org.opendaylight.mdsal.dom.api.DOMDataBroker
-                                                                  brokerDelegate,
-                                                          final TransactionChainListener txChainListener) {
-        requireNonNull(brokerDelegate);
-        this.schemaContext = requireNonNull(schemaContext);
-        this.txChainIdentifier = requireNonNull(txChainIdentifier);
-        this.txChainListener = requireNonNull(txChainListener);
-        this.txChainDelegate = brokerDelegate.createTransactionChain(this);
-        transactionMap = Maps.newHashMap();
-    }
-
-    @Override
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        final DOMDataTreeReadTransaction readTxDelegate = txChainDelegate.newReadOnlyTransaction();
-        final DOMDataReadOnlyTransaction readTx = new ShardedDOMDataBrokerDelegatingReadTransaction(
-                newTransactionIdentifier(), readTxDelegate);
-        transactionMap.put(readTxDelegate.getIdentifier(), readTx);
-
-        return readTx;
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        final Object readWriteTxId = newTransactionIdentifier();
-        final DOMDataTreeReadTransaction readTxDelegate = txChainDelegate.newReadOnlyTransaction();
-        final DOMDataReadOnlyTransaction readTx = new ShardedDOMDataBrokerDelegatingReadTransaction(readWriteTxId,
-                                                                                                    readTxDelegate);
-
-        final DOMDataTreeWriteTransaction writeTxDelegate = txChainDelegate.newWriteOnlyTransaction();
-        final DOMDataWriteTransaction writeTx = new ShardedDOMDataBrokerDelegatingWriteTransaction(readWriteTxId,
-                                                                                                   writeTxDelegate);
-
-        final DOMDataReadWriteTransaction readWriteTx = new ShardedDOMDataBrokerDelegatingReadWriteTransaction(
-                readWriteTxId, schemaContext, readTx, writeTx);
-        transactionMap.put(readTxDelegate.getIdentifier(), readWriteTx);
-        transactionMap.put(writeTxDelegate.getIdentifier(), readWriteTx);
-
-        return readWriteTx;
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        final DOMDataTreeWriteTransaction writeTxDelegate = txChainDelegate.newWriteOnlyTransaction();
-        final DOMDataWriteTransaction writeTx = new ShardedDOMDataBrokerDelegatingWriteTransaction(
-                newTransactionIdentifier(), writeTxDelegate);
-        transactionMap.put(writeTxDelegate.getIdentifier(), writeTx);
-
-        return writeTx;
-    }
-
-    @Override
-    public void close() {
-        txChainDelegate.close();
-    }
-
-    @Override
-    public void onTransactionChainFailed(org.opendaylight.mdsal.dom.api.DOMTransactionChain chain,
-            DOMDataTreeTransaction transaction, Throwable cause) {
-        txChainListener.onTransactionChainFailed(this, transactionFromDelegate(transaction.getIdentifier()), cause);
-    }
-
-    @Override
-    public void onTransactionChainSuccessful(org.opendaylight.mdsal.dom.api.DOMTransactionChain chain) {
-        txChainListener.onTransactionChainSuccessful(this);
-    }
-
-    private AsyncTransaction<?, ?> transactionFromDelegate(final Object delegateId) {
-        Preconditions.checkState(transactionMap.containsKey(delegateId),
-                                 "Delegate transaction {} is not present in transaction chain history", delegateId);
-        return transactionMap.get(delegateId);
-    }
-
-    private Object newTransactionIdentifier() {
-        return "DOM-CHAIN-" + txChainIdentifier + "-" + txNum.getAndIncrement();
-    }
-}
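Note: the chain removed above indexes each wrapping transaction by its delegate's identifier so that delegate-level failure callbacks can be reported against the caller-visible transaction (transactionFromDelegate). A small sketch of that identifier-mapping idea follows; the Tx/SimpleTx types are hypothetical stand-ins for the controller and mdsal transaction interfaces.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

final class ChainMappingSketch {
    interface Tx {
        Object getIdentifier();
    }

    record SimpleTx(Object id) implements Tx {
        @Override
        public Object getIdentifier() {
            return id;
        }
    }

    private final Map<Object, Tx> transactionMap = new HashMap<>();
    private final AtomicLong txNum = new AtomicLong();
    private final Object chainId;

    ChainMappingSketch(final Object chainId) {
        this.chainId = chainId;
    }

    // Mirrors newTransactionIdentifier(): chain id plus a monotonic counter.
    private Object newTransactionIdentifier() {
        return "DOM-CHAIN-" + chainId + "-" + txNum.getAndIncrement();
    }

    Tx newTransaction(final Object delegateId) {
        final Tx wrapper = new SimpleTx(newTransactionIdentifier());
        // Index by the *delegate* identifier so delegate callbacks can be translated back.
        transactionMap.put(delegateId, wrapper);
        return wrapper;
    }

    // Mirrors transactionFromDelegate(): failure callbacks only carry the delegate id.
    Tx transactionFromDelegate(final Object delegateId) {
        final Tx wrapper = transactionMap.get(delegateId);
        if (wrapper == null) {
            throw new IllegalStateException("Delegate transaction " + delegateId + " is not known");
        }
        return wrapper;
    }
}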
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingWriteTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingWriteTransaction.java
deleted file mode 100644 (file)
index b6deaea..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import com.google.common.util.concurrent.FluentFuture;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-class ShardedDOMDataBrokerDelegatingWriteTransaction implements DOMDataWriteTransaction {
-    private final DOMDataTreeWriteTransaction delegateTx;
-    private final Object txIdentifier;
-
-    ShardedDOMDataBrokerDelegatingWriteTransaction(final Object txIdentifier,
-                                                          final DOMDataTreeWriteTransaction delegateTx) {
-        this.delegateTx = checkNotNull(delegateTx);
-        this.txIdentifier = checkNotNull(txIdentifier);
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                    final NormalizedNode<?, ?> data) {
-        delegateTx.put(store.toMdsal(), path, data);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                      final NormalizedNode<?, ?> data) {
-        delegateTx.merge(store.toMdsal(), path, data);
-    }
-
-    @Override
-    public boolean cancel() {
-        return delegateTx.cancel();
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        delegateTx.delete(store.toMdsal(), path);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return delegateTx.commit();
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return txIdentifier;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/mount/DOMMountPointServiceImpl.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/mount/DOMMountPointServiceImpl.java
deleted file mode 100644 (file)
index 6cfdb5a..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl.mount;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.MutableClassToInstanceMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import org.opendaylight.controller.md.sal.dom.api.DOMActionService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.md.sal.dom.broker.spi.mount.SimpleDOMMountPoint;
-import org.opendaylight.controller.sal.core.compat.DOMDataBrokerAdapter;
-import org.opendaylight.controller.sal.core.compat.DOMMountPointAdapter;
-import org.opendaylight.controller.sal.core.compat.DOMNotificationServiceAdapter;
-import org.opendaylight.controller.sal.core.compat.DOMRpcServiceAdapter;
-import org.opendaylight.mdsal.dom.api.DOMMountPointListener;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.concepts.ObjectRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class DOMMountPointServiceImpl implements DOMMountPointService {
-    private abstract static class CompatFactory<M extends org.opendaylight.mdsal.dom.api.DOMService,
-            C extends DOMService> {
-        private final Class<C> controllerClass;
-        private final Class<M> mdsalClass;
-
-        CompatFactory(final Class<C> controllerClass, final Class<M> mdsalClass) {
-            this.controllerClass = requireNonNull(controllerClass);
-            this.mdsalClass = requireNonNull(mdsalClass);
-        }
-
-        final void addService(final org.opendaylight.mdsal.dom.api.DOMMountPointService.DOMMountPointBuilder builder,
-                final ClassToInstanceMap<DOMService> services) {
-            if (!services.containsKey(mdsalClass)) {
-                final C controllerService = services.getInstance(controllerClass);
-                if (controllerService != null) {
-                    final M mdsalService = createService(controllerService);
-                    if (mdsalService != null) {
-                        builder.addService(mdsalClass, mdsalService);
-                    }
-                }
-            }
-        }
-
-        abstract M createService(C delegate);
-    }
-
-    private static final Map<Class<? extends DOMService>, CompatFactory<?, ?>> KNOWN_SERVICES = ImmutableMap.of(
-        DOMActionService.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMActionService, DOMActionService>(
-                DOMActionService.class, org.opendaylight.mdsal.dom.api.DOMActionService.class) {
-            @Override
-            org.opendaylight.mdsal.dom.api.DOMActionService createService(final DOMActionService delegate) {
-                return delegate;
-            }
-        },
-        DOMDataBroker.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMDataBroker, DOMDataBroker>(
-                DOMDataBroker.class, org.opendaylight.mdsal.dom.api.DOMDataBroker.class) {
-            @Override
-            org.opendaylight.mdsal.dom.api.DOMDataBroker createService(final DOMDataBroker delegate) {
-                return new DOMDataBrokerAdapter(delegate);
-            }
-        },
-        DOMNotificationService.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMNotificationService,
-                DOMNotificationService>(DOMNotificationService.class,
-                        org.opendaylight.mdsal.dom.api.DOMNotificationService.class) {
-            @Override
-            org.opendaylight.mdsal.dom.api.DOMNotificationService createService(final DOMNotificationService delegate) {
-                return new DOMNotificationServiceAdapter(delegate);
-            }
-        },
-        DOMRpcService.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMRpcService, DOMRpcService>(
-                DOMRpcService.class, org.opendaylight.mdsal.dom.api.DOMRpcService.class) {
-            @Override
-            org.opendaylight.mdsal.dom.api.DOMRpcService createService(final DOMRpcService delegate) {
-                return new DOMRpcServiceAdapter(delegate);
-            }
-        });
-
-    private final org.opendaylight.mdsal.dom.api.DOMMountPointService delegate;
-
-    @VisibleForTesting
-    public DOMMountPointServiceImpl() {
-        this(new org.opendaylight.mdsal.dom.broker.DOMMountPointServiceImpl());
-    }
-
-    public DOMMountPointServiceImpl(final org.opendaylight.mdsal.dom.api.DOMMountPointService delegate) {
-        this.delegate = delegate;
-    }
-
-    @Override
-    public Optional<DOMMountPoint> getMountPoint(final YangInstanceIdentifier path) {
-        return Optional.fromJavaUtil(delegate.getMountPoint(path).map(DOMMountPointAdapter::new));
-    }
-
-    @Override
-    public DOMMountPointBuilder createMountPoint(final YangInstanceIdentifier path) {
-        return new DOMMountPointBuilderImpl(path);
-    }
-
-    @Override
-    public ListenerRegistration<DOMMountPointListener> registerProvisionListener(final DOMMountPointListener listener) {
-        return delegate.registerProvisionListener(listener);
-    }
-
-    @SuppressWarnings("unchecked")
-    ObjectRegistration<DOMMountPoint> registerMountPoint(final SimpleDOMMountPoint mountPoint) {
-        final org.opendaylight.mdsal.dom.api.DOMMountPointService.DOMMountPointBuilder delegateBuilder =
-                delegate.createMountPoint(mountPoint.getIdentifier());
-
-        if (mountPoint.getSchemaContext() != null) {
-            delegateBuilder.addInitialSchemaContext(mountPoint.getSchemaContext());
-        }
-
-        final ClassToInstanceMap<DOMService> myServices = mountPoint.getServices();
-        for (Entry<Class<? extends DOMService>, DOMService> entry : myServices.entrySet()) {
-            delegateBuilder.addService((Class<DOMService>)entry.getKey(), entry.getValue());
-
-            final CompatFactory<?, ?> compat = KNOWN_SERVICES.get(entry.getKey());
-            if (compat != null) {
-                compat.addService(delegateBuilder, myServices);
-            }
-        }
-
-        final ObjectRegistration<org.opendaylight.mdsal.dom.api.DOMMountPoint> delegateReg = delegateBuilder.register();
-        return new AbstractObjectRegistration<DOMMountPoint>(mountPoint) {
-            @Override
-            protected void removeRegistration() {
-                delegateReg.close();
-            }
-        };
-    }
-
-    public class DOMMountPointBuilderImpl implements DOMMountPointBuilder {
-        private final ClassToInstanceMap<DOMService> services = MutableClassToInstanceMap.create();
-        private final YangInstanceIdentifier path;
-        private SimpleDOMMountPoint mountPoint;
-        private SchemaContext schemaContext;
-
-        public DOMMountPointBuilderImpl(final YangInstanceIdentifier path) {
-            this.path = path;
-        }
-
-        @Override
-        public <T extends DOMService> DOMMountPointBuilder addService(final Class<T> type, final T impl) {
-            services.putInstance(type, impl);
-            return this;
-        }
-
-        @Override
-        public DOMMountPointBuilder addInitialSchemaContext(final SchemaContext ctx) {
-            schemaContext = ctx;
-            return this;
-        }
-
-        @Override
-        public ObjectRegistration<DOMMountPoint> register() {
-            Preconditions.checkState(mountPoint == null, "Mount point is already built.");
-            mountPoint = SimpleDOMMountPoint.create(path, services, schemaContext);
-            return registerMountPoint(mountPoint);
-        }
-    }
-}
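Note: the CompatFactory in the class removed above adapts each legacy DOMService to its mdsal counterpart only when the mount point does not already carry a native mdsal implementation. A condensed sketch of that "adapt only if missing" registration rule follows; it uses a plain Map and hypothetical LegacyService/MdsalService marker interfaces rather than ClassToInstanceMap and the real service types.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

final class CompatRegistrationSketch {
    interface LegacyService { }

    interface MdsalService { }

    // Wraps a legacy service so it satisfies the newer interface.
    record Adapter(LegacyService delegate) implements MdsalService { }

    // Registers the mdsal view of a legacy service, but only when no native mdsal
    // implementation has been supplied (mirrors CompatFactory.addService above).
    static <L extends LegacyService, M extends MdsalService> void addIfMissing(
            final Map<Class<?>, Object> services,
            final Class<L> legacyType, final Class<M> mdsalType,
            final Function<L, M> adapterFactory) {
        if (!services.containsKey(mdsalType)) {
            final L legacy = legacyType.cast(services.get(legacyType));
            if (legacy != null) {
                services.put(mdsalType, adapterFactory.apply(legacy));
            }
        }
    }

    public static void main(final String[] args) {
        final Map<Class<?>, Object> services = new HashMap<>();
        services.put(LegacyService.class, new LegacyService() { });
        addIfMissing(services, LegacyService.class, MdsalService.class, Adapter::new);
        System.out.println(services.containsKey(MdsalService.class)); // true
    }
}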
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/SchemaContextProviders.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/SchemaContextProviders.java
deleted file mode 100644 (file)
index 6a7ab9c..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.concepts.Delegator;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-
-public final class SchemaContextProviders {
-
-    private SchemaContextProviders() {
-        throw new UnsupportedOperationException("Utility class.");
-    }
-
-    public static SchemaContextProvider fromSchemaService(final DOMSchemaService schemaService) {
-        if (schemaService instanceof SchemaContextProvider) {
-            return (SchemaContextProvider) schemaService;
-        }
-        return new SchemaServiceAdapter(schemaService);
-    }
-
-    private static final class SchemaServiceAdapter implements SchemaContextProvider, Delegator<DOMSchemaService> {
-
-        private final DOMSchemaService service;
-
-        SchemaServiceAdapter(final DOMSchemaService service) {
-            this.service = service;
-        }
-
-        @Override
-        public SchemaContext getSchemaContext() {
-            return service.getGlobalContext();
-        }
-
-        @Override
-        public DOMSchemaService getDelegate() {
-            return service;
-        }
-
-        @Override
-        public String toString() {
-            return "SchemaServiceAdapter [service=" + service + "]";
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/util/ProxySchemaContext.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/util/ProxySchemaContext.java
deleted file mode 100644 (file)
index 2a614c7..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.sal.dom.broker.util;
-
-import com.google.common.base.Preconditions;
-import java.util.Collection;
-import java.util.List;
-import java.util.Optional;
-import java.util.Set;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.QNameModule;
-import org.opendaylight.yangtools.yang.common.Revision;
-import org.opendaylight.yangtools.yang.model.api.AugmentationSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.ExtensionDefinition;
-import org.opendaylight.yangtools.yang.model.api.GroupingDefinition;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-import org.opendaylight.yangtools.yang.model.api.Status;
-import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
-import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.UsesNode;
-
-/**
- * ProxySchema Context for SchemaContextProviders.
- */
-public class ProxySchemaContext implements SchemaContext {
-
-    private final SchemaContextProvider schemaProvider;
-
-    public ProxySchemaContext(final SchemaContextProvider schemaProvider) {
-        this.schemaProvider = schemaProvider;
-    }
-
-    private SchemaContext getCurrentSchema() {
-        Preconditions.checkState(schemaProvider.getSchemaContext() != null, "Schema context unavailable from %s",
-                                 schemaProvider);
-        return schemaProvider.getSchemaContext();
-    }
-
-    @Override
-    public Set<DataSchemaNode> getDataDefinitions() {
-        return getCurrentSchema().getDataDefinitions();
-    }
-
-    @Override
-    public Set<Module> getModules() {
-        return getCurrentSchema().getModules();
-    }
-
-    @Override
-    public Set<NotificationDefinition> getNotifications() {
-        return getCurrentSchema().getNotifications();
-    }
-
-    @Override
-    public Set<RpcDefinition> getOperations() {
-        return getCurrentSchema().getOperations();
-    }
-
-    @Override
-    public Set<ExtensionDefinition> getExtensions() {
-        return getCurrentSchema().getExtensions();
-    }
-
-    @Override
-    public boolean isPresenceContainer() {
-        return getCurrentSchema().isPresenceContainer();
-    }
-
-    @Override
-    public Set<TypeDefinition<?>> getTypeDefinitions() {
-        return getCurrentSchema().getTypeDefinitions();
-    }
-
-    @Override
-    public Collection<DataSchemaNode> getChildNodes() {
-        return getCurrentSchema().getChildNodes();
-    }
-
-    @Override
-    public Set<GroupingDefinition> getGroupings() {
-        return getCurrentSchema().getGroupings();
-    }
-
-    @Override
-    public Optional<DataSchemaNode> findDataChildByName(final QName name) {
-        return getCurrentSchema().findDataChildByName(name);
-    }
-
-    @Override
-    public Set<UsesNode> getUses() {
-        return getCurrentSchema().getUses();
-    }
-
-    @Override
-    public Set<AugmentationSchemaNode> getAvailableAugmentations() {
-        return getCurrentSchema().getAvailableAugmentations();
-    }
-
-    @Override
-    public boolean isAugmenting() {
-        return getCurrentSchema().isAugmenting();
-    }
-
-    @Override
-    public boolean isAddedByUses() {
-        return getCurrentSchema().isAddedByUses();
-    }
-
-    @Override
-    public boolean isConfiguration() {
-        return getCurrentSchema().isConfiguration();
-    }
-
-    @Override
-    public QName getQName() {
-        return getCurrentSchema().getQName();
-    }
-
-    @Override
-    public SchemaPath getPath() {
-        return getCurrentSchema().getPath();
-    }
-
-    @Override
-    public List<UnknownSchemaNode> getUnknownSchemaNodes() {
-        return getCurrentSchema().getUnknownSchemaNodes();
-    }
-
-    @Override
-    public Optional<String> getDescription() {
-        return getCurrentSchema().getDescription();
-    }
-
-    @Override
-    public Optional<String> getReference() {
-        return getCurrentSchema().getReference();
-    }
-
-    @Override
-    public Status getStatus() {
-        return getCurrentSchema().getStatus();
-    }
-
-    @Override
-    public Optional<Module> findModule(final String name, final Optional<Revision> revision) {
-        return getCurrentSchema().findModule(name, revision);
-    }
-
-    @Override
-    public Optional<Module> findModule(final QNameModule qnameModule) {
-        return getCurrentSchema().findModule(qnameModule);
-    }
-}
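Note: the proxy removed above never caches the schema; every call re-resolves the current context from the provider, so callers always observe the latest published schema. A minimal sketch of that "always fetch the current delegate" pattern follows; the Schema/SchemaProxy names are invented for illustration and a Supplier stands in for SchemaContextProvider.

import java.util.function.Supplier;

final class CurrentDelegateProxySketch {
    interface Schema {
        String description();
    }

    // Every call re-resolves the live delegate, so callers always observe the most
    // recently published schema (mirrors ProxySchemaContext.getCurrentSchema()).
    record SchemaProxy(Supplier<Schema> provider) implements Schema {
        private Schema current() {
            final Schema schema = provider.get();
            if (schema == null) {
                throw new IllegalStateException("Schema context unavailable from " + provider);
            }
            return schema;
        }

        @Override
        public String description() {
            return current().description();
        }
    }

    public static void main(final String[] args) {
        final Schema[] slot = { () -> "rev-1" };
        final Schema proxy = new SchemaProxy(() -> slot[0]);
        System.out.println(proxy.description()); // rev-1
        slot[0] = () -> "rev-2";
        System.out.println(proxy.description()); // rev-2, without re-wiring the proxy
    }
}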
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/resources/OSGI-INF/blueprint/dom-broker.xml b/opendaylight/md-sal/sal-dom-broker/src/main/resources/OSGI-INF/blueprint/dom-broker.xml
deleted file mode 100644 (file)
index 67ebcf5..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:restart-dependents-on-updates="false">
-
-  <cm:property-placeholder persistent-id="org.opendaylight.mdsal.dom.notification" update-strategy="none">
-    <cm:default-properties>
-      <cm:property name="notification-queue-depth" value="65536"/>
-      <cm:property name="notification-queue-spin" value="0"/>
-      <cm:property name="notification-queue-park" value="0"/>
-    </cm:default-properties>
-  </cm:property-placeholder>
-
-  <!-- Schema Service -->
-
-  <odl:static-reference id="schemaService" interface="org.opendaylight.mdsal.dom.api.DOMSchemaService"/>
-
-  <!-- DOM Notification Service -->
-
-  <reference id="domNotificationService" interface="org.opendaylight.mdsal.dom.api.DOMNotificationService"
-      odl:type="default"/>
-  <reference id="domNotificationPublishService" interface="org.opendaylight.mdsal.dom.api.DOMNotificationPublishService"
-      odl:type="default"/>
-  <reference id="domNotificationSubscriptionListenerRegistry" interface="org.opendaylight.mdsal.dom.spi.DOMNotificationSubscriptionListenerRegistry"
-      odl:type="default"/>
-
-  <bean id="domNotificationRouter" class="org.opendaylight.controller.md.sal.dom.broker.impl.DOMNotificationRouter"
-          factory-method="create">
-    <argument ref="domNotificationService"/>
-    <argument ref="domNotificationPublishService"/>
-    <argument ref="domNotificationSubscriptionListenerRegistry"/>
-  </bean>
-
-  <service ref="domNotificationRouter" odl:type="default">
-    <interfaces>
-      <value>org.opendaylight.controller.md.sal.dom.api.DOMNotificationService</value>
-      <value>org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService</value>
-      <value>org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListenerRegistry</value>
-    </interfaces>
-  </service>
-
-  <!-- DOM RPC Service -->
-
-  <reference id="domRpcService" interface="org.opendaylight.mdsal.dom.api.DOMRpcService"
-      odl:type="default"/>
-  <reference id="domRpcProviderService" interface="org.opendaylight.mdsal.dom.api.DOMRpcProviderService"
-      odl:type="default"/>
-
-  <bean id="domRpcRouter" class="org.opendaylight.controller.md.sal.dom.broker.impl.DOMRpcRouter">
-    <argument ref="domRpcService"/>
-    <argument ref="domRpcProviderService"/>
-  </bean>
-
-  <service ref="domRpcRouter" odl:type="default">
-    <interfaces>
-      <value>org.opendaylight.controller.md.sal.dom.api.DOMRpcService</value>
-      <value>org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService</value>
-    </interfaces>
-  </service>
-
-  <!-- DOM MountPoint Service -->
-
-  <reference id="delegateDomMountPointService" interface="org.opendaylight.mdsal.dom.api.DOMMountPointService"
-      odl:type="default"/>
-
-  <bean id="domMountPointService" class="org.opendaylight.controller.md.sal.dom.broker.impl.mount.DOMMountPointServiceImpl">
-    <argument ref="delegateDomMountPointService"/>
-  </bean>
-
-  <service ref="domMountPointService" interface="org.opendaylight.controller.md.sal.dom.api.DOMMountPointService"
-        odl:type="default"/>
-
-  <!-- DOM PingPong Data Broker -->
-
-  <reference id="domDefaultDataBroker" interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"
-      odl:type="default"/>
-
-  <bean id="domPingPongDataBroker" class="org.opendaylight.controller.md.sal.dom.broker.impl.PingPongDataBroker">
-    <argument ref="domDefaultDataBroker"/>
-  </bean>
-
-  <service ref="domPingPongDataBroker" interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"
-      odl:type="pingpong"/>
-
-</blueprint>
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/BlockingTransactionChainListener.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/BlockingTransactionChainListener.java
deleted file mode 100644 (file)
index 3147ec5..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import com.google.common.util.concurrent.SettableFuture;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-
-/**
- * Simple implementation of {@link TransactionChainListener} for testing.
- *
- * <p>
- * This transaction chain listener does not contain any logic, only update
- * futures ({@link #getFailFuture()} and {@link #getSuccessFuture()} when
- * transaction chain event is retrieved.
- */
-class BlockingTransactionChainListener implements TransactionChainListener {
-
-    private final SettableFuture<Throwable> failFuture = SettableFuture.create();
-    private final SettableFuture<Void> successFuture = SettableFuture.create();
-
-    @Override
-    public void onTransactionChainFailed(final TransactionChain<?, ?> chain, final AsyncTransaction<?, ?> transaction,
-                                         final Throwable cause) {
-        failFuture.set(cause);
-    }
-
-    @Override
-    public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
-        successFuture.set(null);
-    }
-
-    public SettableFuture<Throwable> getFailFuture() {
-        return failFuture;
-    }
-
-    public SettableFuture<Void> getSuccessFuture() {
-        return successFuture;
-    }
-
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerPerformanceTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerPerformanceTest.java
deleted file mode 100644 (file)
index 640d603..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Executors;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class DOMBrokerPerformanceTest {
-
-    private static final Logger LOG = LoggerFactory.getLogger(DOMBrokerPerformanceTest.class);
-
-    private static NormalizedNode<?, ?> outerList(final int index) {
-        return ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, index);
-    }
-
-    private static YangInstanceIdentifier outerListPath(final int index) {
-        return YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)//
-                .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, index) //
-                .build();
-    }
-
-    private SchemaContext schemaContext;
-    private AbstractDOMDataBroker domBroker;
-
-    private static <V> V measure(final String name, final Callable<V> callable) throws Exception {
-        // TODO Auto-generated method stub
-        LOG.debug("Measurement:{} Start", name);
-        long startNano = System.nanoTime();
-        try {
-            return callable.call();
-        } finally {
-            long endNano = System.nanoTime();
-            LOG.info("Measurement:\"{}\" Time:{} ms", name, (endNano - startNano) / 1000000.0d);
-        }
-    }
-
-    @Before
-    public void setupStore() {
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
-        schemaContext = TestModel.createTestContext();
-
-        operStore.onGlobalContextUpdated(schemaContext);
-        configStore.onGlobalContextUpdated(schemaContext);
-
-        ImmutableMap<LogicalDatastoreType, DOMStore> stores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder() //
-                .put(CONFIGURATION, configStore) //
-                .put(OPERATIONAL, operStore) //
-                .build();
-        ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
-        domBroker = new SerializedDOMDataBroker(stores, executor);
-    }
-
-    @Test
-    public void testPerformance() throws Exception {
-        measure("Test Suite (all tests)", (Callable<Void>) () -> {
-            smallTestSuite(10, 1000);
-            //smallTestSuite(10, 100);
-            smallTestSuite(100, 100);
-            //smallTestSuite(100, 100);
-            //smallTestSuite(1000, 10);
-            smallTestSuite(1000, 10);
-            //smallTestSuite(1000, 1000);
-            return null;
-        });
-    }
-
-    private void smallTestSuite(final int txNum, final int innerListWriteNum) throws Exception {
-        measure("TestSuite (Txs:" + txNum + " innerWrites:" + innerListWriteNum + ")", (Callable<Void>) () -> {
-            measureOneTransactionTopContainer();
-            measureSeparateWritesOneLevel(txNum, innerListWriteNum);
-            return null;
-        });
-    }
-
-    private void measureSeparateWritesOneLevel(final int txNum, final int innerNum) throws Exception {
-        final List<DOMDataReadWriteTransaction> transactions = measure("Txs:" + txNum + " Allocate", () -> {
-            List<DOMDataReadWriteTransaction> builder = new ArrayList<>(txNum);
-            for (int i = 0; i < txNum; i++) {
-                DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-                builder.add(writeTx);
-            }
-            return builder;
-        });
-        assertEquals(txNum, transactions.size());
-        measure("Txs:" + txNum + " Writes:1", (Callable<Void>) () -> {
-            int index = 0;
-            for (DOMDataReadWriteTransaction writeTx : transactions) {
-                // Writes /test/outer-list/i in writeTx
-                writeTx.put(OPERATIONAL, outerListPath(index), outerList(index));
-                index++;
-            }
-            return null;
-        });
-
-        measure("Txs:" + txNum + " Writes:" + innerNum, (Callable<Void>) () -> {
-            int index = 0;
-            for (DOMDataReadWriteTransaction writeTx : transactions) {
-                // Writes /test/outer-list/i in writeTx
-                YangInstanceIdentifier path = YangInstanceIdentifier.builder(outerListPath(index))
-                        .node(TestModel.INNER_LIST_QNAME).build();
-                writeTx.put(OPERATIONAL, path, ImmutableNodes.mapNodeBuilder(TestModel.INNER_LIST_QNAME).build());
-                for (int j = 0; j < innerNum; j++) {
-                    YangInstanceIdentifier innerPath = YangInstanceIdentifier.builder(path)
-                            .nodeWithKey(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, String.valueOf(j)).build();
-                    writeTx.put(OPERATIONAL, innerPath, ImmutableNodes
-                            .mapEntry(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, String.valueOf(j)));
-                }
-                index++;
-            }
-            return null;
-        });
-
-        measure("Txs:" + txNum + " Submit, Finish", (Callable<Void>) () -> {
-            List<ListenableFuture<?>> allFutures = measure(txNum + " Submits", () -> {
-                List<ListenableFuture<?>> builder = new ArrayList<>(txNum);
-                for (DOMDataReadWriteTransaction tx : transactions) {
-                    builder.add(tx.commit());
-                }
-                return builder;
-            });
-            Futures.allAsList(allFutures).get();
-            return null;
-        });
-
-        final DOMDataReadTransaction readTx = measure("Txs:1 (ro), Allocate",
-                                                      (Callable<DOMDataReadTransaction>) () -> domBroker
-                                                              .newReadOnlyTransaction());
-
-
-        measure("Txs:1 (ro) Reads:" + txNum + " (1-level)", (Callable<Void>) () -> {
-            for (int i = 0; i < txNum; i++) {
-                ListenableFuture<Optional<NormalizedNode<?, ?>>> potential = readTx.read(OPERATIONAL, outerListPath(i));
-                assertTrue("outerList/" + i, potential.get().isPresent());
-            }
-            return null;
-        });
-
-        measure("Txs:1 (ro) Reads:" + txNum * innerNum + " (2-level)", (Callable<Void>) () -> {
-            for (int i = 0; i < txNum; i++) {
-                for (int j = 0; j < innerNum; j++) {
-                    YangInstanceIdentifier path = YangInstanceIdentifier.builder(outerListPath(i))
-                            //
-                            .node(TestModel.INNER_LIST_QNAME)
-                            .nodeWithKey(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, String.valueOf(j)).build();
-                    ListenableFuture<Optional<NormalizedNode<?, ?>>> potential = readTx.read(OPERATIONAL, path);
-                    assertTrue("outer-list/" + i + "/inner-list/" + j, potential.get().isPresent());
-                }
-            }
-            return null;
-        });
-    }
-
-    private void measureOneTransactionTopContainer() throws Exception {
-
-        final DOMDataReadWriteTransaction writeTx =
-                measure("Txs:1 Allocate", () -> domBroker.newReadWriteTransaction());
-
-        measure("Txs:1 Write", (Callable<Void>) () -> {
-            writeTx.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-            writeTx.put(OPERATIONAL, TestModel.OUTER_LIST_PATH,
-                        ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build());
-            return null;
-        });
-
-        measure("Txs:1 Reads:1", (Callable<Void>) () -> {
-            // Reads /test in writeTx
-            ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = writeTx
-                    .read(OPERATIONAL, TestModel.TEST_PATH);
-            assertTrue(writeTxContainer.get().isPresent());
-            return null;
-        });
-
-        measure("Txs:1 Reads:1", (Callable<Void>) () -> {
-            // Reads /test in writeTx
-            ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = writeTx
-                    .read(OPERATIONAL, TestModel.TEST_PATH);
-            assertTrue(writeTxContainer.get().isPresent());
-            return null;
-        });
-
-        measure("Txs:1 Submit, Finish", (Callable<Void>) () -> {
-            measure("Txs:1 Submit", (Callable<ListenableFuture<?>>) writeTx::commit).get();
-            return null;
-        });
-    }
-}
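Note: the performance test removed above times each phase with a small measure() helper built on System.nanoTime(). A self-contained sketch of that helper is shown below; the MeasureSketch class name and the trivial workload in main are invented for illustration, and System.out stands in for the SLF4J logger used in the original.

import java.util.concurrent.Callable;

final class MeasureSketch {
    // Mirrors the measure() helper above: run a Callable and report elapsed milliseconds,
    // even when the callable throws.
    static <V> V measure(final String name, final Callable<V> callable) throws Exception {
        final long start = System.nanoTime();
        try {
            return callable.call();
        } finally {
            final double millis = (System.nanoTime() - start) / 1_000_000.0d;
            System.out.printf("Measurement:\"%s\" Time:%.3f ms%n", name, millis);
        }
    }

    public static void main(final String[] args) throws Exception {
        final int sum = measure("sum 0..999", () -> {
            int acc = 0;
            for (int i = 0; i < 1000; i++) {
                acc += i;
            }
            return acc;
        });
        System.out.println(sum);
    }
}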
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java
deleted file mode 100644 (file)
index b2fb3c2..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Collections;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicReference;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
-import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class DOMBrokerTest {
-
-    private SchemaContext schemaContext;
-    private AbstractDOMDataBroker domBroker;
-    private ListeningExecutorService executor;
-    private ExecutorService futureExecutor;
-    private CommitExecutorService commitExecutor;
-
-    @Before
-    public void setupStore() {
-
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
-        schemaContext = TestModel.createTestContext();
-
-        operStore.onGlobalContextUpdated(schemaContext);
-        configStore.onGlobalContextUpdated(schemaContext);
-
-        final ImmutableMap<LogicalDatastoreType, DOMStore> stores =
-                ImmutableMap.<LogicalDatastoreType, DOMStore>builder() //
-                .put(CONFIGURATION, configStore) //
-                .put(OPERATIONAL, operStore) //
-                .build();
-
-        commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
-        futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB", DOMBrokerTest.class);
-        executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
-                                                                 TransactionCommitDeadlockException
-                                                                         .DEADLOCK_EXCEPTION_SUPPLIER,
-                                                                 futureExecutor);
-        domBroker = new SerializedDOMDataBroker(stores, executor);
-    }
-
-    @After
-    public void tearDown() {
-        if (executor != null) {
-            executor.shutdownNow();
-        }
-
-        if (futureExecutor != null) {
-            futureExecutor.shutdownNow();
-        }
-    }
-
-    @Test(timeout = 10000)
-    public void testTransactionIsolation() throws InterruptedException, ExecutionException {
-
-        assertNotNull(domBroker);
-
-        DOMDataReadTransaction readTx = domBroker.newReadOnlyTransaction();
-        assertNotNull(readTx);
-
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        assertNotNull(writeTx);
-        /**
-         *
-         * Writes /test in writeTx
-         *
-         */
-        writeTx.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        /**
-         * Reads /test from writeTx Read should return container.
-         *
-         */
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = writeTx
-                .read(OPERATIONAL, TestModel.TEST_PATH);
-        assertTrue(writeTxContainer.get().isPresent());
-
-        /**
-         * Reads /test from readTx Read should return Absent.
-         *
-         */
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> readTxContainer = readTx
-                .read(OPERATIONAL, TestModel.TEST_PATH);
-        assertFalse(readTxContainer.get().isPresent());
-    }
-
-    @Test(timeout = 10000)
-    public void testTransactionCommit() throws InterruptedException, ExecutionException, TimeoutException {
-
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        assertNotNull(writeTx);
-        /*
-         * Writes /test into writeTx.
-         *
-         * The write is visible only within writeTx until commit() completes.
-         */
-        writeTx.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        /*
-         * Reads /test from writeTx.
-         * The read should return the container.
-         */
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = writeTx
-                .read(OPERATIONAL, TestModel.TEST_PATH);
-        assertTrue(writeTxContainer.get().isPresent());
-
-        writeTx.commit().get(5, TimeUnit.SECONDS);
-
-        Optional<NormalizedNode<?, ?>> afterCommitRead = domBroker.newReadOnlyTransaction()
-                .read(OPERATIONAL, TestModel.TEST_PATH).get();
-        assertTrue(afterCommitRead.isPresent());
-    }
-
-    @Test(timeout = 10000)
-    @Deprecated
-    public void testTransactionSubmit() throws InterruptedException, ExecutionException, TimeoutException {
-
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        assertNotNull(writeTx);
-        /*
-         * Writes /test into writeTx.
-         *
-         * The write is visible only within writeTx until submit() completes.
-         */
-        writeTx.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        /*
-         * Reads /test from writeTx.
-         * The read should return the container.
-         */
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = writeTx
-                .read(OPERATIONAL, TestModel.TEST_PATH);
-        assertTrue(writeTxContainer.get().isPresent());
-
-        writeTx.submit().get(5, TimeUnit.SECONDS);
-
-        Optional<NormalizedNode<?, ?>> afterCommitRead = domBroker.newReadOnlyTransaction()
-                .read(OPERATIONAL, TestModel.TEST_PATH).get();
-        assertTrue(afterCommitRead.isPresent());
-    }
-
-    @Test(expected = TransactionCommitFailedException.class)
-    @SuppressWarnings({"checkstyle:IllegalThrows", "checkstyle:AvoidHidingCauseException"})
-    public void testRejectedCommit() throws Throwable {
-
-        commitExecutor.delegate = Mockito.mock(ExecutorService.class);
-        Mockito.doThrow(new RejectedExecutionException("mock")).when(commitExecutor.delegate)
-                .execute(Mockito.any(Runnable.class));
-        Mockito.doNothing().when(commitExecutor.delegate).shutdown();
-        Mockito.doReturn(Collections.emptyList()).when(commitExecutor.delegate).shutdownNow();
-        Mockito.doReturn("").when(commitExecutor.delegate).toString();
-        Mockito.doReturn(true).when(commitExecutor.delegate)
-                .awaitTermination(Mockito.anyLong(), Mockito.any(TimeUnit.class));
-
-        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
-        writeTx.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        try {
-            writeTx.commit().get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    AtomicReference<Throwable> submitTxAsync(final DOMDataWriteTransaction writeTx) {
-        final AtomicReference<Throwable> caughtEx = new AtomicReference<>();
-        new Thread(() -> {
-            try {
-                writeTx.commit();
-            } catch (Throwable e) {
-                caughtEx.set(e);
-            }
-        }).start();
-
-        return caughtEx;
-    }
-
-
-    static class CommitExecutorService extends ForwardingExecutorService {
-
-        ExecutorService delegate;
-
-        CommitExecutorService(final ExecutorService delegate) {
-            this.delegate = delegate;
-        }
-
-        @Override
-        protected ExecutorService delegate() {
-            return delegate;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataTreeListenerTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataTreeListenerTest.java
deleted file mode 100644 (file)
index 57dbf83..0000000
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
-import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class DOMDataTreeListenerTest {
-
-    private SchemaContext schemaContext;
-    private AbstractDOMDataBroker domBroker;
-    private ListeningExecutorService executor;
-    private ExecutorService futureExecutor;
-    private CommitExecutorService commitExecutor;
-
-    private static final DataContainerChild<?, ?> OUTER_LIST = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
-            .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)).build();
-
-    private static final DataContainerChild<?, ?> OUTER_LIST_2 = ImmutableNodes
-            .mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
-            .withChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2)).build();
-
-    private static final NormalizedNode<?, ?> TEST_CONTAINER = Builders.containerBuilder()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).withChild(OUTER_LIST)
-            .build();
-
-    private static final NormalizedNode<?, ?> TEST_CONTAINER_2 = Builders.containerBuilder()
-            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).withChild(OUTER_LIST_2)
-            .build();
-
-    private static final DOMDataTreeIdentifier ROOT_DATA_TREE_ID = new DOMDataTreeIdentifier(
-            LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-
-    private static final DOMDataTreeIdentifier OUTER_LIST_DATA_TREE_ID = new DOMDataTreeIdentifier(
-            LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH);
-
-    @Before
-    public void setupStore() {
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
-        schemaContext = TestModel.createTestContext();
-
-        operStore.onGlobalContextUpdated(schemaContext);
-        configStore.onGlobalContextUpdated(schemaContext);
-
-        final ImmutableMap<LogicalDatastoreType, DOMStore> stores = ImmutableMap.<LogicalDatastoreType,
-                DOMStore>builder() //
-                .put(CONFIGURATION, configStore) //
-                .put(OPERATIONAL, operStore) //
-                .build();
-
-        commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
-        futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB",
-                DOMDataTreeListenerTest.class);
-        executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
-                                                                 TransactionCommitDeadlockException
-                                                                         .DEADLOCK_EXCEPTION_SUPPLIER,
-                                                                 futureExecutor);
-        domBroker = new SerializedDOMDataBroker(stores, executor);
-    }
-
-    @After
-    public void tearDown() {
-        if (executor != null) {
-            executor.shutdownNow();
-        }
-
-        if (futureExecutor != null) {
-            futureExecutor.shutdownNow();
-        }
-    }
-
-    @Test
-    public void writeContainerEmptyTreeTest() throws InterruptedException {
-        CountDownLatch latch = new CountDownLatch(1);
-
-        DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
-        assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!", dataTreeChangeService);
-
-        final TestDataTreeListener listener = new TestDataTreeListener(latch);
-        final ListenerRegistration<TestDataTreeListener> listenerReg = dataTreeChangeService
-                .registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
-
-        final DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
-        writeTx.commit();
-
-        latch.await(5, TimeUnit.SECONDS);
-
-        assertEquals(1, listener.getReceivedChanges().size());
-        final Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
-        assertEquals(1, changes.size());
-
-        DataTreeCandidate candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        DataTreeCandidateNode candidateRoot = candidate.getRootNode();
-        checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
-        listenerReg.close();
-    }
-
-    @Test
-    public void replaceContainerContainerInTreeTest() throws InterruptedException, ExecutionException {
-        final CountDownLatch latch = new CountDownLatch(2);
-
-        DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
-        assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!", dataTreeChangeService);
-
-        DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
-        writeTx.commit().get();
-
-        final TestDataTreeListener listener = new TestDataTreeListener(latch);
-        final ListenerRegistration<TestDataTreeListener> listenerReg = dataTreeChangeService
-                .registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
-        writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER_2);
-        writeTx.commit();
-
-        latch.await(5, TimeUnit.SECONDS);
-
-        assertEquals(2, listener.getReceivedChanges().size());
-        Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
-        assertEquals(1, changes.size());
-
-        DataTreeCandidate candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        DataTreeCandidateNode candidateRoot = candidate.getRootNode();
-        checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
-
-        changes = listener.getReceivedChanges().get(1);
-        assertEquals(1, changes.size());
-
-        candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        candidateRoot = candidate.getRootNode();
-        checkChange(TEST_CONTAINER, TEST_CONTAINER_2, ModificationType.WRITE, candidateRoot);
-        listenerReg.close();
-    }
-
-    @Test
-    public void deleteContainerContainerInTreeTest() throws InterruptedException, ExecutionException {
-        final CountDownLatch latch = new CountDownLatch(2);
-
-        DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
-        assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!", dataTreeChangeService);
-
-        DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
-        writeTx.commit().get();
-
-        final TestDataTreeListener listener = new TestDataTreeListener(latch);
-        final ListenerRegistration<TestDataTreeListener> listenerReg = dataTreeChangeService
-                .registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
-
-        writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.delete(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH);
-        writeTx.commit();
-
-        latch.await(5, TimeUnit.SECONDS);
-
-        assertEquals(2, listener.getReceivedChanges().size());
-        Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
-        assertEquals(1, changes.size());
-
-        DataTreeCandidate candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        DataTreeCandidateNode candidateRoot = candidate.getRootNode();
-        checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
-
-        changes = listener.getReceivedChanges().get(1);
-        assertEquals(1, changes.size());
-
-        candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        candidateRoot = candidate.getRootNode();
-        checkChange(TEST_CONTAINER, null, ModificationType.DELETE, candidateRoot);
-        listenerReg.close();
-    }
-
-    @Test
-    public void replaceChildListContainerInTreeTest() throws InterruptedException, ExecutionException {
-        final CountDownLatch latch = new CountDownLatch(2);
-
-        DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
-        assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!", dataTreeChangeService);
-
-        DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
-        writeTx.commit().get();
-
-        final TestDataTreeListener listener = new TestDataTreeListener(latch);
-        final ListenerRegistration<TestDataTreeListener> listenerReg = dataTreeChangeService
-                .registerDataTreeChangeListener(ROOT_DATA_TREE_ID, listener);
-
-        writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH, OUTER_LIST_2);
-        writeTx.commit();
-
-        latch.await(5, TimeUnit.SECONDS);
-
-        assertEquals(2, listener.getReceivedChanges().size());
-        Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
-        assertEquals(1, changes.size());
-
-        DataTreeCandidate candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        DataTreeCandidateNode candidateRoot = candidate.getRootNode();
-        checkChange(null, TEST_CONTAINER, ModificationType.WRITE, candidateRoot);
-
-        changes = listener.getReceivedChanges().get(1);
-        assertEquals(1, changes.size());
-
-        candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        candidateRoot = candidate.getRootNode();
-        checkChange(TEST_CONTAINER, TEST_CONTAINER_2, ModificationType.SUBTREE_MODIFIED, candidateRoot);
-        final DataTreeCandidateNode modifiedChild = candidateRoot
-                .getModifiedChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.OUTER_LIST_QNAME));
-        assertNotNull(modifiedChild);
-        checkChange(OUTER_LIST, OUTER_LIST_2, ModificationType.WRITE, modifiedChild);
-        listenerReg.close();
-    }
-
-    @Test
-    public void rootModificationChildListenerTest() throws InterruptedException, ExecutionException {
-        final CountDownLatch latch = new CountDownLatch(2);
-
-        DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
-        assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!", dataTreeChangeService);
-
-        DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
-        writeTx.commit().get();
-
-        final TestDataTreeListener listener = new TestDataTreeListener(latch);
-        final ListenerRegistration<TestDataTreeListener> listenerReg = dataTreeChangeService
-                .registerDataTreeChangeListener(OUTER_LIST_DATA_TREE_ID, listener);
-
-        writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER_2);
-        writeTx.commit().get();
-
-        latch.await(1, TimeUnit.SECONDS);
-
-        assertEquals(2, listener.getReceivedChanges().size());
-        Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
-        assertEquals(1, changes.size());
-
-        DataTreeCandidate candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        DataTreeCandidateNode candidateRoot = candidate.getRootNode();
-        checkChange(null, OUTER_LIST, ModificationType.WRITE, candidateRoot);
-
-        changes = listener.getReceivedChanges().get(1);
-        assertEquals(1, changes.size());
-
-        candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        candidateRoot = candidate.getRootNode();
-        checkChange(OUTER_LIST, OUTER_LIST_2, ModificationType.WRITE, candidateRoot);
-        listenerReg.close();
-    }
-
-    @Test
-    public void listEntryChangeNonRootRegistrationTest() throws InterruptedException, ExecutionException {
-        final CountDownLatch latch = new CountDownLatch(2);
-
-        DOMDataTreeChangeService dataTreeChangeService = getDOMDataTreeChangeService();
-        assertNotNull("DOMDataTreeChangeService not found, cannot continue with test!", dataTreeChangeService);
-
-        DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.TEST_PATH, TEST_CONTAINER);
-        writeTx.commit().get();
-
-        final TestDataTreeListener listener = new TestDataTreeListener(latch);
-        final ListenerRegistration<TestDataTreeListener> listenerReg = dataTreeChangeService
-                .registerDataTreeChangeListener(OUTER_LIST_DATA_TREE_ID, listener);
-
-        final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId1
-                = new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME,
-                                                                          TestModel.ID_QNAME, 1);
-        final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId2
-                = new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME,
-                                                                          TestModel.ID_QNAME, 2);
-        final YangInstanceIdentifier.NodeIdentifierWithPredicates outerListEntryId3
-                = new YangInstanceIdentifier.NodeIdentifierWithPredicates(TestModel.OUTER_LIST_QNAME,
-                                                                          TestModel.ID_QNAME, 3);
-
-        final MapEntryNode outerListEntry1 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1);
-        final MapEntryNode outerListEntry2 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2);
-        final MapEntryNode outerListEntry3 = ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 3);
-
-        final MapNode listAfter = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).withChild(outerListEntry2)
-                .withChild(outerListEntry3).build();
-
-        writeTx = domBroker.newWriteOnlyTransaction();
-        writeTx.delete(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId1));
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId2),
-                    outerListEntry2);
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TestModel.OUTER_LIST_PATH.node(outerListEntryId3),
-                    outerListEntry3);
-        writeTx.commit();
-
-        latch.await(5, TimeUnit.SECONDS);
-
-        assertEquals(2, listener.getReceivedChanges().size());
-        Collection<DataTreeCandidate> changes = listener.getReceivedChanges().get(0);
-        assertEquals(1, changes.size());
-
-        DataTreeCandidate candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        DataTreeCandidateNode candidateRoot = candidate.getRootNode();
-        checkChange(null, OUTER_LIST, ModificationType.WRITE, candidateRoot);
-
-        changes = listener.getReceivedChanges().get(1);
-        assertEquals(1, changes.size());
-
-        candidate = changes.iterator().next();
-        assertNotNull(candidate);
-        candidateRoot = candidate.getRootNode();
-        checkChange(OUTER_LIST, listAfter, ModificationType.SUBTREE_MODIFIED, candidateRoot);
-        final DataTreeCandidateNode entry1Candidate = candidateRoot.getModifiedChild(outerListEntryId1);
-        checkChange(outerListEntry1, null, ModificationType.DELETE, entry1Candidate);
-        final DataTreeCandidateNode entry2Candidate = candidateRoot.getModifiedChild(outerListEntryId2);
-        checkChange(null, outerListEntry2, ModificationType.WRITE, entry2Candidate);
-        final DataTreeCandidateNode entry3Candidate = candidateRoot.getModifiedChild(outerListEntryId3);
-        checkChange(null, outerListEntry3, ModificationType.WRITE, entry3Candidate);
-        listenerReg.close();
-    }
-
-    private static void checkChange(final NormalizedNode<?, ?> expectedBefore, final NormalizedNode<?, ?> expectedAfter,
-                                    final ModificationType expectedMod, final DataTreeCandidateNode candidateNode) {
-        if (expectedBefore != null) {
-            assertTrue(candidateNode.getDataBefore().isPresent());
-            assertEquals(expectedBefore, candidateNode.getDataBefore().get());
-        } else {
-            assertFalse(candidateNode.getDataBefore().isPresent());
-        }
-
-        if (expectedAfter != null) {
-            assertTrue(candidateNode.getDataAfter().isPresent());
-            assertEquals(expectedAfter, candidateNode.getDataAfter().get());
-        } else {
-            assertFalse(candidateNode.getDataAfter().isPresent());
-        }
-
-        assertEquals(expectedMod, candidateNode.getModificationType());
-    }
-
-    private DOMDataTreeChangeService getDOMDataTreeChangeService() {
-        final DOMDataBrokerExtension extension = domBroker.getSupportedExtensions().get(DOMDataTreeChangeService.class);
-        if (extension == null) {
-            return null;
-        }
-        DOMDataTreeChangeService dataTreeChangeService = null;
-        if (extension instanceof DOMDataTreeChangeService) {
-            dataTreeChangeService = (DOMDataTreeChangeService) extension;
-        }
-        return dataTreeChangeService;
-    }
-
-
-    static class CommitExecutorService extends ForwardingExecutorService {
-
-        ExecutorService delegate;
-
-        CommitExecutorService(final ExecutorService delegate) {
-            this.delegate = delegate;
-        }
-
-        @Override
-        protected ExecutorService delegate() {
-            return delegate;
-        }
-    }
-
-    static class TestDataTreeListener implements DOMDataTreeChangeListener {
-
-        private final List<Collection<DataTreeCandidate>> receivedChanges = new ArrayList<>();
-        private final CountDownLatch latch;
-
-        TestDataTreeListener(final CountDownLatch latch) {
-            this.latch = latch;
-        }
-
-        @Override
-        public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
-            receivedChanges.add(changes);
-            latch.countDown();
-        }
-
-        public List<Collection<DataTreeCandidate>> getReceivedChanges() {
-            return receivedChanges;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransactionTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransactionTest.java
deleted file mode 100644 (file)
index f98ff6e..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import java.util.Collections;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-
-public class DOMForwardedWriteTransactionTest {
-
-    @Mock
-    private AbstractDOMForwardedTransactionFactory abstractDOMForwardedTransactionFactory;
-
-    @Mock
-    private DOMStoreWriteTransaction domStoreWriteTransaction;
-
-    @Before
-    public void setup() {
-        MockitoAnnotations.initMocks(this);
-    }
-
-    @Test
-    public void readyRuntimeExceptionAndCancel() {
-        RuntimeException thrown = new RuntimeException();
-        doThrow(thrown).when(domStoreWriteTransaction).ready();
-        DOMForwardedWriteTransaction<DOMStoreWriteTransaction> domForwardedWriteTransaction =
-                new DOMForwardedWriteTransaction<>(
-                        new Object(),
-                        Collections.singletonMap(LogicalDatastoreType.OPERATIONAL, domStoreWriteTransaction),
-                        abstractDOMForwardedTransactionFactory);
-        CheckedFuture<Void, TransactionCommitFailedException> submitFuture = domForwardedWriteTransaction.submit();
-        try {
-            submitFuture.checkedGet();
-            Assert.fail("TransactionCommitFailedException expected");
-        } catch (TransactionCommitFailedException e) {
-            assertTrue(e.getCause() == thrown);
-            domForwardedWriteTransaction.cancel();
-        }
-    }
-
-    @Test
-    public void submitRuntimeExceptionAndCancel() {
-        RuntimeException thrown = new RuntimeException();
-        doReturn(null).when(domStoreWriteTransaction).ready();
-        doThrow(thrown).when(abstractDOMForwardedTransactionFactory).commit(any(), any(), any());
-        DOMForwardedWriteTransaction<DOMStoreWriteTransaction> domForwardedWriteTransaction =
-                new DOMForwardedWriteTransaction<>(
-                    new Object(),
-                    Collections.singletonMap(LogicalDatastoreType.OPERATIONAL, domStoreWriteTransaction),
-                    abstractDOMForwardedTransactionFactory);
-        CheckedFuture<Void, TransactionCommitFailedException> submitFuture = domForwardedWriteTransaction.submit();
-        try {
-            submitFuture.checkedGet();
-            Assert.fail("TransactionCommitFailedException expected");
-        } catch (TransactionCommitFailedException e) {
-            assertTrue(e.getCause() == thrown);
-            domForwardedWriteTransaction.cancel();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouterTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouterTest.java
deleted file mode 100644 (file)
index c0f0e02..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.time.Instant;
-import java.time.temporal.ChronoUnit;
-import java.util.Date;
-import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.eclipse.jdt.annotation.Nullable;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.dom.api.DOMEvent;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.controller.md.sal.dom.spi.DOMNotificationSubscriptionListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.NotificationDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Unit tests for DOMNotificationRouter.
- *
- * @author Thomas Pantelis
- */
-public class DOMNotificationRouterTest {
-    private static final ContainerNode BODY = ImmutableContainerNodeBuilder.create().withNodeIdentifier(
-        new NodeIdentifier(QName.create(TestModel.TEST_QNAME.getModule(), "test-notification")))
-            .withChild(ImmutableNodes.leafNode(QName.create(TestModel.TEST_QNAME.getModule(), "value-leaf"), "foo"))
-                .build();
-    // Truncate to milliseconds: Java 9+ gives microsecond precision, which cannot be expressed as a Date
-    private static final Instant INSTANT = Instant.now().truncatedTo(ChronoUnit.MILLIS);
-
-    private static SchemaPath notificationSchemaPath;
-
-    private final org.opendaylight.mdsal.dom.broker.DOMNotificationRouter mdsalRouter =
-            org.opendaylight.mdsal.dom.broker.DOMNotificationRouter.create(16);
-    private final DOMNotificationRouter legacyRouter =
-            DOMNotificationRouter.create(mdsalRouter, mdsalRouter, mdsalRouter);
-    private final TestLegacyDOMNotificationListener testLegacyListener = new TestLegacyDOMNotificationListener();
-    private final TestMdsalDOMNotificationListener testMdsalListener = new TestMdsalDOMNotificationListener();
-
-    @BeforeClass
-    public static void staticSetup() {
-        final SchemaContext schemaContext = TestModel.createTestContext();
-
-        Module testModule = schemaContext.findModule("odl-datastore-test", TestModel.TEST_QNAME.getRevision()).get();
-        NotificationDefinition notificationDefinition = null;
-        for (NotificationDefinition def: testModule.getNotifications()) {
-            if (def.getQName().getLocalName().equals("test-notification")) {
-                notificationDefinition = def;
-                break;
-            }
-        }
-
-        assertNotNull("test-notification not found in " + testModule.getNotifications(), notificationDefinition);
-        notificationSchemaPath = notificationDefinition.getPath();
-    }
-
-    @Test
-    public void testLegacyListenerAndPublish() throws InterruptedException, ExecutionException, TimeoutException {
-        final ListenerRegistration<TestLegacyDOMNotificationListener> reg =
-                legacyRouter.registerNotificationListener(testLegacyListener, notificationSchemaPath);
-
-        legacyRouter.putNotification(new TestLegacyDOMNotification()).get(5, TimeUnit.SECONDS);
-        testLegacyListener.verifyReceived(notificationSchemaPath, BODY, null);
-
-        legacyRouter.offerNotification(new TestLegacyDOMNotification()).get(5, TimeUnit.SECONDS);
-        testLegacyListener.verifyReceived(notificationSchemaPath, BODY, null);
-
-        legacyRouter.offerNotification(new TestLegacyDOMNotification(), 100, TimeUnit.MILLISECONDS)
-            .get(5, TimeUnit.SECONDS);
-        testLegacyListener.verifyReceived(notificationSchemaPath, BODY, null);
-
-        legacyRouter.offerNotification(new TestLegacyDOMEvent()).get(5, TimeUnit.SECONDS);
-        testLegacyListener.verifyReceived(notificationSchemaPath, BODY, Date.from(INSTANT));
-
-        reg.close();
-
-        legacyRouter.offerNotification(new TestLegacyDOMNotification()).get(5, TimeUnit.SECONDS);
-        testLegacyListener.verifyNotReceived();
-    }
-
-    @Test
-    public void testLegacyListenerAndMdsalPublish()
-            throws InterruptedException, ExecutionException, TimeoutException {
-        legacyRouter.registerNotificationListener(testLegacyListener, notificationSchemaPath);
-
-        mdsalRouter.offerNotification(new TestMdsalDOMNotification()).get(5, TimeUnit.SECONDS);
-        testLegacyListener.verifyReceived(notificationSchemaPath, BODY, null);
-
-        mdsalRouter.offerNotification(new TestMdsalDOMEvent()).get(5, TimeUnit.SECONDS);
-        testLegacyListener.verifyReceived(notificationSchemaPath, BODY, Date.from(INSTANT));
-    }
-
-    @Test
-    public void testMdsalListenerAndLegacyPublish()
-            throws InterruptedException, ExecutionException, TimeoutException {
-        mdsalRouter.registerNotificationListener(testMdsalListener, notificationSchemaPath);
-
-        legacyRouter.offerNotification(new TestLegacyDOMNotification()).get(5, TimeUnit.SECONDS);
-        testMdsalListener.verifyReceived(notificationSchemaPath, BODY, null);
-
-        legacyRouter.offerNotification(new TestLegacyDOMEvent()).get(5, TimeUnit.SECONDS);
-        testMdsalListener.verifyReceived(notificationSchemaPath, BODY, INSTANT);
-    }
-
-    @Test
-    public void testRegisterSubscriptionListener() throws InterruptedException, ExecutionException, TimeoutException {
-        TestLegacyDOMNotificationSubscriptionListener listener = new TestLegacyDOMNotificationSubscriptionListener();
-        final ListenerRegistration<TestLegacyDOMNotificationSubscriptionListener> subscriptionReg =
-                legacyRouter.registerSubscriptionListener(listener);
-
-        listener.verifyReceived();
-
-        final ListenerRegistration<TestLegacyDOMNotificationListener> listenerReg =
-                legacyRouter.registerNotificationListener(testLegacyListener, notificationSchemaPath);
-
-        listener.verifyReceived(notificationSchemaPath);
-
-        listenerReg.close();
-
-        listener.verifyReceived();
-
-        subscriptionReg.close();
-
-        legacyRouter.registerNotificationListener(testLegacyListener, notificationSchemaPath);
-
-        listener.verifyNotReceived();
-    }
-
-    private static class TestLegacyDOMNotificationListener implements DOMNotificationListener {
-        SettableFuture<DOMNotification> receivedNotification = SettableFuture.create();
-
-        @Override
-        public void onNotification(final DOMNotification notification) {
-            receivedNotification.set(notification);
-        }
-
-        void verifyReceived(final SchemaPath path, final ContainerNode body, final @Nullable Date eventTime)
-                throws InterruptedException, ExecutionException, TimeoutException {
-            final DOMNotification actual = receivedNotification.get(5, TimeUnit.SECONDS);
-            assertEquals(path, actual.getType());
-            assertEquals(body, actual.getBody());
-
-            if (eventTime != null) {
-                assertTrue("Expected DOMEvent", actual instanceof DOMEvent);
-                assertEquals(eventTime, ((DOMEvent)actual).getEventTime());
-            } else {
-                assertFalse("Unexpected DOMEvent", actual instanceof DOMEvent);
-            }
-
-            receivedNotification = SettableFuture.create();
-        }
-
-        void verifyNotReceived() {
-            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
-            assertFalse("Unexpected notification", receivedNotification.isDone());
-        }
-    }
-
-    private static class TestMdsalDOMNotificationListener
-            implements org.opendaylight.mdsal.dom.api.DOMNotificationListener {
-        SettableFuture<org.opendaylight.mdsal.dom.api.DOMNotification> receivedNotification = SettableFuture.create();
-
-        @Override
-        public void onNotification(final org.opendaylight.mdsal.dom.api.DOMNotification notification) {
-            receivedNotification.set(notification);
-        }
-
-        void verifyReceived(final SchemaPath path, final ContainerNode body, final @Nullable Instant eventTime)
-                throws InterruptedException, ExecutionException, TimeoutException {
-            final org.opendaylight.mdsal.dom.api.DOMNotification actual =
-                    receivedNotification.get(5, TimeUnit.SECONDS);
-            assertEquals(path, actual.getType());
-            assertEquals(body, actual.getBody());
-
-            if (eventTime != null) {
-                assertTrue("Expected DOMEvent", actual instanceof org.opendaylight.mdsal.dom.api.DOMEvent);
-                assertEquals(eventTime, ((org.opendaylight.mdsal.dom.api.DOMEvent)actual).getEventInstant());
-            } else {
-                assertFalse("Unexpected DOMEvent", actual instanceof org.opendaylight.mdsal.dom.api.DOMEvent);
-            }
-
-            receivedNotification = SettableFuture.create();
-        }
-    }
-
-    private static class TestLegacyDOMNotificationSubscriptionListener implements DOMNotificationSubscriptionListener {
-        SettableFuture<Set<SchemaPath>> receivedNotification = SettableFuture.create();
-
-        @Override
-        public void onSubscriptionChanged(final Set<SchemaPath> currentTypes) {
-            receivedNotification.set(currentTypes);
-        }
-
-        void verifyReceived(final SchemaPath... paths)
-                throws InterruptedException, ExecutionException, TimeoutException {
-            final Set<SchemaPath> actual = receivedNotification.get(5, TimeUnit.SECONDS);
-            assertEquals(ImmutableSet.copyOf(paths), actual);
-            receivedNotification = SettableFuture.create();
-        }
-
-        void verifyNotReceived() {
-            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
-            assertFalse("Unexpected notification", receivedNotification.isDone());
-        }
-    }
-
-    private static class TestLegacyDOMNotification implements DOMNotification {
-        @Override
-        public SchemaPath getType() {
-            return notificationSchemaPath;
-        }
-
-        @Override
-        public ContainerNode getBody() {
-            return BODY;
-        }
-    }
-
-    private static class TestLegacyDOMEvent extends TestLegacyDOMNotification implements DOMEvent {
-        @Override
-        public Date getEventTime() {
-            return Date.from(INSTANT);
-        }
-    }
-
-    private static class TestMdsalDOMNotification implements org.opendaylight.mdsal.dom.api.DOMNotification {
-        @Override
-        public SchemaPath getType() {
-            return notificationSchemaPath;
-        }
-
-        @Override
-        public ContainerNode getBody() {
-            return BODY;
-        }
-    }
-
-    private static class TestMdsalDOMEvent extends TestMdsalDOMNotification
-            implements org.opendaylight.mdsal.dom.api.DOMEvent {
-        @Override
-        public Instant getEventInstant() {
-            return INSTANT;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRouterTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMRpcRouterTest.java
deleted file mode 100644 (file)
index 259cbfb..0000000
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.after;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.reset;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.AbstractMap.SimpleEntry;
-import java.util.Collections;
-import java.util.Map.Entry;
-import java.util.concurrent.ExecutionException;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Unit tests for DOMRpcRouter.
- *
- * @author Thomas Pantelis
- */
-public class DOMRpcRouterTest {
-
-    private static final NormalizedNode<?, ?> RPC_INPUT = ImmutableNodes.leafNode(
-            QName.create(TestModel.TEST_QNAME.getModule(), "input-leaf"), "foo");
-    private static final NormalizedNode<?, ?> RPC_OUTPUT = ImmutableNodes.leafNode(
-            QName.create(TestModel.TEST_QNAME.getModule(), "output-leaf"), "bar");
-    private final TestLegacyDOMRpcImplementation testLegacyRpcImpl = new TestLegacyDOMRpcImplementation();
-    private final TestMdsalDOMRpcImplementation testMdsalRpcImpl = new TestMdsalDOMRpcImplementation();
-    private org.opendaylight.mdsal.dom.broker.DOMRpcRouter mdsalRpcRouter;
-    private DOMRpcRouter legacyRpcRouter;
-    private DOMRpcIdentifier legacyTestRpcIdentifier;
-    private DOMRpcIdentifier legacyTestRpcNoInputIdentifier;
-    private org.opendaylight.mdsal.dom.api.DOMRpcIdentifier mdsalTestRpcIdentifier;
-    private org.opendaylight.mdsal.dom.api.DOMRpcIdentifier mdsalTestRpcNoInputIdentifier;
-
-    @Before
-    public void setup() {
-        mdsalRpcRouter = new org.opendaylight.mdsal.dom.broker.DOMRpcRouter();
-        final SchemaContext schemaContext = TestModel.createTestContext();
-        mdsalRpcRouter.onGlobalContextUpdated(schemaContext);
-        legacyRpcRouter = new DOMRpcRouter(mdsalRpcRouter.getRpcService(), mdsalRpcRouter.getRpcProviderService());
-
-        legacyTestRpcIdentifier = DOMRpcIdentifier.create(findRpc(schemaContext, "test-rpc"));
-        legacyTestRpcNoInputIdentifier = DOMRpcIdentifier.create(findRpc(schemaContext, "test-rpc-no-input"));
-        mdsalTestRpcIdentifier = org.opendaylight.mdsal.dom.api.DOMRpcIdentifier.create(
-                findRpc(schemaContext, "test-rpc"));
-        mdsalTestRpcNoInputIdentifier = org.opendaylight.mdsal.dom.api.DOMRpcIdentifier.create(
-                findRpc(schemaContext, "test-rpc-no-input"));
-    }
-
-    @Test
-    public void testLegacyRegistrationAndInvocation() throws InterruptedException, ExecutionException {
-        final DOMRpcImplementationRegistration<TestLegacyDOMRpcImplementation> reg =
-            legacyRpcRouter.registerRpcImplementation(testLegacyRpcImpl, legacyTestRpcIdentifier,
-                    legacyTestRpcNoInputIdentifier);
-
-        // Test success
-
-        DefaultDOMRpcResult result = new DefaultDOMRpcResult(RPC_OUTPUT);
-        testLegacyRpcImpl.init(Futures.immediateCheckedFuture(result));
-
-        ListenableFuture<DOMRpcResult> future = legacyRpcRouter.invokeRpc(legacyTestRpcIdentifier.getType(), RPC_INPUT);
-
-        assertSame(result, future.get());
-        testLegacyRpcImpl.verifyInput(legacyTestRpcIdentifier, RPC_INPUT);
-
-        // Test exception returned
-
-        TestLegacyDOMRpcException rpcEx = new TestLegacyDOMRpcException();
-        testLegacyRpcImpl.init(Futures.immediateFailedCheckedFuture(rpcEx));
-
-        try {
-            legacyRpcRouter.invokeRpc(legacyTestRpcIdentifier.getType(), RPC_INPUT).get();
-            fail("Expected exception");
-        } catch (ExecutionException e) {
-            assertEquals(rpcEx, e.getCause());
-        }
-
-        // Test no input or output
-
-        testLegacyRpcImpl.init(Futures.immediateCheckedFuture(null));
-
-        future = legacyRpcRouter.invokeRpc(legacyTestRpcNoInputIdentifier.getType(), null);
-
-        assertNull(future.get());
-        testLegacyRpcImpl.verifyInput(legacyTestRpcNoInputIdentifier, null);
-
-        // Test close
-
-        reg.close();
-
-        try {
-            legacyRpcRouter.invokeRpc(legacyTestRpcIdentifier.getType(), RPC_INPUT).get();
-            fail("Expected exception");
-        } catch (ExecutionException e) {
-            assertTrue(e.getCause() instanceof DOMRpcImplementationNotAvailableException);
-        }
-    }
-
-    @Test
-    public void testLegacyRegistrationAndMdsalInvocation() throws InterruptedException, ExecutionException {
-        legacyRpcRouter.registerRpcImplementation(testLegacyRpcImpl, legacyTestRpcIdentifier,
-                legacyTestRpcNoInputIdentifier);
-
-        // Test success
-
-        DefaultDOMRpcResult result = new DefaultDOMRpcResult(RPC_OUTPUT,
-                Collections.singleton(RpcResultBuilder.newError(ErrorType.RPC, "tag", "message")));
-        testLegacyRpcImpl.init(Futures.immediateCheckedFuture(result));
-
-        ListenableFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> future =
-                mdsalRpcRouter.getRpcService().invokeRpc(mdsalTestRpcIdentifier.getType(), RPC_INPUT);
-
-        assertEquals(RPC_OUTPUT, future.get().getResult());
-        assertEquals(1, future.get().getErrors().size());
-        assertEquals(ErrorType.RPC, future.get().getErrors().iterator().next().getErrorType());
-        assertEquals("tag", future.get().getErrors().iterator().next().getTag());
-        assertEquals("message", future.get().getErrors().iterator().next().getMessage());
-        testLegacyRpcImpl.verifyInput(legacyTestRpcIdentifier, RPC_INPUT);
-
-        // Test exception returned
-
-        TestLegacyDOMRpcException rpcEx = new TestLegacyDOMRpcException();
-        testLegacyRpcImpl.init(Futures.immediateFailedCheckedFuture(rpcEx));
-
-        try {
-            mdsalRpcRouter.getRpcService().invokeRpc(mdsalTestRpcIdentifier.getType(), RPC_INPUT).get();
-            fail("Expected exception");
-        } catch (ExecutionException e) {
-            assertEquals(rpcEx, e.getCause());
-        }
-
-        // Test no input or output
-
-        testLegacyRpcImpl.init(Futures.immediateCheckedFuture(null));
-
-        future = mdsalRpcRouter.getRpcService().invokeRpc(mdsalTestRpcNoInputIdentifier.getType(), null);
-
-        assertNull(future.get());
-        testLegacyRpcImpl.verifyInput(legacyTestRpcNoInputIdentifier, null);
-    }
-
-    @Test
-    public void testMdsalRegistrationAndLegacyInvocation() throws InterruptedException, ExecutionException {
-        mdsalRpcRouter.getRpcProviderService().registerRpcImplementation(testMdsalRpcImpl, mdsalTestRpcIdentifier,
-                mdsalTestRpcNoInputIdentifier);
-
-        // Test success
-
-        org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult result =
-            new org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult(RPC_OUTPUT,
-                Collections.singleton(RpcResultBuilder.newError(ErrorType.RPC, "tag", "message")));
-        testMdsalRpcImpl.init(FluentFutures.immediateFluentFuture(result));
-
-        ListenableFuture<DOMRpcResult> future = legacyRpcRouter.invokeRpc(legacyTestRpcIdentifier.getType(), RPC_INPUT);
-
-        assertEquals(RPC_OUTPUT, future.get().getResult());
-        assertEquals(1, future.get().getErrors().size());
-        assertEquals(ErrorType.RPC, future.get().getErrors().iterator().next().getErrorType());
-        assertEquals("tag", future.get().getErrors().iterator().next().getTag());
-        assertEquals("message", future.get().getErrors().iterator().next().getMessage());
-        testMdsalRpcImpl.verifyInput(mdsalTestRpcIdentifier, RPC_INPUT);
-
-        // Test exception returned
-
-        TestMdsalDOMRpcException rpcEx = new TestMdsalDOMRpcException();
-        testMdsalRpcImpl.init(FluentFutures.immediateFailedFluentFuture(rpcEx));
-
-        try {
-            legacyRpcRouter.invokeRpc(legacyTestRpcIdentifier.getType(), RPC_INPUT).get();
-            fail("Expected exception");
-        } catch (ExecutionException e) {
-            assertTrue("Unexpected exception " + e.getCause(), e.getCause() instanceof DOMRpcException);
-            assertEquals(rpcEx, e.getCause().getCause());
-        }
-
-        // Test no input or output
-
-        testMdsalRpcImpl.init(FluentFutures.immediateNullFluentFuture());
-
-        future = legacyRpcRouter.invokeRpc(legacyTestRpcNoInputIdentifier.getType(), null);
-
-        assertNull(future.get());
-        testMdsalRpcImpl.verifyInput(mdsalTestRpcNoInputIdentifier, null);
-    }
-
-    @Test
-    public void testRegisterRpcListener() {
-        final TestLegacyDOMRpcImplementation2 testRpcImpl2 = new TestLegacyDOMRpcImplementation2();
-
-        DOMRpcAvailabilityListener listener = mock(DOMRpcAvailabilityListener.class);
-        doNothing().when(listener).onRpcAvailable(any());
-        doNothing().when(listener).onRpcUnavailable(any());
-        doReturn(true).when(listener).acceptsImplementation(any());
-        final ListenerRegistration<?> listenerReg = legacyRpcRouter.registerRpcListener(listener);
-
-        DOMRpcAvailabilityListener filteredListener = mock(DOMRpcAvailabilityListener.class);
-        doNothing().when(filteredListener).onRpcAvailable(any());
-        doNothing().when(filteredListener).onRpcUnavailable(any());
-        doReturn(true).when(filteredListener).acceptsImplementation(testLegacyRpcImpl);
-        doReturn(false).when(filteredListener).acceptsImplementation(testRpcImpl2);
-        final ListenerRegistration<?> filteredListenerReg = legacyRpcRouter.registerRpcListener(filteredListener);
-
-        final DOMRpcImplementationRegistration<?> testRpcReg =
-                legacyRpcRouter.registerRpcImplementation(testLegacyRpcImpl, legacyTestRpcIdentifier);
-
-        verify(listener, timeout(5000)).onRpcAvailable(ImmutableList.of(legacyTestRpcIdentifier));
-        verify(filteredListener, timeout(5000)).onRpcAvailable(ImmutableList.of(legacyTestRpcIdentifier));
-
-        final DOMRpcImplementationRegistration<?> testRpcNoInputReg =
-                legacyRpcRouter.registerRpcImplementation(testRpcImpl2, legacyTestRpcNoInputIdentifier);
-
-        verify(listener, timeout(5000)).onRpcAvailable(ImmutableList.of(legacyTestRpcNoInputIdentifier));
-        verify(filteredListener, after(200).never()).onRpcAvailable(ImmutableList.of(legacyTestRpcNoInputIdentifier));
-
-        testRpcReg.close();
-
-        verify(listener, timeout(5000)).onRpcUnavailable(ImmutableList.of(legacyTestRpcIdentifier));
-        verify(filteredListener, timeout(5000)).onRpcUnavailable(ImmutableList.of(legacyTestRpcIdentifier));
-
-        testRpcNoInputReg.close();
-
-        verify(listener, timeout(5000)).onRpcUnavailable(ImmutableList.of(legacyTestRpcNoInputIdentifier));
-        verify(filteredListener, after(200).never()).onRpcUnavailable(ImmutableList.of(legacyTestRpcNoInputIdentifier));
-
-        reset(listener, filteredListener);
-
-        listenerReg.close();
-        filteredListenerReg.close();
-
-        legacyRpcRouter.registerRpcImplementation(testLegacyRpcImpl, legacyTestRpcIdentifier);
-
-        verify(listener, after(200).never()).onRpcAvailable(ImmutableList.of(legacyTestRpcIdentifier));
-        verify(filteredListener, never()).onRpcAvailable(ImmutableList.of(legacyTestRpcIdentifier));
-    }
-
-    private static SchemaPath findRpc(SchemaContext schemaContext, String name) {
-        Module testModule = schemaContext.findModule("odl-datastore-test", TestModel.TEST_QNAME.getRevision()).get();
-        RpcDefinition rpcDefinition = null;
-        for (RpcDefinition def : testModule.getRpcs()) {
-            if (def.getQName().getLocalName().equals(name)) {
-                rpcDefinition = def;
-                break;
-            }
-        }
-
-        assertNotNull(name + " rpc not found in " + testModule.getRpcs(), rpcDefinition);
-        return rpcDefinition.getPath();
-    }
-
-    private abstract static class AbstractDOMRpcImplementation<T> {
-        Entry<T, NormalizedNode<?, ?>> rpcInput;
-
-        void verifyInput(T expRpc, NormalizedNode<?, ?> expInput) {
-            assertNotNull(rpcInput);
-            assertEquals(expRpc, rpcInput.getKey());
-            assertEquals(expInput, rpcInput.getValue());
-        }
-    }
-
-    private static class TestLegacyDOMRpcImplementation extends AbstractDOMRpcImplementation<DOMRpcIdentifier>
-            implements DOMRpcImplementation {
-        CheckedFuture<DOMRpcResult, DOMRpcException> returnFuture;
-
-        @Override
-        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(
-                final DOMRpcIdentifier rpc, final NormalizedNode<?, ?> input) {
-            rpcInput = new SimpleEntry<>(rpc, input);
-            return returnFuture;
-        }
-
-        void init(CheckedFuture<DOMRpcResult, DOMRpcException> retFuture) {
-            this.returnFuture = retFuture;
-            rpcInput = null;
-        }
-    }
-
-    private static class TestMdsalDOMRpcImplementation
-            extends AbstractDOMRpcImplementation<org.opendaylight.mdsal.dom.api.DOMRpcIdentifier>
-            implements org.opendaylight.mdsal.dom.api.DOMRpcImplementation {
-        FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> returnFuture;
-
-        @Override
-        public FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> invokeRpc(
-                    final org.opendaylight.mdsal.dom.api.DOMRpcIdentifier rpc, final NormalizedNode<?, ?> input) {
-            rpcInput = new SimpleEntry<>(rpc, input);
-            return returnFuture;
-        }
-
-        void init(FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> retFuture) {
-            this.returnFuture = retFuture;
-            rpcInput = null;
-        }
-    }
-
-    private static class TestLegacyDOMRpcImplementation2 implements DOMRpcImplementation {
-        @Override
-        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(
-                final DOMRpcIdentifier rpc, final NormalizedNode<?, ?> input) {
-            return null;
-        }
-    }
-
-    private static class TestLegacyDOMRpcException extends DOMRpcException {
-        private static final long serialVersionUID = 1L;
-
-        TestLegacyDOMRpcException() {
-            super("test");
-        }
-    }
-
-    private static class TestMdsalDOMRpcException extends org.opendaylight.mdsal.dom.api.DOMRpcException {
-        private static final long serialVersionUID = 1L;
-
-        TestMdsalDOMRpcException() {
-            super("test");
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMTransactionChainTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMTransactionChainTest.java
deleted file mode 100644 (file)
index 5815fae..0000000
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class DOMTransactionChainTest {
-
-    private SchemaContext schemaContext;
-    private AbstractDOMDataBroker domBroker;
-
-    @Before
-    public void setupStore() {
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.newDirectExecutorService());
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.newDirectExecutorService());
-        schemaContext = TestModel.createTestContext();
-
-        operStore.onGlobalContextUpdated(schemaContext);
-        configStore.onGlobalContextUpdated(schemaContext);
-
-        ImmutableMap<LogicalDatastoreType, DOMStore> stores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder() //
-                .put(CONFIGURATION, configStore) //
-                .put(OPERATIONAL, operStore) //
-                .build();
-
-        ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
-        domBroker = new SerializedDOMDataBroker(stores, executor);
-    }
-
-    @Test
-    public void testTransactionChainNoConflict() throws InterruptedException, ExecutionException, TimeoutException {
-        BlockingTransactionChainListener listener = new BlockingTransactionChainListener();
-        DOMTransactionChain txChain = domBroker.createTransactionChain(listener);
-        assertNotNull(txChain);
-
-        /**
-         * We allocate a new read-write transaction and write /test.
-         */
-        DOMDataReadWriteTransaction firstTx = allocateAndWrite(txChain);
-
-        /**
-         * Once the first transaction is marked as ready, we are able to allocate
-         * chained transactions.
-         */
-        ListenableFuture<?> firstWriteTxFuture = firstTx.commit();
-
-        /**
-         * We allocate a chained transaction - a read-only transaction.
-         */
-        DOMDataReadTransaction secondReadTx = txChain.newReadOnlyTransaction();
-
-        /**
-         * We test whether we are able to read data from the transaction; the read
-         * should not fail since we are using a chained transaction.
-         */
-        assertTestContainerExists(secondReadTx);
-
-        /**
-         * We allocate the next transaction, which is still based on the first one,
-         * but is read-write.
-         */
-        DOMDataReadWriteTransaction thirdDeleteTx = allocateAndDelete(txChain);
-
-        /**
-         * We commit the first transaction.
-         */
-        assertCommitSuccessful(firstWriteTxFuture);
-
-        /**
-         * Allocates a transaction directly from the data broker.
-         */
-        DOMDataReadTransaction storeReadTx = domBroker.newReadOnlyTransaction();
-
-        /**
-         * We verify the transaction is committed to the store; the container should
-         * exist in the datastore.
-         */
-        assertTestContainerExists(storeReadTx);
-
-        /**
-         * The third transaction is sealed and committed.
-         */
-        ListenableFuture<?> thirdDeleteTxFuture = thirdDeleteTx.commit();
-        assertCommitSuccessful(thirdDeleteTxFuture);
-
-        /**
-         * We close the transaction chain.
-         */
-        txChain.close();
-
-        listener.getSuccessFuture().get(1000, TimeUnit.MILLISECONDS);
-    }
-
-    @Test
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void testTransactionChainNotSealed() throws InterruptedException, ExecutionException {
-        BlockingTransactionChainListener listener = new BlockingTransactionChainListener();
-        DOMTransactionChain txChain = domBroker.createTransactionChain(listener);
-        assertNotNull(txChain);
-
-        /**
-         * We allocate a new read-write transaction and write /test.
-         */
-        allocateAndWrite(txChain);
-
-        /**
-         * We allocate a chained transaction - a read-only transaction. Note the first
-         * one is still not committed to the datastore, so this allocation should fail
-         * with IllegalStateException.
-         */
-        try {
-            txChain.newReadOnlyTransaction();
-            fail("Allocation of secondReadTx should fail with IllegalStateException");
-        } catch (Exception e) {
-            assertTrue(e instanceof IllegalStateException);
-        }
-    }
-
-    private static DOMDataReadWriteTransaction allocateAndDelete(
-            final DOMTransactionChain txChain) throws InterruptedException, ExecutionException {
-        DOMDataReadWriteTransaction tx = txChain.newReadWriteTransaction();
-
-        /**
-         * We test the existence of /test in the third transaction; the container
-         * should still be visible from the first one (which is still uncommitted).
-         */
-        assertTestContainerExists(tx);
-
-        /**
-         * We delete the node in the third transaction.
-         */
-        tx.delete(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH);
-        return tx;
-    }
-
-    private static DOMDataReadWriteTransaction allocateAndWrite(final DOMTransactionChain txChain)
-            throws InterruptedException, ExecutionException {
-        DOMDataReadWriteTransaction tx = txChain.newReadWriteTransaction();
-        assertTestContainerWrite(tx);
-        return tx;
-    }
-
-    private static void assertCommitSuccessful(final ListenableFuture<?> future)
-            throws InterruptedException, ExecutionException {
-        future.get();
-    }
-
-    private static void assertTestContainerExists(final DOMDataReadTransaction readTx)
-            throws InterruptedException, ExecutionException {
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> readFuture = readTx.read(OPERATIONAL, TestModel.TEST_PATH);
-        Optional<NormalizedNode<?, ?>> readData = readFuture.get();
-        assertTrue(readData.isPresent());
-    }
-
-    private static void assertTestContainerWrite(final DOMDataReadWriteTransaction tx)
-            throws InterruptedException, ExecutionException {
-        tx.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-        assertTestContainerExists(tx);
-    }
-}
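For reference, the chained-transaction contract that the walkthrough comments in the deleted test describe can be condensed into the short sketch below. It is illustrative only: it assumes the same setupStore() wiring, static imports and BlockingTransactionChainListener as the file above, and the method name is made up.

    @Test
    public void chainedReadSeesSealedWrite() throws Exception {
        // A chain ties transactions together: once the first transaction is sealed
        // by commit(), later chained transactions see its data without waiting for
        // the store-level commit to complete.
        DOMTransactionChain chain = domBroker.createTransactionChain(new BlockingTransactionChainListener());

        DOMDataReadWriteTransaction first = chain.newReadWriteTransaction();
        first.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
        ListenableFuture<?> firstCommit = first.commit();

        // Chained read allocated after the first transaction was sealed.
        DOMDataReadTransaction read = chain.newReadOnlyTransaction();
        assertTrue(read.read(OPERATIONAL, TestModel.TEST_PATH).get().isPresent());

        firstCommit.get();
        chain.close();
    }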
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/MountPointServiceTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/MountPointServiceTest.java
deleted file mode 100644 (file)
index cf5853a..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.base.Optional;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPointService.DOMMountPointBuilder;
-import org.opendaylight.controller.md.sal.dom.broker.impl.mount.DOMMountPointServiceImpl;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class MountPointServiceTest {
-
-    private static final YangInstanceIdentifier PATH = YangInstanceIdentifier
-            .of(QName.create("namespace", "2012-12-12", "top"));
-
-    private DOMMountPointService mountService;
-
-    @Before
-    public void setup() {
-        mountService = new DOMMountPointServiceImpl();
-    }
-
-    @Test
-    public void createSimpleMountPoint() {
-        Optional<DOMMountPoint> mountNotPresent = mountService.getMountPoint(PATH);
-        assertFalse(mountNotPresent.isPresent());
-        DOMMountPointBuilder mountBuilder = mountService.createMountPoint(PATH);
-        mountBuilder.register();
-
-        Optional<DOMMountPoint> mountPresent = mountService.getMountPoint(PATH);
-        assertTrue(mountPresent.isPresent());
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadWriteTransactionTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingReadWriteTransactionTest.java
deleted file mode 100644 (file)
index 8475b28..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.Futures;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Matchers;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-
-public class ShardedDOMDataBrokerDelegatingReadWriteTransactionTest {
-
-    @Mock
-    private DOMDataWriteTransaction writeTx;
-
-    @Mock
-    private DOMDataReadOnlyTransaction readTx;
-
-    private ShardedDOMDataBrokerDelegatingReadWriteTransaction rwTx;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        doNothing().when(writeTx).put(any(), any(), any());
-        doNothing().when(writeTx).merge(any(), any(), any());
-        doNothing().when(writeTx).delete(any(), any());
-        rwTx = new ShardedDOMDataBrokerDelegatingReadWriteTransaction("TEST-TX", TestModel.createTestContext(), readTx,
-                                                                      writeTx);
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testFirstReadShouldFail() {
-        rwTx.read(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH);
-    }
-
-    @Test
-    public void testGetIdentifier() {
-        assertEquals("TEST-TX", rwTx.getIdentifier());
-    }
-
-    @Test
-    public void testReadWriteOperations() throws Exception {
-        doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(readTx).read(any(), any());
-        rwTx.put(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH, testNodeWithOuter(1, 2, 3));
-
-        verify(writeTx).put(eq(LogicalDatastoreType.OPERATIONAL), Matchers.eq(TestModel.TEST_PATH),
-                            Matchers.eq(testNodeWithOuter(1, 2, 3)));
-        verify(readTx).read(eq(LogicalDatastoreType.OPERATIONAL), Matchers.eq(TestModel.TEST_PATH));
-
-        assertEquals(testNodeWithOuter(1, 2, 3),
-                     rwTx.read(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH).checkedGet().get());
-
-        rwTx.merge(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH, testNodeWithOuter(4, 5, 6));
-        assertEquals(testNodeWithOuter(1, 2, 3, 4, 5, 6),
-                     rwTx.read(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH).checkedGet().get());
-
-        rwTx.delete(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH);
-
-        verify(writeTx).delete(eq(LogicalDatastoreType.OPERATIONAL), Matchers.eq(TestModel.TEST_PATH));
-        assertEquals(Optional.absent(), rwTx.read(LogicalDatastoreType.OPERATIONAL, TestModel.TEST_PATH).checkedGet());
-    }
-
-    private DataContainerChild<?, ?> outerNode(int... ids) {
-        CollectionNodeBuilder<MapEntryNode, MapNode> outer = ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME);
-        for (int id : ids) {
-            outer.addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, id));
-        }
-
-        return outer.build();
-    }
-
-    private NormalizedNode<?, ?> testNodeWithOuter(int... ids) {
-        return testNodeWithOuter(outerNode(ids));
-    }
-
-    private NormalizedNode<?, ?> testNodeWithOuter(DataContainerChild<?, ?> outer) {
-        return ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(TestModel.TEST_QNAME)).withChild(outer)
-                .build();
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingTransactionChainTest.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/legacy/sharded/adapter/ShardedDOMDataBrokerDelegatingTransactionChainTest.java
deleted file mode 100644 (file)
index 520fe59..0000000
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright (c) 2016 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.impl.legacy.sharded.adapter;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
-import org.opendaylight.mdsal.dom.api.DOMDataBroker;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-
-public class ShardedDOMDataBrokerDelegatingTransactionChainTest {
-
-    @Mock
-    private DOMDataBroker dataBroker;
-
-    @Mock
-    private DOMTransactionChain delegateTxChain;
-
-    @Mock
-    private TransactionChainListener txChainlistener;
-
-    private ShardedDOMDataBrokerDelegatingTransactionChain txChain;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-
-        doReturn(delegateTxChain).when(dataBroker).createTransactionChain(any());
-        txChain = new ShardedDOMDataBrokerDelegatingTransactionChain("1", TestModel.createTestContext(), dataBroker,
-                                                                     txChainlistener);
-    }
-
-    @Test
-    public void testClose() {
-        doNothing().when(delegateTxChain).close();
-        txChain.close();
-        verify(delegateTxChain).close();
-    }
-
-    @Test
-    public void testNewWriteTransaction() {
-        DOMDataTreeWriteTransaction delegateWriteTx = mock(DOMDataTreeWriteTransaction.class);
-        doReturn(delegateWriteTx).when(delegateTxChain).newWriteOnlyTransaction();
-        doReturn("TEST-WRITE-TX-DELEGATE").when(delegateWriteTx).getIdentifier();
-        txChain.newWriteOnlyTransaction();
-        verify(delegateTxChain).newWriteOnlyTransaction();
-    }
-
-    @Test
-    public void testNewReadOnlyTransaction() {
-        DOMDataTreeReadTransaction delegateReadTx = mock(DOMDataTreeReadTransaction.class);
-        doReturn("TEST-READ-TX-DELEGATE").when(delegateReadTx).getIdentifier();
-        doReturn(delegateReadTx).when(delegateTxChain).newReadOnlyTransaction();
-        txChain.newReadOnlyTransaction();
-        verify(delegateTxChain).newReadOnlyTransaction();
-    }
-
-    @Test
-    public void testNewReadWriteTransaction() {
-        DOMDataTreeReadTransaction delegateReadTx = mock(DOMDataTreeReadTransaction.class);
-        doReturn("TEST-READ-TX-DELEGATE").when(delegateReadTx).getIdentifier();
-        doReturn(delegateReadTx).when(delegateTxChain).newReadOnlyTransaction();
-
-        DOMDataTreeWriteTransaction delegateWriteTx = mock(DOMDataTreeWriteTransaction.class);
-        doReturn(delegateWriteTx).when(delegateTxChain).newWriteOnlyTransaction();
-        doReturn("TEST-WRITE-TX-DELEGATE").when(delegateWriteTx).getIdentifier();
-
-        txChain.newReadWriteTransaction();
-        verify(delegateTxChain).newReadOnlyTransaction();
-        verify(delegateTxChain).newWriteOnlyTransaction();
-    }
-
-    @Test
-    public void testTransactionChainFailed() {
-        final DOMDataTreeWriteTransaction writeTxDelegate = mock(DOMDataTreeWriteTransaction.class);
-        doReturn("DELEGATE-WRITE-TX-1").when(writeTxDelegate).getIdentifier();
-        doReturn(writeTxDelegate).when(delegateTxChain).newWriteOnlyTransaction();
-        doNothing().when(txChainlistener).onTransactionChainFailed(any(), any(), any());
-
-        // verify writetx fail
-        txChain.newWriteOnlyTransaction();
-        txChain.onTransactionChainFailed(delegateTxChain, writeTxDelegate, new Throwable("Fail"));
-
-        final ArgumentCaptor<AsyncTransaction> txCaptor = ArgumentCaptor.forClass(AsyncTransaction.class);
-        final ArgumentCaptor<Throwable> throwableCaptor = ArgumentCaptor.forClass(Throwable.class);
-        verify(txChainlistener).onTransactionChainFailed(eq(txChain), txCaptor.capture(), throwableCaptor.capture());
-        assertEquals("DOM-CHAIN-1-0", txCaptor.getValue().getIdentifier());
-        assertEquals("Fail", throwableCaptor.getValue().getMessage());
-
-        // verify readtx fail
-        final DOMDataTreeReadTransaction readTxDelegate = mock(DOMDataTreeReadTransaction.class);
-        doReturn("DELEGATE-READ-TX-1").when(readTxDelegate).getIdentifier();
-        doReturn(readTxDelegate).when(delegateTxChain).newReadOnlyTransaction();
-        doNothing().when(txChainlistener).onTransactionChainFailed(any(), any(), any());
-        txChain.newReadOnlyTransaction();
-        txChain.onTransactionChainFailed(delegateTxChain, readTxDelegate, new Throwable("Fail"));
-        verify(txChainlistener, times(2))
-                .onTransactionChainFailed(eq(txChain), txCaptor.capture(), throwableCaptor.capture());
-        assertEquals("DOM-CHAIN-1-1", txCaptor.getValue().getIdentifier());
-        assertEquals("Fail", throwableCaptor.getValue().getMessage());
-
-        // verify readwritetx failure; we must check that both read and write failures
-        // translate to the returned readwritetx
-
-        // we can reuse write and read tx delegates, just return different
-        // identifiers to avoid conflicts in keys in tx dictionary
-        doReturn("DELEGATE-WRITE-RWTX-1").when(writeTxDelegate).getIdentifier();
-        doReturn("DELEGATE-READ-RWTX-1").when(readTxDelegate).getIdentifier();
-        txChain.newReadWriteTransaction();
-        txChain.onTransactionChainFailed(delegateTxChain, writeTxDelegate, new Throwable("Fail"));
-        verify(txChainlistener, times(3))
-                .onTransactionChainFailed(eq(txChain), txCaptor.capture(), throwableCaptor.capture());
-        assertEquals("DOM-CHAIN-1-2", txCaptor.getValue().getIdentifier());
-        assertEquals("Fail", throwableCaptor.getValue().getMessage());
-
-        txChain.onTransactionChainFailed(delegateTxChain, readTxDelegate, new Throwable("Fail"));
-        verify(txChainlistener, times(4))
-                .onTransactionChainFailed(eq(txChain), txCaptor.capture(), throwableCaptor.capture());
-        assertEquals("DOM-CHAIN-1-2", txCaptor.getValue().getIdentifier());
-        assertEquals("Fail", throwableCaptor.getValue().getMessage());
-    }
-
-    @Test
-    public void testTransactionChainSuccessful() {
-        doNothing().when(txChainlistener).onTransactionChainSuccessful(any());
-        txChain.onTransactionChainSuccessful(delegateTxChain);
-        verify(txChainlistener).onTransactionChainSuccessful(eq(txChain));
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestModel.java b/opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestModel.java
deleted file mode 100644 (file)
index 53096da..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
-
-public final class TestModel {
-
-    private TestModel() {
-    }
-
-    public static final QName TEST_QNAME =
-            QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13", "test");
-    public static final QName TEST2_QNAME =
-            QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13", "test2");
-    public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
-    public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
-    public static final QName OUTER_CHOICE_QNAME = QName.create(TEST_QNAME, "outer-choice");
-    public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
-    public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
-    public static final QName VALUE_QNAME = QName.create(TEST_QNAME, "value");
-    public static final QName TWO_QNAME = QName.create(TEST_QNAME, "two");
-    public static final QName THREE_QNAME = QName.create(TEST_QNAME, "three");
-    public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
-    public static final YangInstanceIdentifier TEST2_PATH = YangInstanceIdentifier.of(TEST2_QNAME);
-    public static final YangInstanceIdentifier OUTER_LIST_PATH =
-            YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
-    private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
-
-    public static SchemaContext createTestContext() {
-        return YangParserTestUtils.parseYangResource(DATASTORE_TEST_YANG);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/test/resources/odl-datastore-test.yang b/opendaylight/md-sal/sal-dom-broker/src/test/resources/odl-datastore-test.yang
deleted file mode 100644 (file)
index f7b960e..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-module odl-datastore-test {
-    yang-version 1;
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
-    prefix "store-test";
-
-    revision "2014-03-13" {
-        description "Initial revision.";
-    }
-
-    container test {
-        presence "needs to be present when empty";
-
-        list outer-list {
-            key id;
-            leaf id {
-                type uint16;
-            }
-            choice outer-choice {
-                case one {
-                    leaf one {
-                        type string;
-                    }
-                }
-                case two-three {
-                    leaf two {
-                        type string;
-                    }
-                    leaf three {
-                        type string;
-                    }
-                }
-            }
-            list inner-list {
-                key name;
-                leaf name {
-                    type string;
-                }
-                leaf value {
-                    type string;
-                }
-            }
-        }
-    }
-
-    container test2 {
-    }
-
-    rpc test-rpc {
-        input {
-            leaf input-leaf {
-                type string;
-            }
-        }
-
-        output {
-            leaf output-leaf {
-                type string;
-            }
-        }
-    }
-
-    rpc test-rpc-no-input {
-    }
-
-    notification test-notification {
-        leaf value-leaf {
-            type string;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/pom.xml b/opendaylight/md-sal/sal-dom-compat/pom.xml
deleted file mode 100644 (file)
index d4b3916..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-core-compat</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-spi</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-spi</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-broker</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/AbstractDOMRpcResultFutureAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/AbstractDOMRpcResultFutureAdapter.java
deleted file mode 100644 (file)
index a16e4fa..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Optional;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.opendaylight.mdsal.dom.api.DOMRpcException;
-import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-
-/**
- * Base for a DOMRpcResult future adapter.
- *
- * @author Thomas Pantelis
- */
-@SuppressWarnings("checkstyle:ClassTypeParameterName")
-public abstract class AbstractDOMRpcResultFutureAdapter<T extends DOMRpcResult, F extends DOMRpcResult,
-        D extends ListenableFuture<F>, E extends DOMRpcException> extends AbstractFuture<T> {
-    private final D delegate;
-    private final ExceptionMapper<E> exMapper;
-    private volatile Optional<T> result;
-
-    AbstractDOMRpcResultFutureAdapter(D delegate, ExceptionMapper<E> exMapper) {
-        this.delegate = delegate;
-        this.exMapper = exMapper;
-    }
-
-    protected abstract T transform(F fromResult);
-
-    public D delegate() {
-        return delegate;
-    }
-
-    @Override
-    public void addListener(Runnable listener, Executor executor) {
-        delegate.addListener(listener, executor);
-    }
-
-    @Override
-    public boolean cancel(boolean mayInterruptIfRunning) {
-        return delegate.cancel(mayInterruptIfRunning);
-    }
-
-    @Override
-    public boolean isCancelled() {
-        return delegate.isCancelled();
-    }
-
-    @Override
-    public boolean isDone() {
-        return delegate.isDone();
-    }
-
-    @Override
-    public T get() throws InterruptedException, ExecutionException {
-        if (result != null) {
-            return result.orElse(null);
-        }
-
-        try {
-            return transformIfNecessary(delegate.get());
-        } catch (ExecutionException e) {
-            throw new ExecutionException(e.getMessage(), exMapper.apply(e));
-        }
-    }
-
-    @Override
-    public T get(final long timeout, final TimeUnit unit) throws InterruptedException, ExecutionException,
-            TimeoutException {
-        if (result != null) {
-            return result.orElse(null);
-        }
-
-        try {
-            return transformIfNecessary(delegate.get(timeout, unit));
-        } catch (ExecutionException e) {
-            throw new ExecutionException(e.getMessage(), exMapper.apply(e));
-        }
-    }
-
-    private synchronized T transformIfNecessary(F delegateResult) {
-        if (result == null) {
-            if (delegateResult == null) {
-                result = Optional.empty();
-            } else {
-                result = Optional.of(transform(delegateResult));
-            }
-        }
-
-        return result.orElse(null);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataBrokerAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataBrokerAdapter.java
deleted file mode 100644 (file)
index a5a47bb..0000000
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap.Builder;
-import java.util.Map;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeCommitCohortRegistry;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-
-@Deprecated
-public class DOMDataBrokerAdapter extends ForwardingObject implements org.opendaylight.mdsal.dom.api.DOMDataBroker {
-    private final ClassToInstanceMap<org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension> extensions;
-    private final DOMDataBroker delegate;
-
-    public DOMDataBrokerAdapter(final DOMDataBroker delegate) {
-        this.delegate = requireNonNull(delegate);
-
-        Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> delegateExtensions =
-                delegate.getSupportedExtensions();
-        Builder<org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension> extBuilder =
-                ImmutableClassToInstanceMap.builder();
-
-        final DOMDataTreeChangeService delegateTreeChangeService = (DOMDataTreeChangeService) delegateExtensions.get(
-            DOMDataTreeChangeService.class);
-        if (delegateTreeChangeService != null) {
-            extBuilder.put(org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService.class,
-                new DOMDataTreeChangeServiceAdapter(delegateTreeChangeService));
-        }
-
-        final DOMDataTreeCommitCohortRegistry delegateCohortRegistry =
-                (DOMDataTreeCommitCohortRegistry) delegateExtensions.get(DOMDataTreeCommitCohortRegistry.class);
-        if (delegateCohortRegistry != null) {
-            extBuilder.put(DOMDataTreeCommitCohortRegistry.class, delegateCohortRegistry::registerCommitCohort);
-        }
-
-        extensions = extBuilder.build();
-    }
-
-    @Override
-    public DOMDataTreeReadTransaction newReadOnlyTransaction() {
-        return new DOMDataTreeReadTransactionAdapter(delegate.newReadOnlyTransaction());
-    }
-
-    @Override
-    public DOMDataTreeWriteTransaction newWriteOnlyTransaction() {
-        return new DOMDataTreeWriteTransactionAdapter(delegate.newWriteOnlyTransaction());
-    }
-
-    @Override
-    public DOMDataTreeReadWriteTransaction newReadWriteTransaction() {
-        return new DOMDataTreeReadWriteTransactionAdapter(delegate.newReadWriteTransaction());
-    }
-
-    @Override
-    public ClassToInstanceMap<org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension> getExtensions() {
-        return extensions;
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final DOMTransactionChainListener listener) {
-        return new DOMTransactionChainAdapter(listener, delegate::createTransactionChain);
-    }
-
-    @Override
-    protected DOMDataBroker delegate() {
-        return delegate;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeChangeServiceAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeChangeServiceAdapter.java
deleted file mode 100644 (file)
index aa2011b..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-@Deprecated
-public class DOMDataTreeChangeServiceAdapter extends ForwardingObject
-        implements org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService {
-
-    private final DOMDataTreeChangeService delegate;
-
-    DOMDataTreeChangeServiceAdapter(final DOMDataTreeChangeService delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public <L extends org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener> ListenerRegistration<L>
-            registerDataTreeChangeListener(final org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier treeId,
-                    final L listener) {
-        final DOMDataTreeChangeListener delegateListener;
-        if (listener instanceof org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener) {
-            delegateListener = (ClusteredDOMDataTreeChangeListener) listener::onDataTreeChanged;
-        } else {
-            delegateListener = listener::onDataTreeChanged;
-        }
-        final ListenerRegistration<?> reg = delegate().registerDataTreeChangeListener(
-            DOMDataTreeIdentifier.fromMdsal(treeId), delegateListener);
-
-        return new AbstractListenerRegistration<L>(listener) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-            }
-        };
-    }
-
-    @Override
-    protected DOMDataTreeChangeService delegate() {
-        return delegate;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeReadTransactionAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeReadTransactionAdapter.java
deleted file mode 100644 (file)
index 786177a..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Optional;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMDataTreeReadTransactionAdapter extends ForwardingObject implements DOMDataTreeReadTransaction {
-    private final DOMDataReadOnlyTransaction delegate;
-
-    public DOMDataTreeReadTransactionAdapter(final DOMDataReadOnlyTransaction delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate().getIdentifier();
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return TransactionUtils.read(delegate(), store, path);
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return TransactionUtils.exists(delegate(), store, path);
-    }
-
-    @Override
-    public void close() {
-        delegate().close();
-    }
-
-    @Override
-    protected DOMDataReadOnlyTransaction delegate() {
-        return delegate;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeReadWriteTransactionAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeReadWriteTransactionAdapter.java
deleted file mode 100644 (file)
index d35ce44..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Optional;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMDataTreeReadWriteTransactionAdapter extends ForwardingObject
-        implements DOMDataTreeReadWriteTransaction {
-    private final DOMDataReadWriteTransaction delegate;
-
-    public DOMDataTreeReadWriteTransactionAdapter(final DOMDataReadWriteTransaction delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate().getIdentifier();
-    }
-
-    @Override
-    public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return TransactionUtils.read(delegate(), store, path);
-    }
-
-    @Override
-    public FluentFuture<Boolean> exists(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return TransactionUtils.exists(delegate(), store, path);
-    }
-
-    @Override
-    public void close() {
-        cancel();
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        TransactionUtils.put(delegate(), store, path, data);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        TransactionUtils.merge(delegate(), store, path, data);
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        TransactionUtils.delete(delegate(), store, path);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return delegate().commit();
-    }
-
-    @Override
-    public boolean cancel() {
-        return delegate().cancel();
-    }
-
-    @Override
-    protected DOMDataReadWriteTransaction delegate() {
-        return delegate;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeWriteTransactionAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMDataTreeWriteTransactionAdapter.java
deleted file mode 100644 (file)
index 7affc35..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.FluentFuture;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMDataTreeWriteTransactionAdapter extends ForwardingObject
-        implements DOMDataTreeWriteTransaction {
-    private final DOMDataWriteTransaction delegate;
-
-    public DOMDataTreeWriteTransactionAdapter(final DOMDataWriteTransaction delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate().getIdentifier();
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        TransactionUtils.put(delegate(), store, path, data);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        TransactionUtils.merge(delegate(), store, path, data);
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        TransactionUtils.delete(delegate(), store, path);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return delegate().commit();
-    }
-
-    @Override
-    public boolean cancel() {
-        return delegate().cancel();
-    }
-
-    @Override
-    protected DOMDataWriteTransaction delegate() {
-        return delegate;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMMountPointAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMMountPointAdapter.java
deleted file mode 100644 (file)
index b780914..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.collect.ImmutableMap;
-import java.util.Map;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.controller.md.sal.dom.api.DOMActionService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-/**
- * Adapter providing Controller DOMMountPoint implementation based on an MD-SAL DOMMountPoint delegate. Services are
- * looked up in the delegate first. If a lookup is unsuccessful, this class attempts to transparently proxy well-known
- * Controller DOMServices on top of their MD-SAL counterparts available from delegate.
- */
-@Deprecated
-public class DOMMountPointAdapter extends ForwardingObject implements DOMMountPoint {
-    private abstract static class CompatFactory<M extends org.opendaylight.mdsal.dom.api.DOMService,
-            C extends DOMService> {
-        private final Class<M> mdsalClass;
-
-        CompatFactory(final Class<M> mdsalClass) {
-            this.mdsalClass = requireNonNull(mdsalClass);
-        }
-
-        final @Nullable C createService(final org.opendaylight.mdsal.dom.api.DOMMountPoint mountPoint) {
-            return mountPoint.getService(mdsalClass).map(this::createService).orElse(null);
-        }
-
-        abstract C createService(M delegate);
-    }
-
-    private static final Map<Class<? extends DOMService>, CompatFactory<?, ?>> KNOWN_SERVICES = ImmutableMap.of(
-        DOMActionService.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMActionService, DOMActionService>(
-                org.opendaylight.mdsal.dom.api.DOMActionService.class) {
-            @Override
-            DOMActionService createService(final org.opendaylight.mdsal.dom.api.DOMActionService delegate) {
-                return new LegacyDOMActionServiceAdapter(delegate);
-            }
-        },
-        DOMDataBroker.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMDataBroker, DOMDataBroker>(
-                org.opendaylight.mdsal.dom.api.DOMDataBroker.class) {
-            @Override
-            DOMDataBroker createService(final org.opendaylight.mdsal.dom.api.DOMDataBroker delegate) {
-                return new LegacyDOMDataBrokerAdapter(delegate);
-            }
-        },
-        DOMNotificationService.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMNotificationService,
-                DOMNotificationService>(org.opendaylight.mdsal.dom.api.DOMNotificationService.class) {
-            @Override
-            DOMNotificationService createService(final org.opendaylight.mdsal.dom.api.DOMNotificationService delegate) {
-                return new LegacyDOMNotificationServiceAdapter(delegate);
-            }
-        },
-        DOMRpcService.class, new CompatFactory<org.opendaylight.mdsal.dom.api.DOMRpcService, DOMRpcService>(
-                org.opendaylight.mdsal.dom.api.DOMRpcService.class) {
-            @Override
-            DOMRpcService createService(final org.opendaylight.mdsal.dom.api.DOMRpcService delegate) {
-                return new LegacyDOMRpcServiceAdapter(delegate);
-            }
-        });
-
-    private final org.opendaylight.mdsal.dom.api.DOMMountPoint delegate;
-
-    public DOMMountPointAdapter(final org.opendaylight.mdsal.dom.api.DOMMountPoint delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public YangInstanceIdentifier getIdentifier() {
-        return delegate().getIdentifier();
-    }
-
-    @Override
-    public <T extends DOMService> Optional<T> getService(final Class<T> cls) {
-        final java.util.Optional<T> found = delegate.getService(cls);
-        if (found.isPresent()) {
-            return Optional.of(found.get());
-        }
-
-        final CompatFactory<?, ?> compat = KNOWN_SERVICES.get(cls);
-        return Optional.fromNullable(cls.cast(compat == null ? null : compat.createService(delegate)));
-    }
-
-    @Override
-    public SchemaContext getSchemaContext() {
-        return delegate().getSchemaContext();
-    }
-
-    @Override
-    public int hashCode() {
-        return getIdentifier().hashCode();
-    }
-
-    @Override
-    public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-
-        if (!(obj instanceof DOMMountPoint)) {
-            return false;
-        }
-
-        DOMMountPoint other = (DOMMountPoint) obj;
-        return getIdentifier().equals(other.getIdentifier());
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.api.DOMMountPoint delegate() {
-        return delegate;
-    }
-}
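For reference, a minimal sketch of how a caller could use the adapter removed above: the delegate-registered service wins, otherwise KNOWN_SERVICES builds a transparent compat proxy. The helper method name and its `mdsalMountPoint` parameter are illustrative only, not part of the original code.

    // Illustrative sketch (not part of this change): fallback lookup through the removed adapter.
    static Optional<DOMRpcService> lookupRpcService(
            final org.opendaylight.mdsal.dom.api.DOMMountPoint mdsalMountPoint) {
        final DOMMountPoint legacy = new DOMMountPointAdapter(mdsalMountPoint);
        // Returns the delegate's own DOMRpcService if present, otherwise a
        // LegacyDOMRpcServiceAdapter proxy created by the compat factory.
        return legacy.getService(DOMRpcService.class);
    }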
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMNotificationServiceAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMNotificationServiceAdapter.java
deleted file mode 100644 (file)
index 61b596b..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import java.util.Arrays;
-import java.util.Collection;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.mdsal.dom.api.DOMNotificationListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-@Deprecated
-public class DOMNotificationServiceAdapter extends ForwardingObject
-        implements org.opendaylight.mdsal.dom.api.DOMNotificationService {
-
-    private final DOMNotificationService delegate;
-
-    public DOMNotificationServiceAdapter(final DOMNotificationService delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener,
-            final Collection<SchemaPath> types) {
-        // Controller events are sub-interfaces of MD-SAL events, hence direct routing is okay
-        final ListenerRegistration<?> reg = delegate().registerNotificationListener(listener::onNotification, types);
-
-        return new AbstractListenerRegistration<T>(listener) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-
-            }
-        };
-    }
-
-    @Override
-    public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener,
-            final SchemaPath... types) {
-        return registerNotificationListener(listener, Arrays.asList(types));
-    }
-
-    @Override
-    protected DOMNotificationService delegate() {
-        return delegate;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMRpcServiceAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMRpcServiceAdapter.java
deleted file mode 100644 (file)
index 06cf6bc..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.Collection;
-import java.util.Set;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-@Deprecated
-public class DOMRpcServiceAdapter extends ForwardingObject implements org.opendaylight.mdsal.dom.api.DOMRpcService {
-    private final DOMRpcService delegate;
-
-    public DOMRpcServiceAdapter(final DOMRpcService delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public FluentFuture<DOMRpcResult> invokeRpc(final SchemaPath type, final NormalizedNode<?, ?> input) {
-        return new MdsalDOMRpcResultFutureAdapter(delegate().invokeRpc(type, input));
-    }
-
-    @Override
-    public <T extends org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener> ListenerRegistration<T>
-            registerRpcListener(final T listener) {
-        final ListenerRegistration<?> reg = delegate().registerRpcListener(new DOMRpcAvailabilityListener() {
-            @Override
-            public void onRpcAvailable(final Collection<DOMRpcIdentifier> rpcs) {
-                listener.onRpcAvailable(convert(rpcs));
-            }
-
-            @Override
-            public void onRpcUnavailable(final Collection<DOMRpcIdentifier> rpcs) {
-                listener.onRpcUnavailable(convert(rpcs));
-            }
-        });
-
-        return new AbstractListenerRegistration<T>(listener) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-            }
-        };
-    }
-
-    @Override
-    protected DOMRpcService delegate() {
-        return delegate;
-    }
-
-    public static Set<org.opendaylight.mdsal.dom.api.DOMRpcIdentifier> convert(
-            final Collection<DOMRpcIdentifier> rpcs) {
-        return rpcs.stream().map(DOMRpcIdentifier::toMdsal).collect(Collectors.toSet());
-    }
-}
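For reference, a sketch of the identifier translation the removed adapter exposed via its public convert() helper; the wrapper method and its `controllerRpcs` parameter are illustrative assumptions.

    // Illustrative sketch (not part of this change): Controller -> MD-SAL RPC identifier mapping.
    static Set<org.opendaylight.mdsal.dom.api.DOMRpcIdentifier> toMdsal(
            final Collection<DOMRpcIdentifier> controllerRpcs) {
        return DOMRpcServiceAdapter.convert(controllerRpcs);
    }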
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreAdapter.java
deleted file mode 100644 (file)
index f689001..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import com.google.common.collect.ForwardingObject;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public abstract class DOMStoreAdapter<T extends org.opendaylight.mdsal.dom.spi.store.DOMStore
-        & org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher> extends ForwardingObject
-        implements DOMStore, DOMStoreTreeChangePublisher {
-    @Override
-    protected abstract T delegate();
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction() {
-        return new DOMStoreReadTransactionAdapter<>(delegate().newReadOnlyTransaction());
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        return new DOMStoreReadWriteTransactionAdapter(delegate().newReadWriteTransaction());
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        return new DOMStoreWriteTransactionAdapter(delegate().newWriteOnlyTransaction());
-    }
-
-    @Override
-    public DOMStoreTransactionChain createTransactionChain() {
-        return new DOMStoreTransactionChainAdapter(delegate().createTransactionChain());
-    }
-
-    @Override
-    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
-            final YangInstanceIdentifier treeId, final L listener) {
-        final ListenerRegistration<?> reg = delegate().registerTreeChangeListener(treeId, listener::onDataTreeChanged);
-        return new AbstractListenerRegistration<L>(listener) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-            }
-        };
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreReadTransactionAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreReadTransactionAdapter.java
deleted file mode 100644 (file)
index 0f9b9c3..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.controller.md.sal.common.api.MappingCheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMStoreReadTransactionAdapter<T extends org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction>
-        extends ForwardingObject implements DOMStoreReadTransaction {
-    private final T delegate;
-
-    public DOMStoreReadTransactionAdapter(final T delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    protected T delegate() {
-        return delegate;
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-    }
-
-    @Override
-    public CheckedFuture<com.google.common.base.Optional<NormalizedNode<?, ?>>, ReadFailedException> read(
-            final YangInstanceIdentifier path) {
-        return MappingCheckedFuture.create(delegate.read(path).transform(
-            Optional::fromJavaUtil, MoreExecutors.directExecutor()), ReadFailedExceptionAdapter.INSTANCE);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
-        return MappingCheckedFuture.create(delegate.exists(path), ReadFailedExceptionAdapter.INSTANCE);
-    }
-}
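For reference, a sketch of a read going through the removed adapter, which re-exposed MD-SAL reads as Guava Optional/ReadFailedException futures; the method name and parameters are illustrative only.

    // Illustrative sketch (not part of this change): legacy read path over an MD-SAL store transaction.
    static CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> legacyRead(
            final org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction mdsalTx,
            final YangInstanceIdentifier path) {
        return new DOMStoreReadTransactionAdapter<>(mdsalTx).read(path);
    }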
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreReadWriteTransactionAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreReadWriteTransactionAdapter.java
deleted file mode 100644 (file)
index 0a6ee96..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMStoreReadWriteTransactionAdapter
-        extends DOMStoreReadTransactionAdapter<org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction>
-        implements DOMStoreReadWriteTransaction {
-    public DOMStoreReadWriteTransactionAdapter(
-            final org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction delegate) {
-        super(delegate);
-    }
-
-    @Override
-    public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        delegate().write(path, data);
-    }
-
-    @Override
-    public DOMStoreThreePhaseCommitCohort ready() {
-        return new DOMStoreThreePhaseCommitCohortAdapter(delegate().ready());
-    }
-
-    @Override
-    public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        delegate().merge(path, data);
-    }
-
-    @Override
-    public void delete(final YangInstanceIdentifier path) {
-        delegate().delete(path);
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreThreePhaseCommitCohortAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreThreePhaseCommitCohortAdapter.java
deleted file mode 100644 (file)
index 2a59b33..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-
-public class DOMStoreThreePhaseCommitCohortAdapter extends ForwardingObject implements DOMStoreThreePhaseCommitCohort {
-    private final org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort delegate;
-
-    public DOMStoreThreePhaseCommitCohortAdapter(
-            final org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort delegate() {
-        return delegate;
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return delegate.preCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return delegate.commit();
-    }
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return delegate.canCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegate.abort();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreTransactionChainAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreTransactionChainAdapter.java
deleted file mode 100644 (file)
index 84ff08a..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-
-public class DOMStoreTransactionChainAdapter extends ForwardingObject implements DOMStoreTransactionChain {
-    private final org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain delegate;
-
-    public DOMStoreTransactionChainAdapter(
-            final org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain delegate() {
-        return delegate;
-    }
-
-    @Override
-    public DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        return new DOMStoreWriteTransactionAdapter(delegate.newWriteOnlyTransaction());
-    }
-
-    @Override
-    public DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        return new DOMStoreReadWriteTransactionAdapter(delegate.newReadWriteTransaction());
-    }
-
-    @Override
-    public DOMStoreReadTransaction newReadOnlyTransaction() {
-        return new DOMStoreReadTransactionAdapter<>(delegate.newReadOnlyTransaction());
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreWriteTransactionAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMStoreWriteTransactionAdapter.java
deleted file mode 100644 (file)
index e51febe..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public class DOMStoreWriteTransactionAdapter extends ForwardingObject implements DOMStoreWriteTransaction {
-    private final org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction delegate;
-
-    public DOMStoreWriteTransactionAdapter(
-            final org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction delegate() {
-        return delegate;
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-    }
-
-    @Override
-    public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        delegate.write(path, data);
-    }
-
-    @Override
-    public DOMStoreThreePhaseCommitCohort ready() {
-        return new DOMStoreThreePhaseCommitCohortAdapter(delegate.ready());
-    }
-
-    @Override
-    public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        delegate.merge(path, data);
-    }
-
-    @Override
-    public void delete(final YangInstanceIdentifier path) {
-        delegate.delete(path);
-    }
-}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMTransactionChainAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/DOMTransactionChainAdapter.java
deleted file mode 100644 (file)
index 685986c..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.collect.ForwardingObject;
-import java.util.function.Function;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-
-@Deprecated
-public class DOMTransactionChainAdapter extends ForwardingObject
-        implements org.opendaylight.mdsal.dom.api.DOMTransactionChain, TransactionChainListener {
-    private final Cache<AsyncTransaction<?, ?>, DOMDataTreeTransaction> transactions = CacheBuilder.newBuilder()
-            .weakKeys().weakValues().build();
-
-    private final DOMTransactionChainListener listener;
-    private final DOMTransactionChain delegate;
-
-    public DOMTransactionChainAdapter(final DOMTransactionChainListener listener,
-            final Function<TransactionChainListener, DOMTransactionChain> function) {
-        this.listener = requireNonNull(listener);
-        this.delegate = function.apply(this);
-    }
-
-    @Override
-    public void close() {
-        delegate().close();
-        transactions.invalidateAll();
-    }
-
-    @Override
-    public DOMDataTreeReadTransaction newReadOnlyTransaction() {
-        final DOMDataReadOnlyTransaction tx = delegate.newReadOnlyTransaction();
-        return track(tx, new DOMDataTreeReadTransactionAdapter(tx) {
-            @Override
-            public void close() {
-                untrack(delegate());
-                super.close();
-            }
-        });
-    }
-
-    @Override
-    public DOMDataTreeWriteTransaction newWriteOnlyTransaction() {
-        final DOMDataWriteTransaction tx = delegate.newWriteOnlyTransaction();
-        return track(tx, new DOMDataTreeWriteTransactionAdapter(tx) {
-            @Override
-            public boolean cancel() {
-                untrack(delegate());
-                return super.cancel();
-            }
-        });
-    }
-
-    @Override
-    public DOMDataTreeReadWriteTransaction newReadWriteTransaction() {
-        final DOMDataReadWriteTransaction tx = delegate.newReadWriteTransaction();
-        return track(tx, new DOMDataTreeReadWriteTransactionAdapter(tx) {
-            @Override
-            public boolean cancel() {
-                untrack(delegate());
-                return super.cancel();
-            }
-
-            @Override
-            public void close() {
-                untrack(delegate());
-                super.close();
-            }
-        });
-    }
-
-    @Override
-    public void onTransactionChainFailed(final TransactionChain<?, ?> chain, final AsyncTransaction<?, ?> transaction,
-            final Throwable cause) {
-        listener.onTransactionChainFailed(this, null, cause);
-    }
-
-    @Override
-    public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
-        listener.onTransactionChainSuccessful(this);
-    }
-
-    @Override
-    protected DOMTransactionChain delegate() {
-        return delegate;
-    }
-
-    private <T extends DOMDataTreeTransaction> T track(final AsyncTransaction<?, ?> controllerTx, final T mdsalTx) {
-        transactions.put(controllerTx, mdsalTx);
-        return mdsalTx;
-    }
-
-    void untrack(final AsyncTransaction<?, ?> controllerTx) {
-        transactions.invalidate(controllerTx);
-    }
-}
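For reference, a sketch of how the removed chain adapter was wired up: it registers itself as the Controller chain listener and tracks adapted transactions in a weak cache. The helper method and its parameters are illustrative assumptions.

    // Illustrative sketch (not part of this change): exposing an MD-SAL transaction chain
    // on top of a Controller broker via the removed adapter.
    static org.opendaylight.mdsal.dom.api.DOMTransactionChain adaptChain(
            final DOMDataBroker controllerBroker, final DOMTransactionChainListener listener) {
        return new DOMTransactionChainAdapter(listener, controllerBroker::createTransactionChain);
    }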
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMActionServiceAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMActionServiceAdapter.java
deleted file mode 100644 (file)
index 8331bad..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.FluentFuture;
-import org.opendaylight.controller.md.sal.dom.api.DOMActionService;
-import org.opendaylight.mdsal.dom.api.DOMActionResult;
-import org.opendaylight.mdsal.dom.api.DOMActionServiceExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-@Deprecated
-public class LegacyDOMActionServiceAdapter extends ForwardingObject implements DOMActionService {
-    private final org.opendaylight.mdsal.dom.api.DOMActionService delegate;
-
-    public LegacyDOMActionServiceAdapter(final org.opendaylight.mdsal.dom.api.DOMActionService delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.api.DOMActionService delegate() {
-        return delegate;
-    }
-
-    @Override
-    public FluentFuture<? extends DOMActionResult> invokeAction(final SchemaPath type, final DOMDataTreeIdentifier path,
-            final ContainerNode input) {
-        return delegate.invokeAction(type, path, input);
-    }
-
-    @Override
-    public ClassToInstanceMap<DOMActionServiceExtension> getExtensions() {
-        return delegate.getExtensions();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMDataBrokerAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMDataBrokerAdapter.java
deleted file mode 100644 (file)
index ab92b00..0000000
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap.Builder;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Supplier;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.MappingCheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.DataStoreUnavailableException;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainClosedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeCommitCohortRegistry;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainClosedException;
-import org.opendaylight.mdsal.dom.api.DOMTransactionChainListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-
-/**
- * Adapter between the legacy controller API-based DOMDataBroker and the mdsal API-based DOMDataBroker.
- *
- * @author Thomas Pantelis
- */
-public class LegacyDOMDataBrokerAdapter extends ForwardingObject implements DOMDataBroker {
-    private static final ExceptionMapper<TransactionCommitFailedException> COMMIT_EX_MAPPER =
-            new ExceptionMapper<TransactionCommitFailedException>("commit", TransactionCommitFailedException.class) {
-        @Override
-        protected TransactionCommitFailedException newWithCause(final String message, final Throwable cause) {
-            if (cause instanceof org.opendaylight.mdsal.common.api.OptimisticLockFailedException) {
-                return new OptimisticLockFailedException(cause.getMessage(), cause.getCause());
-            } else if (cause instanceof org.opendaylight.mdsal.common.api.TransactionCommitFailedException) {
-                Throwable rootCause = cause.getCause();
-                if (rootCause instanceof org.opendaylight.mdsal.common.api.DataStoreUnavailableException) {
-                    rootCause = new DataStoreUnavailableException(rootCause.getMessage(), rootCause.getCause());
-                }
-
-                return new TransactionCommitFailedException(cause.getMessage(), rootCause);
-            }
-
-            return new TransactionCommitFailedException(message, cause);
-        }
-    };
-
-    private final org.opendaylight.mdsal.dom.api.DOMDataBroker delegate;
-    private final ClassToInstanceMap<DOMDataBrokerExtension> extensions;
-
-    public LegacyDOMDataBrokerAdapter(final org.opendaylight.mdsal.dom.api.DOMDataBroker delegate) {
-        this.delegate = delegate;
-
-        ClassToInstanceMap<org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension> delegateExtensions =
-                delegate.getExtensions();
-
-        Builder<DOMDataBrokerExtension> extBuilder = ImmutableClassToInstanceMap.builder();
-        final org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService delegateTreeChangeService =
-                (org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService) delegateExtensions.get(
-                        org.opendaylight.mdsal.dom.api.DOMDataTreeChangeService.class);
-        if (delegateTreeChangeService != null) {
-            extBuilder.put(DOMDataTreeChangeService.class, new DOMDataTreeChangeService() {
-                @Override
-                public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(
-                        final DOMDataTreeIdentifier treeId, final L listener) {
-                    final org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener delegateListener;
-                    if (listener instanceof ClusteredDOMDataTreeChangeListener) {
-                        delegateListener = new ClusteredProxyListener(listener);
-                    } else {
-                        delegateListener = new ProxyListener(listener);
-                    }
-
-                    final ListenerRegistration<org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener> reg =
-                        delegateTreeChangeService.registerDataTreeChangeListener(treeId.toMdsal(), delegateListener);
-
-                    return new ListenerRegistration<L>() {
-                        @Override
-                        public L getInstance() {
-                            return listener;
-                        }
-
-                        @Override
-                        public void close() {
-                            reg.close();
-                        }
-                    };
-                }
-            });
-        }
-
-        final org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry delegateCohortRegistry =
-                (org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry) delegateExtensions.get(
-                        org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry.class);
-        if (delegateCohortRegistry != null) {
-            extBuilder.put(DOMDataTreeCommitCohortRegistry.class, delegateCohortRegistry::registerCommitCohort);
-        }
-
-        extensions = extBuilder.build();
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.api.DOMDataBroker delegate() {
-        return delegate;
-    }
-
-    @Override
-    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
-        return extensions;
-    }
-
-    @Override
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        return new DOMDataReadOnlyTransactionAdapter(delegate().newReadOnlyTransaction());
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        return new DOMDataTransactionAdapter(delegate().newReadWriteTransaction());
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        return new DOMDataTransactionAdapter(delegate().newWriteOnlyTransaction());
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
-        AtomicReference<DOMTransactionChain> legacyChain = new AtomicReference<>();
-        DOMTransactionChainListener delegateListener =
-                new DOMTransactionChainListener() {
-            @Override
-            @SuppressWarnings("rawtypes")
-            public void onTransactionChainFailed(final org.opendaylight.mdsal.dom.api.DOMTransactionChain chain,
-                    final DOMDataTreeTransaction transaction, final Throwable cause) {
-                listener.onTransactionChainFailed(legacyChain.get(),
-                    (AsyncTransaction) () -> transaction.getIdentifier(),
-                        cause instanceof Exception ? COMMIT_EX_MAPPER.apply((Exception)cause) : cause);
-            }
-
-            @Override
-            public void onTransactionChainSuccessful(final org.opendaylight.mdsal.dom.api.DOMTransactionChain chain) {
-                listener.onTransactionChainSuccessful(legacyChain.get());
-            }
-        };
-
-        final org.opendaylight.mdsal.dom.api.DOMTransactionChain delegateChain =
-                delegate().createTransactionChain(delegateListener);
-        legacyChain.set(new DOMTransactionChain() {
-            @Override
-            public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-                return new DOMDataReadOnlyTransactionAdapter(wrapException(delegateChain::newReadOnlyTransaction));
-            }
-
-            @Override
-            public DOMDataReadWriteTransaction newReadWriteTransaction() {
-                return new DOMDataTransactionAdapter(wrapException(delegateChain::newReadWriteTransaction));
-            }
-
-            @Override
-            public DOMDataWriteTransaction newWriteOnlyTransaction() {
-                return new DOMDataTransactionAdapter(wrapException(delegateChain::newWriteOnlyTransaction));
-            }
-
-            @Override
-            public void close() {
-                delegateChain.close();
-            }
-        });
-
-        return legacyChain.get();
-    }
-
-    static <T> T wrapException(final Supplier<T> supplier) {
-        try {
-            return supplier.get();
-        } catch (DOMTransactionChainClosedException e) {
-            throw new TransactionChainClosedException("Transaction chain already closed", e);
-        }
-    }
-
-    private static class DOMDataTransactionAdapter implements DOMDataReadWriteTransaction {
-        private final DOMDataTreeReadTransaction readDelegate;
-        private final DOMDataTreeWriteTransaction writeDelegate;
-        private final Object identifier;
-
-        DOMDataTransactionAdapter(final @NonNull DOMDataTreeReadTransaction readDelegate) {
-            this.readDelegate = requireNonNull(readDelegate);
-            this.identifier = readDelegate.getIdentifier();
-            this.writeDelegate = null;
-        }
-
-        DOMDataTransactionAdapter(final @NonNull DOMDataTreeWriteTransaction writeDelegate) {
-            this.writeDelegate = requireNonNull(writeDelegate);
-            this.identifier = writeDelegate.getIdentifier();
-            this.readDelegate = null;
-        }
-
-        DOMDataTransactionAdapter(final @NonNull DOMDataTreeReadWriteTransaction rwDelegate) {
-            this.readDelegate = requireNonNull(rwDelegate);
-            this.writeDelegate = rwDelegate;
-            this.identifier = readDelegate.getIdentifier();
-        }
-
-        DOMDataTreeReadTransaction readDelegate() {
-            return readDelegate;
-        }
-
-        DOMDataTreeWriteTransaction writeDelegate() {
-            return writeDelegate;
-        }
-
-        @Override
-        public Object getIdentifier() {
-            return identifier;
-        }
-
-        @Override
-        public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store,
-                final YangInstanceIdentifier path) {
-            return MappingCheckedFuture.create(readDelegate().read(store.toMdsal(), path).transform(
-                Optional::fromJavaUtil, MoreExecutors.directExecutor()), ReadFailedExceptionAdapter.INSTANCE);
-        }
-
-        @Override
-        public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-                final YangInstanceIdentifier path) {
-            return MappingCheckedFuture.create(readDelegate().exists(store.toMdsal(), path),
-                    ReadFailedExceptionAdapter.INSTANCE);
-        }
-
-        @Override
-        public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-            writeDelegate().delete(store.toMdsal(), path);
-        }
-
-        @Override
-        public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                final NormalizedNode<?, ?> data) {
-            writeDelegate().put(store.toMdsal(), path, data);
-        }
-
-        @Override
-        public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-                final NormalizedNode<?, ?> data) {
-            writeDelegate().merge(store.toMdsal(), path, data);
-        }
-
-        @Override
-        public boolean cancel() {
-            return writeDelegate().cancel();
-        }
-
-        @Override
-        public FluentFuture<? extends CommitInfo> commit() {
-            final SettableFuture<CommitInfo> resultFuture = SettableFuture.create();
-            writeDelegate().commit().addCallback(new FutureCallback<CommitInfo>() {
-                @Override
-                public void onSuccess(final CommitInfo result) {
-                    resultFuture.set(result);
-                }
-
-                @Override
-                public void onFailure(final Throwable ex) {
-                    if (ex instanceof Exception) {
-                        resultFuture.setException(COMMIT_EX_MAPPER.apply((Exception)ex));
-                    } else {
-                        resultFuture.setException(ex);
-                    }
-                }
-            }, MoreExecutors.directExecutor());
-
-            return resultFuture;
-        }
-    }
-
-    private static class DOMDataReadOnlyTransactionAdapter implements DOMDataReadOnlyTransaction {
-        private final DOMDataTransactionAdapter adapter;
-
-        DOMDataReadOnlyTransactionAdapter(final DOMDataTreeReadTransaction delegateTx) {
-            adapter = new DOMDataTransactionAdapter(delegateTx);
-        }
-
-        @Override
-        public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store,
-                final YangInstanceIdentifier path) {
-            return adapter.read(store, path);
-        }
-
-        @Override
-        public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-                final YangInstanceIdentifier path) {
-            return adapter.exists(store, path);
-        }
-
-        @Override
-        public Object getIdentifier() {
-            return adapter.getIdentifier();
-        }
-
-        @Override
-        public void close() {
-            adapter.readDelegate().close();
-        }
-    }
-
-    private static class ProxyListener extends ForwardingObject
-            implements org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener {
-        private final DOMDataTreeChangeListener delegate;
-
-        ProxyListener(final DOMDataTreeChangeListener delegate) {
-            this.delegate = requireNonNull(delegate);
-        }
-
-        @Override
-        public void onDataTreeChanged(Collection<DataTreeCandidate> changes) {
-            delegate.onDataTreeChanged(changes);
-        }
-
-        @Override
-        protected DOMDataTreeChangeListener delegate() {
-            return delegate;
-        }
-    }
-
-    private static final class ClusteredProxyListener extends ProxyListener
-            implements org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener {
-
-        ClusteredProxyListener(DOMDataTreeChangeListener delegate) {
-            super(delegate);
-        }
-    }
-}
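For reference, a sketch of a Controller-API consumer writing through the removed broker adapter on top of an MD-SAL broker; commit failures surface as the legacy TransactionCommitFailedException hierarchy via COMMIT_EX_MAPPER. The helper method name and its `path`/`data` parameters are illustrative only.

    // Illustrative sketch (not part of this change): legacy write path over an MD-SAL broker.
    static FluentFuture<? extends CommitInfo> legacyWrite(
            final org.opendaylight.mdsal.dom.api.DOMDataBroker mdsalBroker,
            final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
        final DOMDataReadWriteTransaction tx =
            new LegacyDOMDataBrokerAdapter(mdsalBroker).newReadWriteTransaction();
        tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
        return tx.commit();
    }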
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMNotificationServiceAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMNotificationServiceAdapter.java
deleted file mode 100644 (file)
index 293fcb3..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import java.time.Instant;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Date;
-import org.opendaylight.controller.md.sal.dom.api.DOMEvent;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-@Deprecated
-public class LegacyDOMNotificationServiceAdapter extends ForwardingObject implements DOMNotificationService {
-    private final org.opendaylight.mdsal.dom.api.DOMNotificationService delegate;
-
-    public LegacyDOMNotificationServiceAdapter(final org.opendaylight.mdsal.dom.api.DOMNotificationService delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener,
-            final Collection<SchemaPath> types) {
-        final ListenerRegistration<org.opendaylight.mdsal.dom.api.DOMNotificationListener> reg =
-                delegate().registerNotificationListener(notification -> {
-                    if (notification instanceof DOMNotification) {
-                        listener.onNotification((DOMNotification)notification);
-                        return;
-                    }
-
-                    if (notification instanceof org.opendaylight.mdsal.dom.api.DOMEvent) {
-                        listener.onNotification(new DefaultDOMEvent(notification,
-                            (org.opendaylight.mdsal.dom.api.DOMEvent)notification));
-                        return;
-                    }
-
-                    listener.onNotification(new DefaultDOMNotification(notification));
-                }, types);
-
-        return new AbstractListenerRegistration<T>(listener) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-            }
-        };
-    }
-
-    @Override
-    public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener,
-            final SchemaPath... types) {
-        return registerNotificationListener(listener, Arrays.asList(types));
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.api.DOMNotificationService delegate() {
-        return delegate;
-    }
-
-    private static class DefaultDOMNotification implements DOMNotification {
-        private final org.opendaylight.mdsal.dom.api.DOMNotification delegate;
-
-        DefaultDOMNotification(final org.opendaylight.mdsal.dom.api.DOMNotification delegate) {
-            this.delegate = requireNonNull(delegate);
-        }
-
-        @Override
-        public SchemaPath getType() {
-            return delegate.getType();
-        }
-
-        @Override
-        public ContainerNode getBody() {
-            return delegate.getBody();
-        }
-    }
-
-    private static class DefaultDOMEvent extends DefaultDOMNotification implements DOMEvent {
-        private final Date eventTime;
-
-        DefaultDOMEvent(final org.opendaylight.mdsal.dom.api.DOMNotification fromNotification,
-                final org.opendaylight.mdsal.dom.api.DOMEvent fromEvent) {
-            super(fromNotification);
-            final Instant eventInstant = fromEvent.getEventInstant();
-            this.eventTime = eventInstant != null ? Date.from(eventInstant) : null;
-        }
-
-        @Override
-        public Date getEventTime() {
-            return eventTime;
-        }
-    }
-}
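For reference, a sketch of registering a Controller listener against an MD-SAL notification service through the removed adapter, which re-wrapped plain MD-SAL notifications (and DOMEvent timestamps) into the legacy types before delivery; the helper method and parameters are illustrative assumptions.

    // Illustrative sketch (not part of this change): legacy notification registration.
    static ListenerRegistration<DOMNotificationListener> listen(
            final org.opendaylight.mdsal.dom.api.DOMNotificationService mdsalService,
            final DOMNotificationListener listener, final SchemaPath... types) {
        return new LegacyDOMNotificationServiceAdapter(mdsalService)
            .registerNotificationListener(listener, types);
    }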
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMRpcResultFutureAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMRpcResultFutureAdapter.java
deleted file mode 100644 (file)
index 10a85fe..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationNotAvailableException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.api.DefaultDOMRpcException;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-
-/**
- * Adapts a {@link org.opendaylight.mdsal.dom.api.DOMRpcResult} CheckedFuture to a {@link DOMRpcResult} CheckedFuture.
- *
- * @author Thomas Pantelis
- */
-public class LegacyDOMRpcResultFutureAdapter extends AbstractDOMRpcResultFutureAdapter<DOMRpcResult,
-        org.opendaylight.mdsal.dom.api.DOMRpcResult, FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult>,
-        DOMRpcException> implements CheckedFuture<DOMRpcResult, DOMRpcException> {
-
-    private static final ExceptionMapper<DOMRpcException> LEGACY_DOM_RPC_EX_MAPPER =
-            new ExceptionMapper<DOMRpcException>("rpc", DOMRpcException.class) {
-        @Override
-        protected DOMRpcException newWithCause(String message, Throwable cause) {
-            return cause instanceof DOMRpcException ? (DOMRpcException)cause
-                : cause instanceof org.opendaylight.mdsal.dom.api.DOMRpcImplementationNotAvailableException
-                    ? new DOMRpcImplementationNotAvailableException(cause.getMessage(), cause.getCause())
-                        : new DefaultDOMRpcException("RPC failed", cause);
-        }
-    };
-
-    public LegacyDOMRpcResultFutureAdapter(FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> delegate) {
-        super(delegate, LEGACY_DOM_RPC_EX_MAPPER);
-    }
-
-    @Override
-    @SuppressFBWarnings("BC_UNCONFIRMED_CAST_OF_RETURN_VALUE")
-    public DOMRpcResult checkedGet() throws DOMRpcException {
-        try {
-            return get();
-        } catch (InterruptedException | ExecutionException e) {
-            throw LEGACY_DOM_RPC_EX_MAPPER.apply(e);
-        }
-    }
-
-    @Override
-    @SuppressFBWarnings("BC_UNCONFIRMED_CAST_OF_RETURN_VALUE")
-    public DOMRpcResult checkedGet(final long timeout, final TimeUnit unit) throws TimeoutException, DOMRpcException {
-        try {
-            return get(timeout, unit);
-        } catch (InterruptedException | ExecutionException e) {
-            throw LEGACY_DOM_RPC_EX_MAPPER.apply(e);
-        }
-    }
-
-    @Override
-    protected DOMRpcResult transform(org.opendaylight.mdsal.dom.api.DOMRpcResult fromResult) {
-        return new DefaultDOMRpcResult(fromResult.getResult(), fromResult.getErrors());
-    }
-}
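For reference, a sketch of blocking on an MD-SAL RPC result through the removed future adapter, which maps failures to the legacy DOMRpcException hierarchy in checkedGet(); the helper method and its parameter are illustrative only.

    // Illustrative sketch (not part of this change): legacy checked access to an MD-SAL RPC future.
    static DOMRpcResult awaitLegacyResult(
            final FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> mdsalFuture)
            throws DOMRpcException {
        return new LegacyDOMRpcResultFutureAdapter(mdsalFuture).checkedGet();
    }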
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMRpcServiceAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/LegacyDOMRpcServiceAdapter.java
deleted file mode 100644 (file)
index 170d9c9..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-@Deprecated
-public class LegacyDOMRpcServiceAdapter extends ForwardingObject implements DOMRpcService {
-    private final org.opendaylight.mdsal.dom.api.DOMRpcService delegate;
-
-    public LegacyDOMRpcServiceAdapter(final org.opendaylight.mdsal.dom.api.DOMRpcService delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath type,
-                                                                  final NormalizedNode<?, ?> input) {
-        final FluentFuture<org.opendaylight.mdsal.dom.api.DOMRpcResult> future = delegate().invokeRpc(type, input);
-        return future instanceof MdsalDOMRpcResultFutureAdapter ? ((MdsalDOMRpcResultFutureAdapter)future).delegate()
-                : new LegacyDOMRpcResultFutureAdapter(future);
-    }
-
-    @Override
-    public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(final T listener) {
-        final ListenerRegistration<org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener> reg =
-            delegate().registerRpcListener(new RpcAvailabilityListenerAdapter<>(listener));
-
-        return new AbstractListenerRegistration<T>(listener) {
-            @Override
-            protected void removeRegistration() {
-                reg.close();
-            }
-        };
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.api.DOMRpcService delegate() {
-        return delegate;
-    }
-}
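
For reference, a minimal usage sketch of the adapter removed above. The helper class and method names below are illustrative only (not part of the removed code); the sketch assumes the caller already has an mdsal DOMRpcService instance to wrap.

import com.google.common.util.concurrent.CheckedFuture;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.sal.core.compat.LegacyDOMRpcServiceAdapter;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
import org.opendaylight.yangtools.yang.model.api.SchemaPath;

// Illustrative helper: invoke an RPC through the legacy controller API backed by mdsal.
final class LegacyRpcBridgeExample {
    private LegacyRpcBridgeExample() {
        // utility class
    }

    static CheckedFuture<DOMRpcResult, DOMRpcException> invokeViaLegacyApi(
            final org.opendaylight.mdsal.dom.api.DOMRpcService mdsalService,
            final SchemaPath rpcType, final NormalizedNode<?, ?> input) {
        // The adapter forwards to the mdsal delegate and re-wraps the result future.
        final DOMRpcService legacy = new LegacyDOMRpcServiceAdapter(mdsalService);
        return legacy.invokeRpc(rpcType, input);
    }
}
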
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/MdsalDOMRpcResultFutureAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/MdsalDOMRpcResultFutureAdapter.java
deleted file mode 100644 (file)
index acd341b..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.mdsal.dom.api.DOMRpcException;
-import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.mdsal.dom.api.DefaultDOMRpcException;
-import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-
-/**
- * Adapts a {@link org.opendaylight.controller.md.sal.dom.api.DOMRpcResult} CheckedFuture to a
- * {@link DOMRpcResult} CheckedFuture.
- *
- * @author Thomas Pantelis
- */
-public class MdsalDOMRpcResultFutureAdapter extends AbstractDOMRpcResultFutureAdapter<
-        DOMRpcResult, org.opendaylight.controller.md.sal.dom.api.DOMRpcResult,
-        CheckedFuture<org.opendaylight.controller.md.sal.dom.api.DOMRpcResult,
-            org.opendaylight.controller.md.sal.dom.api.DOMRpcException>, DOMRpcException> {
-    private static final ExceptionMapper<DOMRpcException> MDSAL_DOM_RPC_EX_MAPPER =
-            new ExceptionMapper<DOMRpcException>("rpc", DOMRpcException.class) {
-        @Override
-        protected DOMRpcException newWithCause(String message, Throwable cause) {
-            return cause instanceof DOMRpcException ? (DOMRpcException) cause
-                    : new DefaultDOMRpcException("RPC failed", cause);
-        }
-    };
-
-    public MdsalDOMRpcResultFutureAdapter(CheckedFuture<org.opendaylight.controller.md.sal.dom.api.DOMRpcResult,
-            org.opendaylight.controller.md.sal.dom.api.DOMRpcException> delegate) {
-        super(delegate, MDSAL_DOM_RPC_EX_MAPPER);
-    }
-
-    @Override
-    protected DOMRpcResult transform(org.opendaylight.controller.md.sal.dom.api.DOMRpcResult fromResult) {
-        return new DefaultDOMRpcResult(fromResult.getResult(), fromResult.getErrors());
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/ReadFailedExceptionAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/ReadFailedExceptionAdapter.java
deleted file mode 100644 (file)
index a2fe06b..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2018 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
-
-/**
- * Adapter that maps the mdsal API-based ReadFailedException to the controller API-based ReadFailedException.
- *
- * @author Thomas Pantelis
- */
-public final class ReadFailedExceptionAdapter extends ExceptionMapper<ReadFailedException> {
-    public static final ReadFailedExceptionAdapter INSTANCE = new ReadFailedExceptionAdapter();
-
-    private ReadFailedExceptionAdapter() {
-        super("read", ReadFailedException.class);
-    }
-
-    @Override
-    protected ReadFailedException newWithCause(String message, Throwable cause) {
-        if (cause instanceof org.opendaylight.mdsal.common.api.ReadFailedException) {
-            return new ReadFailedException(cause.getMessage(), cause.getCause());
-        }
-
-        return new ReadFailedException(message, cause);
-    }
-}
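
As a reminder of how this mapper was consumed, here is a minimal sketch (illustrative helper, not part of the removed code), following the same try/catch pattern used by the other adapters in this package:

import com.google.common.util.concurrent.ListenableFuture;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.sal.core.compat.ReadFailedExceptionAdapter;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

// Illustrative helper: block on a read future and surface failures as the legacy exception.
final class ReadMappingExample {
    private ReadMappingExample() {
        // utility class
    }

    static Optional<NormalizedNode<?, ?>> getChecked(
            final ListenableFuture<Optional<NormalizedNode<?, ?>>> readFuture) throws ReadFailedException {
        try {
            return readFuture.get();
        } catch (InterruptedException | ExecutionException e) {
            // Map both failure shapes onto the controller-facing ReadFailedException.
            throw ReadFailedExceptionAdapter.INSTANCE.apply(e);
        }
    }
}
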
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/RpcAvailabilityListenerAdapter.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/RpcAvailabilityListenerAdapter.java
deleted file mode 100644 (file)
index 717eda2..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.collect.ForwardingObject;
-import java.util.Collection;
-import java.util.stream.Collectors;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-
-@Deprecated
-public class RpcAvailabilityListenerAdapter<T extends DOMRpcAvailabilityListener> extends ForwardingObject
-        implements org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener {
-    private final @NonNull T delegate;
-
-    public RpcAvailabilityListenerAdapter(final T delegate) {
-        this.delegate = requireNonNull(delegate);
-    }
-
-    @Override
-    public void onRpcAvailable(final Collection<org.opendaylight.mdsal.dom.api.DOMRpcIdentifier> rpcs) {
-        delegate.onRpcAvailable(convert(rpcs));
-    }
-
-    @Override
-    public void onRpcUnavailable(final Collection<org.opendaylight.mdsal.dom.api.DOMRpcIdentifier> rpcs) {
-        delegate.onRpcUnavailable(convert(rpcs));
-    }
-
-    @Override
-    protected T delegate() {
-        return delegate;
-    }
-
-    private static @NonNull Collection<DOMRpcIdentifier> convert(
-            final Collection<org.opendaylight.mdsal.dom.api.DOMRpcIdentifier> from) {
-        return from.stream().map(DOMRpcIdentifier::fromMdsal).collect(Collectors.toList());
-    }
-}
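
A minimal sketch of how this adapter was typically wired (illustrative helper and names; the only assumed API is mdsal's DOMRpcService.registerRpcListener, which is also used by LegacyDOMRpcServiceAdapter above):

import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
import org.opendaylight.controller.sal.core.compat.RpcAvailabilityListenerAdapter;
import org.opendaylight.yangtools.concepts.ListenerRegistration;

// Illustrative helper: expose a legacy availability listener to an mdsal RPC service.
final class AvailabilityBridgeExample {
    private AvailabilityBridgeExample() {
        // utility class
    }

    static ListenerRegistration<?> registerLegacyListener(
            final org.opendaylight.mdsal.dom.api.DOMRpcService mdsalService,
            final DOMRpcAvailabilityListener legacyListener) {
        // Identifiers are converted from the mdsal DOMRpcIdentifier to the legacy one on each callback.
        return mdsalService.registerRpcListener(new RpcAvailabilityListenerAdapter<>(legacyListener));
    }
}
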
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/TransactionUtils.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/TransactionUtils.java
deleted file mode 100644 (file)
index 50e697a..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.fromMdsal;
-
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Optional;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-final class TransactionUtils {
-    private TransactionUtils() {
-
-    }
-
-    static FluentFuture<Boolean> exists(final DOMDataReadTransaction tx,
-            final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return FluentFuture.from(tx.exists(fromMdsal(store), path));
-    }
-
-    static FluentFuture<Optional<NormalizedNode<?, ?>>> read(final DOMDataReadTransaction tx,
-            final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        return FluentFuture.from(tx.read(fromMdsal(store), path)).transform(opt -> opt.toJavaUtil(),
-            MoreExecutors.directExecutor());
-    }
-
-    static void delete(final DOMDataWriteTransaction tx, final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        tx.delete(fromMdsal(store), path);
-    }
-
-    static void merge(final DOMDataWriteTransaction tx, final LogicalDatastoreType store,
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        tx.merge(fromMdsal(store), path, data);
-    }
-
-    static void put(final DOMDataWriteTransaction tx, final LogicalDatastoreType store,
-            final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        tx.put(fromMdsal(store), path, data);
-    }
-}
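
TransactionUtils is package-private, so the sketch below assumes a caller inside the same org.opendaylight.controller.sal.core.compat package; it only restates how the helpers above were invoked, with an illustrative wrapper class.

import com.google.common.util.concurrent.FluentFuture;
import java.util.Optional;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

// Illustrative helper, assumed to live in org.opendaylight.controller.sal.core.compat.
final class TransactionUtilsExample {
    private TransactionUtilsExample() {
        // utility class
    }

    static FluentFuture<Optional<NormalizedNode<?, ?>>> readConfig(final DOMDataReadTransaction tx,
            final YangInstanceIdentifier path) {
        // Takes the mdsal datastore enum; TransactionUtils converts it to the legacy enum internally.
        return TransactionUtils.read(tx, LogicalDatastoreType.CONFIGURATION, path);
    }
}
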
diff --git a/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/package-info.java b/opendaylight/md-sal/sal-dom-compat/src/main/java/org/opendaylight/controller/sal/core/compat/package-info.java
deleted file mode 100644 (file)
index d6021f7..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-/**
- * Controller/MDSAL compatibility library. Contains classes useful for bridging Controller's sal-core-{api,spi}
- * to MD-SAL's mdsal-dom-{api,spi}.
- */
-package org.opendaylight.controller.sal.core.compat;
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-compat/src/test/java/org/opendaylight/controller/sal/core/compat/DOMMountPointAdapterTest.java b/opendaylight/md-sal/sal-dom-compat/src/test/java/org/opendaylight/controller/sal/core/compat/DOMMountPointAdapterTest.java
deleted file mode 100644 (file)
index 4c57f1c..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2018 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import java.util.Optional;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-import org.opendaylight.controller.md.sal.dom.api.DOMActionService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.mdsal.dom.api.DOMMountPoint;
-
-@Deprecated
-@RunWith(MockitoJUnitRunner.StrictStubs.class)
-public class DOMMountPointAdapterTest {
-    @Mock
-    private DOMMountPoint delegate;
-
-    private DOMMountPointAdapter adapter;
-
-    @Before
-    public void before() {
-        doReturn(Optional.empty()).when(delegate).getService(any());
-        adapter = new DOMMountPointAdapter(delegate);
-    }
-
-    @Test
-    public void testDOMActionService() {
-        assertFalse(adapter.getService(DOMActionService.class).isPresent());
-
-        org.opendaylight.mdsal.dom.api.DOMActionService mdsal =
-                mock(org.opendaylight.mdsal.dom.api.DOMActionService.class);
-
-        doReturn(Optional.of(mdsal)).when(delegate).getService(org.opendaylight.mdsal.dom.api.DOMActionService.class);
-        assertTrue(adapter.getService(DOMActionService.class).isPresent());
-    }
-
-    @Test
-    public void testDOMDataBroker() {
-        assertFalse(adapter.getService(DOMDataBroker.class).isPresent());
-
-        org.opendaylight.mdsal.dom.api.DOMDataBroker mdsal = mock(org.opendaylight.mdsal.dom.api.DOMDataBroker.class);
-        doReturn(ImmutableClassToInstanceMap.of()).when(mdsal).getExtensions();
-
-        doReturn(Optional.of(mdsal)).when(delegate).getService(org.opendaylight.mdsal.dom.api.DOMDataBroker.class);
-        assertTrue(adapter.getService(DOMDataBroker.class).isPresent());
-    }
-
-    @Test
-    public void testDOMNotificationService() {
-        assertFalse(adapter.getService(DOMNotificationService.class).isPresent());
-
-        org.opendaylight.mdsal.dom.api.DOMNotificationService mdsal =
-                mock(org.opendaylight.mdsal.dom.api.DOMNotificationService.class);
-
-        doReturn(Optional.of(mdsal)).when(delegate).getService(
-            org.opendaylight.mdsal.dom.api.DOMNotificationService.class);
-        assertTrue(adapter.getService(DOMNotificationService.class).isPresent());
-    }
-
-    @Test
-    public void testDOMRpcService() {
-        assertFalse(adapter.getService(DOMRpcService.class).isPresent());
-
-        org.opendaylight.mdsal.dom.api.DOMRpcService mdsal = mock(org.opendaylight.mdsal.dom.api.DOMRpcService.class);
-
-        doReturn(Optional.of(mdsal)).when(delegate).getService(org.opendaylight.mdsal.dom.api.DOMRpcService.class);
-        assertTrue(adapter.getService(DOMRpcService.class).isPresent());
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-compat/src/test/java/org/opendaylight/controller/sal/core/compat/LegacyDOMDataBrokerAdapterTest.java b/opendaylight/md-sal/sal-dom-compat/src/test/java/org/opendaylight/controller/sal/core/compat/LegacyDOMDataBrokerAdapterTest.java
deleted file mode 100644 (file)
index ce375ed..0000000
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Copyright (c) 2017 Inocybe Technologies and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.compat;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Optional;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.InOrder;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.DataStoreUnavailableException;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-import org.opendaylight.mdsal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry;
-import org.opendaylight.mdsal.dom.broker.SerializedDOMDataBroker;
-import org.opendaylight.mdsal.dom.spi.store.DOMStore;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTransactionChain;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreTreeChangePublisher;
-import org.opendaylight.mdsal.dom.spi.store.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataValidationFailedException;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-
-/**
- * Unit tests for LegacyDOMDataBrokerAdapter.
- *
- * @author Thomas Pantelis
- */
-public class LegacyDOMDataBrokerAdapterTest {
-    public static final QName TEST_QNAME = QName.create("test", "2018-07-11", "test");
-    private static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
-
-    @Mock
-    private TestDOMStore mockOperStore;
-
-    @Mock
-    private TestDOMStore mockConfigStore;
-
-    @Mock
-    private DOMStoreReadTransaction mockConfigReadTx;
-
-    @Mock
-    private DOMStoreWriteTransaction mockConfigWriteTx;
-
-    @Mock
-    private DOMStoreReadWriteTransaction mockConfigReadWriteTx;
-
-    @Mock
-    private DOMStoreThreePhaseCommitCohort mockConfigCommitCohort;
-
-    @Mock
-    private DOMStoreReadTransaction mockOperReadTx;
-
-    @Mock
-    private DOMStoreWriteTransaction mockOperWriteTx;
-
-    @Mock
-    private DOMStoreReadWriteTransaction mockOperReadWriteTx;
-
-    @Mock
-    private DOMStoreThreePhaseCommitCohort mockOperCommitCohort;
-
-    @Mock
-    private DOMDataTreeCommitCohortRegistry mockCommitCohortRegistry;
-
-    private LegacyDOMDataBrokerAdapter adapter;
-    private NormalizedNode<?,?> dataNode;
-
-    @Before
-    public void setup() {
-        MockitoAnnotations.initMocks(this);
-
-        SerializedDOMDataBroker backendBroker = new SerializedDOMDataBroker(ImmutableMap.of(
-                org.opendaylight.mdsal.common.api.LogicalDatastoreType.OPERATIONAL, mockOperStore,
-                org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION, mockConfigStore),
-                MoreExecutors.newDirectExecutorService()) {
-            @Override
-            public ClassToInstanceMap<DOMDataBrokerExtension> getExtensions() {
-                return ImmutableClassToInstanceMap.<DOMDataBrokerExtension>builder().putAll(super.getExtensions())
-                        .put(DOMDataTreeCommitCohortRegistry.class, mockCommitCohortRegistry).build();
-            }
-        };
-
-        adapter = new LegacyDOMDataBrokerAdapter(backendBroker);
-
-        doReturn(Futures.immediateFuture(Boolean.TRUE)).when(mockConfigCommitCohort).canCommit();
-        doReturn(Futures.immediateFuture(null)).when(mockConfigCommitCohort).preCommit();
-        doReturn(Futures.immediateFuture(null)).when(mockConfigCommitCohort).commit();
-        doReturn(Futures.immediateFuture(null)).when(mockConfigCommitCohort).abort();
-
-        dataNode = ImmutableNodes.containerNode(TEST_QNAME);
-
-        doReturn(mockConfigWriteTx).when(mockConfigStore).newWriteOnlyTransaction();
-        doNothing().when(mockConfigWriteTx).write(TEST_PATH, dataNode);
-        doNothing().when(mockConfigWriteTx).merge(TEST_PATH, dataNode);
-        doNothing().when(mockConfigWriteTx).delete(TEST_PATH);
-        doNothing().when(mockConfigWriteTx).close();
-        doReturn(mockConfigCommitCohort).when(mockConfigWriteTx).ready();
-
-        doReturn(mockConfigReadTx).when(mockConfigStore).newReadOnlyTransaction();
-        doReturn(FluentFutures.immediateFluentFuture(Optional.of(dataNode))).when(mockConfigReadTx).read(TEST_PATH);
-        doReturn(FluentFutures.immediateFluentFuture(Boolean.TRUE)).when(mockConfigReadTx).exists(TEST_PATH);
-
-        doReturn(mockConfigReadWriteTx).when(mockConfigStore).newReadWriteTransaction();
-        doNothing().when(mockConfigReadWriteTx).write(TEST_PATH, dataNode);
-        doReturn(mockConfigCommitCohort).when(mockConfigReadWriteTx).ready();
-        doReturn(FluentFutures.immediateFluentFuture(Optional.of(dataNode)))
-                .when(mockConfigReadWriteTx).read(TEST_PATH);
-
-        DOMStoreTransactionChain mockTxChain = mock(DOMStoreTransactionChain.class);
-        doReturn(mockConfigReadTx).when(mockTxChain).newReadOnlyTransaction();
-        doReturn(mockConfigWriteTx).when(mockTxChain).newWriteOnlyTransaction();
-        doReturn(mockConfigReadWriteTx).when(mockTxChain).newReadWriteTransaction();
-        doReturn(mockTxChain).when(mockConfigStore).createTransactionChain();
-
-        doReturn(mock(DOMStoreTransactionChain.class)).when(mockOperStore).createTransactionChain();
-
-        doReturn(Futures.immediateFuture(Boolean.TRUE)).when(mockOperCommitCohort).canCommit();
-        doReturn(Futures.immediateFuture(null)).when(mockOperCommitCohort).preCommit();
-        doReturn(Futures.immediateFuture(null)).when(mockOperCommitCohort).commit();
-        doReturn(Futures.immediateFuture(null)).when(mockOperCommitCohort).abort();
-
-        doReturn(mockOperReadTx).when(mockOperStore).newReadOnlyTransaction();
-
-        doReturn(mockOperWriteTx).when(mockOperStore).newWriteOnlyTransaction();
-        doReturn(mockOperCommitCohort).when(mockOperWriteTx).ready();
-
-        doReturn(mockOperReadWriteTx).when(mockOperStore).newReadWriteTransaction();
-        doReturn(mockOperCommitCohort).when(mockOperReadWriteTx).ready();
-
-        DOMStoreTransactionChain mockOperTxChain = mock(DOMStoreTransactionChain.class);
-        doReturn(mockOperReadTx).when(mockOperTxChain).newReadOnlyTransaction();
-        doReturn(mockOperWriteTx).when(mockOperTxChain).newWriteOnlyTransaction();
-        doReturn(mockOperReadWriteTx).when(mockOperTxChain).newReadWriteTransaction();
-        doReturn(mockOperTxChain).when(mockOperStore).createTransactionChain();
-    }
-
-    @Test
-    public void testReadOnlyTransaction() throws Exception {
-        DOMDataReadOnlyTransaction tx = adapter.newReadOnlyTransaction();
-
-        // Test successful read
-
-        CheckedFuture<com.google.common.base.Optional<NormalizedNode<?, ?>>, ReadFailedException> readFuture =
-                tx.read(LogicalDatastoreType.CONFIGURATION, TEST_PATH);
-        com.google.common.base.Optional<NormalizedNode<?, ?>> readOptional = readFuture.get();
-        assertEquals("isPresent", true, readOptional.isPresent());
-        assertEquals("NormalizedNode", dataNode, readOptional.get());
-
-        // Test successful exists
-
-        CheckedFuture<Boolean, ReadFailedException> existsFuture =
-                tx.exists(LogicalDatastoreType.CONFIGURATION, TEST_PATH);
-        assertEquals("exists", Boolean.TRUE, existsFuture.get());
-
-        // Test failed read
-
-        String errorMsg = "mock read error";
-        Throwable cause = new RuntimeException();
-        doReturn(Futures.immediateFailedCheckedFuture(new org.opendaylight.mdsal.common.api.ReadFailedException(
-                errorMsg, cause))).when(mockConfigReadTx).read(TEST_PATH);
-
-        try {
-            tx.read(LogicalDatastoreType.CONFIGURATION, TEST_PATH).checkedGet();
-            fail("Expected ReadFailedException");
-        } catch (ReadFailedException e) {
-            assertEquals("getMessage", errorMsg, e.getMessage());
-            assertEquals("getCause", cause, e.getCause());
-        }
-
-        // Test close
-        tx.close();
-        verify(mockConfigReadTx).close();
-    }
-
-    @Test
-    public void testWriteOnlyTransaction() throws Exception {
-        // Test successful write operations and submit
-
-        DOMDataWriteTransaction tx = adapter.newWriteOnlyTransaction();
-
-        tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-        verify(mockConfigWriteTx).write(TEST_PATH, dataNode);
-
-        tx.merge(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-        verify(mockConfigWriteTx).merge(TEST_PATH, dataNode);
-
-        tx.delete(LogicalDatastoreType.CONFIGURATION, TEST_PATH);
-        verify(mockConfigWriteTx).delete(TEST_PATH);
-
-        tx.commit().get(5, TimeUnit.SECONDS);
-
-        InOrder inOrder = inOrder(mockConfigCommitCohort);
-        inOrder.verify(mockConfigCommitCohort).canCommit();
-        inOrder.verify(mockConfigCommitCohort).preCommit();
-        inOrder.verify(mockConfigCommitCohort).commit();
-
-        // Test cancel
-
-        tx = adapter.newWriteOnlyTransaction();
-        tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-        tx.cancel();
-        verify(mockConfigWriteTx).close();
-
-        // Test submit with OptimisticLockFailedException
-
-        String errorMsg = "mock OptimisticLockFailedException";
-        Throwable cause = new ConflictingModificationAppliedException(TEST_PATH, "mock");
-        doReturn(Futures.immediateFailedFuture(new org.opendaylight.mdsal.common.api.OptimisticLockFailedException(
-                errorMsg, cause))).when(mockConfigCommitCohort).canCommit();
-
-        try {
-            tx = adapter.newWriteOnlyTransaction();
-            tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-            commit(tx);
-            fail("Expected OptimisticLockFailedException");
-        } catch (OptimisticLockFailedException e) {
-            assertEquals("getMessage", errorMsg, e.getMessage());
-            assertEquals("getCause", cause, e.getCause());
-        }
-
-        // Test submit with TransactionCommitFailedException
-
-        errorMsg = "mock TransactionCommitFailedException";
-        cause = new DataValidationFailedException(TEST_PATH, "mock");
-        doReturn(Futures.immediateFailedFuture(new org.opendaylight.mdsal.common.api.TransactionCommitFailedException(
-                errorMsg, cause))).when(mockConfigCommitCohort).canCommit();
-
-        try {
-            tx = adapter.newWriteOnlyTransaction();
-            tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-            commit(tx);
-            fail("Expected TransactionCommitFailedException");
-        } catch (TransactionCommitFailedException e) {
-            assertEquals("getMessage", errorMsg, e.getMessage());
-            assertEquals("getCause", cause, e.getCause());
-        }
-
-        // Test submit with DataStoreUnavailableException
-
-        errorMsg = "mock NoShardLeaderException";
-        cause = new DataStoreUnavailableException("mock", new RuntimeException());
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockConfigCommitCohort).canCommit();
-
-        try {
-            tx = adapter.newWriteOnlyTransaction();
-            tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-            commit(tx);
-            fail("Expected TransactionCommitFailedException");
-        } catch (TransactionCommitFailedException e) {
-            assertEquals("getCause type", DataStoreUnavailableException.class, e.getCause().getClass());
-        }
-
-        // Test submit with RuntimeException
-
-        errorMsg = "mock RuntimeException";
-        cause = new RuntimeException(errorMsg);
-        doReturn(Futures.immediateFailedFuture(cause)).when(mockConfigCommitCohort).canCommit();
-
-        try {
-            tx = adapter.newWriteOnlyTransaction();
-            tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-            commit(tx);
-            fail("Expected TransactionCommitFailedException");
-        } catch (TransactionCommitFailedException e) {
-            assertEquals("getCause", cause, e.getCause());
-        }
-    }
-
-    @Test
-    public void testReadWriteTransaction() throws Exception {
-        DOMDataReadWriteTransaction tx = adapter.newReadWriteTransaction();
-
-        CheckedFuture<com.google.common.base.Optional<NormalizedNode<?, ?>>, ReadFailedException> readFuture =
-                tx.read(LogicalDatastoreType.CONFIGURATION, TEST_PATH);
-        com.google.common.base.Optional<NormalizedNode<?, ?>> readOptional = readFuture.get();
-        assertEquals("isPresent", true, readOptional.isPresent());
-        assertEquals("NormalizedNode", dataNode, readOptional.get());
-
-        tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-        verify(mockConfigReadWriteTx).write(TEST_PATH, dataNode);
-
-        tx.commit().get(5, TimeUnit.SECONDS);
-
-        InOrder inOrder = inOrder(mockConfigCommitCohort);
-        inOrder.verify(mockConfigCommitCohort).canCommit();
-        inOrder.verify(mockConfigCommitCohort).preCommit();
-        inOrder.verify(mockConfigCommitCohort).commit();
-    }
-
-    @SuppressWarnings("rawtypes")
-    @Test
-    public void testTransactionChain() throws Exception {
-        TransactionChainListener mockListener = mock(TransactionChainListener.class);
-        doNothing().when(mockListener).onTransactionChainSuccessful(anyObject());
-        doNothing().when(mockListener).onTransactionChainFailed(anyObject(), anyObject(), anyObject());
-
-        DOMTransactionChain chain = adapter.createTransactionChain(mockListener);
-
-        // Test read-only tx
-
-        DOMDataReadOnlyTransaction readTx = chain.newReadOnlyTransaction();
-
-        CheckedFuture<com.google.common.base.Optional<NormalizedNode<?, ?>>, ReadFailedException> readFuture =
-                readTx.read(LogicalDatastoreType.CONFIGURATION, TEST_PATH);
-        com.google.common.base.Optional<NormalizedNode<?, ?>> readOptional = readFuture.get();
-        assertEquals("isPresent", true, readOptional.isPresent());
-        assertEquals("NormalizedNode", dataNode, readOptional.get());
-
-        // Test write-only tx
-
-        DOMDataWriteTransaction writeTx = chain.newWriteOnlyTransaction();
-
-        writeTx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-        verify(mockConfigWriteTx).write(TEST_PATH, dataNode);
-        writeTx.commit().get(5, TimeUnit.SECONDS);
-
-        InOrder inOrder = inOrder(mockConfigCommitCohort);
-        inOrder.verify(mockConfigCommitCohort).canCommit();
-        inOrder.verify(mockConfigCommitCohort).preCommit();
-        inOrder.verify(mockConfigCommitCohort).commit();
-
-        // Test read-write tx
-
-        DOMDataReadWriteTransaction readWriteTx = chain.newReadWriteTransaction();
-
-        readFuture = readWriteTx.read(LogicalDatastoreType.CONFIGURATION, TEST_PATH);
-        readOptional = readFuture.get();
-        assertEquals("isPresent", true, readOptional.isPresent());
-        assertEquals("NormalizedNode", dataNode, readOptional.get());
-
-        chain.close();
-        verify(mockListener).onTransactionChainSuccessful(chain);
-
-        // Test failed chain
-
-        doReturn(Futures.immediateFailedFuture(new org.opendaylight.mdsal.common.api.TransactionCommitFailedException(
-                "mock", (Throwable)null))).when(mockConfigCommitCohort).canCommit();
-
-        chain = adapter.createTransactionChain(mockListener);
-
-        writeTx = chain.newWriteOnlyTransaction();
-
-        try {
-            writeTx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-            commit(writeTx);
-            fail("Expected TransactionCommitFailedException");
-        } catch (TransactionCommitFailedException e) {
-            // expected
-        }
-
-        ArgumentCaptor<AsyncTransaction> failedTx = ArgumentCaptor.forClass(AsyncTransaction.class);
-        verify(mockListener).onTransactionChainFailed(eq(chain), failedTx.capture(),
-                any(TransactionCommitFailedException.class));
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testDataTreeChangeListener() {
-        DOMDataTreeChangeService domDTCLService =
-                (DOMDataTreeChangeService) adapter.getSupportedExtensions().get(DOMDataTreeChangeService.class);
-        assertNotNull("DOMDataTreeChangeService not found", domDTCLService);
-
-        ArgumentCaptor<org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener> storeDTCL =
-                ArgumentCaptor.forClass(org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener.class);
-        ListenerRegistration<org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener> mockReg =
-                mock(ListenerRegistration.class);
-        doNothing().when(mockReg).close();
-        doAnswer(invocation -> storeDTCL.getValue()).when(mockReg).getInstance();
-        doReturn(mockReg).when(mockConfigStore).registerTreeChangeListener(eq(TEST_PATH),
-                storeDTCL.capture());
-
-        DOMDataTreeChangeListener brokerDTCL = mock(DOMDataTreeChangeListener.class);
-        ListenerRegistration<DOMDataTreeChangeListener> reg = domDTCLService.registerDataTreeChangeListener(
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, TEST_PATH), brokerDTCL);
-        assertEquals("getInstance", brokerDTCL, reg.getInstance());
-
-        Collection<DataTreeCandidate> changes = Arrays.asList(mock(DataTreeCandidate.class));
-        storeDTCL.getValue().onDataTreeChanged(changes);
-        verify(brokerDTCL).onDataTreeChanged(changes);
-
-        reg.close();
-        verify(mockReg).close();
-
-        // Test ClusteredDOMDataTreeChangeListener
-
-        ArgumentCaptor<org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener> storeClusteredDTCL =
-                ArgumentCaptor.forClass(org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener.class);
-        mockReg = mock(ListenerRegistration.class);
-        doReturn(mockReg).when(mockConfigStore).registerTreeChangeListener(eq(TEST_PATH),
-                storeClusteredDTCL.capture());
-
-        final ClusteredDOMDataTreeChangeListener brokerClusteredDTCL = mock(ClusteredDOMDataTreeChangeListener.class);
-        domDTCLService.registerDataTreeChangeListener(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
-                TEST_PATH), brokerClusteredDTCL);
-
-        assertTrue("Expected ClusteredDOMDataTreeChangeListener: " + storeClusteredDTCL.getValue(),
-                storeClusteredDTCL.getValue()
-                    instanceof org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener);
-        storeClusteredDTCL.getValue().onDataTreeChanged(changes);
-        verify(brokerClusteredDTCL).onDataTreeChanged(changes);
-    }
-
-    @SuppressWarnings("unchecked")
-    @Test
-    public void testDataTreeCommitCohortRegistry() {
-        org.opendaylight.controller.md.sal.dom.api.DOMDataTreeCommitCohortRegistry domCohortRegistry =
-            (org.opendaylight.controller.md.sal.dom.api.DOMDataTreeCommitCohortRegistry)
-                adapter.getSupportedExtensions().get(
-                    org.opendaylight.controller.md.sal.dom.api.DOMDataTreeCommitCohortRegistry.class);
-        assertNotNull("DOMDataTreeCommitCohortRegistry not found", domCohortRegistry);
-
-        DOMDataTreeCommitCohort mockCohort = mock(DOMDataTreeCommitCohort.class);
-        org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier treeId =
-                new org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier(
-                    org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION, TEST_PATH);
-        DOMDataTreeCommitCohortRegistration<DOMDataTreeCommitCohort> mockReg =
-                mock(DOMDataTreeCommitCohortRegistration.class);
-        doReturn(mockReg).when(mockCommitCohortRegistry).registerCommitCohort(treeId, mockCohort);
-
-        DOMDataTreeCommitCohortRegistration<DOMDataTreeCommitCohort> reg = domCohortRegistry.registerCommitCohort(
-                treeId, mockCohort);
-        assertEquals("DOMDataTreeCommitCohortRegistration", mockReg, reg);
-
-        verify(mockCommitCohortRegistry).registerCommitCohort(treeId, mockCohort);
-    }
-
-    @Test
-    @Deprecated
-    public void testSubmit() throws Exception {
-        DOMDataWriteTransaction tx = adapter.newWriteOnlyTransaction();
-
-        tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-        verify(mockConfigWriteTx).write(TEST_PATH, dataNode);
-
-        tx.submit().get(5, TimeUnit.SECONDS);
-
-        InOrder inOrder = inOrder(mockConfigCommitCohort);
-        inOrder.verify(mockConfigCommitCohort).canCommit();
-        inOrder.verify(mockConfigCommitCohort).preCommit();
-        inOrder.verify(mockConfigCommitCohort).commit();
-
-        String errorMsg = "mock OptimisticLockFailedException";
-        Throwable cause = new ConflictingModificationAppliedException(TEST_PATH, "mock");
-        doReturn(Futures.immediateFailedFuture(new org.opendaylight.mdsal.common.api.TransactionCommitFailedException(
-                errorMsg, cause))).when(mockConfigCommitCohort).canCommit();
-
-        try {
-            tx = adapter.newWriteOnlyTransaction();
-            tx.put(LogicalDatastoreType.CONFIGURATION, TEST_PATH, dataNode);
-            commit(tx);
-            fail("Expected TransactionCommitFailedException");
-        } catch (TransactionCommitFailedException e) {
-            assertEquals("getMessage", errorMsg, e.getMessage());
-            assertEquals("getCause", cause, e.getCause());
-        }
-    }
-
-    @SuppressWarnings("checkstyle:AvoidHidingCauseException")
-    private static void commit(DOMDataWriteTransaction tx)
-            throws TransactionCommitFailedException, InterruptedException, TimeoutException {
-        try {
-            tx.commit().get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            assertTrue("Expected TransactionCommitFailedException. Actual: " + e.getCause(),
-                    e.getCause() instanceof TransactionCommitFailedException);
-            throw (TransactionCommitFailedException)e.getCause();
-        }
-    }
-
-    private interface TestDOMStore extends DOMStore, DOMStoreTreeChangePublisher,
-            org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry {
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/pom.xml b/opendaylight/md-sal/sal-dom-spi/pom.xml
deleted file mode 100644 (file)
index 9422de7..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <groupId>org.opendaylight.controller</groupId>
-  <artifactId>sal-core-spi</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-spi</artifactId>
-    </dependency>
-  </dependencies>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/mount/SimpleDOMMountPoint.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/mount/SimpleDOMMountPoint.java
deleted file mode 100644 (file)
index 65aeb89..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.broker.spi.mount;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ClassToInstanceMap;
-import com.google.common.collect.ImmutableClassToInstanceMap;
-import java.util.Map.Entry;
-import java.util.Set;
-import org.opendaylight.controller.md.sal.dom.api.DOMMountPoint;
-import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public final class SimpleDOMMountPoint implements DOMMountPoint {
-
-    private final YangInstanceIdentifier identifier;
-    private final ClassToInstanceMap<DOMService> services;
-    private final SchemaContext schemaContext;
-
-    public static SimpleDOMMountPoint create(final YangInstanceIdentifier identifier,
-            final ClassToInstanceMap<DOMService> services, final SchemaContext ctx) {
-        return new SimpleDOMMountPoint(identifier, services, ctx);
-    }
-
-    private SimpleDOMMountPoint(final YangInstanceIdentifier identifier,
-            final ClassToInstanceMap<DOMService> services, final SchemaContext ctx) {
-        this.identifier = identifier;
-        this.services = ImmutableClassToInstanceMap.copyOf(services);
-        this.schemaContext = ctx;
-    }
-
-    @Override
-    public YangInstanceIdentifier getIdentifier() {
-        return identifier;
-    }
-
-    @Override
-    public SchemaContext getSchemaContext() {
-        return schemaContext;
-    }
-
-    @Override
-    public <T extends DOMService> Optional<T> getService(final Class<T> cls) {
-        return Optional.fromNullable(services.getInstance(cls));
-    }
-
-    public Set<Entry<Class<? extends DOMService>, DOMService>> getAllServices() {
-        return services.entrySet();
-    }
-
-    public ClassToInstanceMap<DOMService> getServices() {
-        return services;
-    }
-
-    @Override
-    public int hashCode() {
-        return identifier.hashCode();
-    }
-
-    @Override
-    public boolean equals(final Object obj) {
-        return this == obj || obj instanceof DOMMountPoint && identifier.equals(((DOMMountPoint) obj).getIdentifier());
-    }
-}
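
A minimal construction sketch for the mount point class removed above (illustrative helper; it assumes the caller supplies the identifier, an RPC service and a SchemaContext):

import com.google.common.collect.ImmutableClassToInstanceMap;
import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
import org.opendaylight.controller.md.sal.dom.api.DOMService;
import org.opendaylight.controller.md.sal.dom.broker.spi.mount.SimpleDOMMountPoint;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.model.api.SchemaContext;

// Illustrative helper: build a mount point exposing a single service.
final class MountPointExample {
    private MountPointExample() {
        // utility class
    }

    static SimpleDOMMountPoint createWithRpcService(final YangInstanceIdentifier id,
            final DOMRpcService rpcService, final SchemaContext ctx) {
        // The service map is copied into an immutable ClassToInstanceMap by the mount point.
        return SimpleDOMMountPoint.create(id,
            ImmutableClassToInstanceMap.<DOMService>builder().put(DOMRpcService.class, rpcService).build(), ctx);
    }
}
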
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/rpc/RpcRoutingStrategy.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/broker/spi/rpc/RpcRoutingStrategy.java
deleted file mode 100644 (file)
index 9c3191f..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.broker.spi.rpc;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
-import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
-import org.opendaylight.yangtools.yang.model.api.UnknownSchemaNode;
-
-public abstract class RpcRoutingStrategy implements Identifiable<QName> {
-
-    private static final QName CONTEXT_REFERENCE = QName.create("urn:opendaylight:yang:extension:yang-ext",
-            "2013-07-09", "context-reference").intern();
-    private final QName identifier;
-
-    private RpcRoutingStrategy(final QName identifier) {
-        this.identifier = Preconditions.checkNotNull(identifier);
-    }
-
-    /**
-     * Returns leaf QName in which RPC Route is stored.
-     *
-     * @return leaf QName in which RPC Route is stored
-     * @throws UnsupportedOperationException If RPC is not context routed.
-     *     ({@link #isContextBasedRouted()} returned <code>false</code>)
-     */
-    public abstract QName getLeaf();
-
-    /**
-     * Returns identity QName which represents RPC Routing context.
-     *
-     * @return identity QName which represents RPC Routing context
-     * @throws UnsupportedOperationException If RPC is not context routed.
-     *     ({@link #isContextBasedRouted()} returned <code>false</code>)
-     */
-    public abstract QName getContext();
-
-    @Override
-    public final QName getIdentifier() {
-        return identifier;
-    }
-
-    /**
-     * Returns true if RPC is routed by context.
-     *
-     * @return true if RPC is routed by context.
-     */
-    public abstract boolean isContextBasedRouted();
-
-    public static RpcRoutingStrategy from(final RpcDefinition rpc) {
-        ContainerSchemaNode input = rpc.getInput();
-        if (input != null) {
-            for (DataSchemaNode schemaNode : input.getChildNodes()) {
-                Optional<QName> context = getRoutingContext(schemaNode);
-                if (context.isPresent()) {
-                    return new RoutedRpcStrategy(rpc.getQName(), context.get(), schemaNode.getQName());
-                }
-            }
-        }
-        return new GlobalRpcStrategy(rpc.getQName());
-    }
-
-    public static Optional<QName> getRoutingContext(final DataSchemaNode schemaNode) {
-        for (UnknownSchemaNode extension : schemaNode.getUnknownSchemaNodes()) {
-            if (CONTEXT_REFERENCE.equals(extension.getNodeType())) {
-                return Optional.fromNullable(extension.getQName());
-            }
-        }
-        return Optional.absent();
-    }
-
-    private static final class RoutedRpcStrategy extends RpcRoutingStrategy {
-        private final QName context;
-        private final QName leaf;
-
-        private RoutedRpcStrategy(final QName identifier, final QName ctx, final QName leaf) {
-            super(identifier);
-            this.context = Preconditions.checkNotNull(ctx);
-            this.leaf = Preconditions.checkNotNull(leaf);
-        }
-
-        @Override
-        public QName getContext() {
-            return context;
-        }
-
-        @Override
-        public QName getLeaf() {
-            return leaf;
-        }
-
-        @Override
-        public boolean isContextBasedRouted() {
-            return true;
-        }
-    }
-
-    private static final class GlobalRpcStrategy extends RpcRoutingStrategy {
-
-        GlobalRpcStrategy(final QName identifier) {
-            super(identifier);
-        }
-
-        @Override
-        public boolean isContextBasedRouted() {
-            return false;
-        }
-
-        @Override
-        public QName getContext() {
-            throw new UnsupportedOperationException("Non-routed strategy does not have a context");
-        }
-
-        @Override
-        public QName getLeaf() {
-            throw new UnsupportedOperationException("Non-routed strategy does not have a context");
-        }
-    }
-}
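
A short sketch of how the strategy above was typically consumed (illustrative helper and names; RpcDefinition comes from the yangtools model API already imported by the removed file):

import org.opendaylight.controller.md.sal.dom.broker.spi.rpc.RpcRoutingStrategy;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.model.api.RpcDefinition;

// Illustrative helper: decide whether an RPC needs per-context routing.
final class RpcRoutingExample {
    private RpcRoutingExample() {
        // utility class
    }

    static QName routingContextOrNull(final RpcDefinition rpc) {
        final RpcRoutingStrategy strategy = RpcRoutingStrategy.from(rpc);
        // getContext()/getLeaf() throw UnsupportedOperationException for globally-routed RPCs,
        // so guard with isContextBasedRouted() first.
        return strategy.isContextBasedRouted() ? strategy.getContext() : null;
    }
}
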
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMDataTreeChangeListenerRegistration.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMDataTreeChangeListenerRegistration.java
deleted file mode 100644 (file)
index d1d4586..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
-
-/**
- * Abstract implementation of a ListenerRegistration constrained to subclasses
- * of {@link DOMDataTreeChangeListener}.
- *
- * @param <T> type of listener
- */
-public abstract class AbstractDOMDataTreeChangeListenerRegistration<T extends DOMDataTreeChangeListener>
-        extends AbstractListenerRegistration<T> {
-    protected AbstractDOMDataTreeChangeListenerRegistration(final T listener) {
-        super(listener);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMRpcImplementationRegistration.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMRpcImplementationRegistration.java
deleted file mode 100644 (file)
index 895509c..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
-
-/**
- * Abstract base class for {@link DOMRpcImplementationRegistration} implementations.
- */
-public abstract class AbstractDOMRpcImplementationRegistration<T extends DOMRpcImplementation>
-        extends AbstractObjectRegistration<T> implements DOMRpcImplementationRegistration<T> {
-    protected AbstractDOMRpcImplementationRegistration(final T instance) {
-        super(instance);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMRpcProviderService.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/AbstractDOMRpcProviderService.java
deleted file mode 100644 (file)
index 31a114b..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ImmutableSet;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-
-/**
- * Convenience abstract base class for {@link DOMRpcProviderService} implementations.
- */
-public abstract class AbstractDOMRpcProviderService implements DOMRpcProviderService {
-    @Override
-    public final <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(
-            final T implementation, final DOMRpcIdentifier... types) {
-        return registerRpcImplementation(implementation, ImmutableSet.copyOf(types));
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DOMNotificationSubscriptionListener.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DOMNotificationSubscriptionListener.java
deleted file mode 100644 (file)
index d16c153..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.annotations.Beta;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-
-/**
- * Listener which is notified when subscriptions change and
- * provides the set of notification types for which
- * subscriptions are currently in place.
- *
- */
-@Beta
-@SuppressFBWarnings(value = "NM_SAME_SIMPLE_NAME_AS_INTERFACE", justification = "Migration")
-public interface DOMNotificationSubscriptionListener
-        extends org.opendaylight.mdsal.dom.spi.DOMNotificationSubscriptionListener {
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DOMNotificationSubscriptionListenerRegistry.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DOMNotificationSubscriptionListenerRegistry.java
deleted file mode 100644 (file)
index 77e0d35..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.annotations.Beta;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-
-/**
- * Registry of {@link DOMNotificationSubscriptionListener}s
- * which listen for changes in notification types.
- *
- */
-@Beta
-public interface DOMNotificationSubscriptionListenerRegistry  {
-
-    <L extends DOMNotificationSubscriptionListener> ListenerRegistration<L> registerSubscriptionListener(L listener);
-
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DefaultDOMRpcResult.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/DefaultDOMRpcResult.java
deleted file mode 100644 (file)
index 925a356..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.Beta;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.io.Serializable;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Objects;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.concepts.Immutable;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Utility class implementing {@link DOMRpcResult}.
- */
-@Beta
-public final class DefaultDOMRpcResult implements DOMRpcResult, Immutable, Serializable {
-    private static final long serialVersionUID = 1L;
-
-    // Flagged as "Non-transient non-serializable instance field" - the Collection is Serializable but the RpcError
-    // interface isn't. In lieu of changing the interface, we assume the implementation is Serializable which is
-    // reasonable since the only implementation that is actually used is from the RpcResultBuilder.
-    @SuppressFBWarnings("SE_BAD_FIELD")
-    private final Collection<? extends RpcError> errors;
-
-    // Unfortunately the NormalizedNode interface isn't Serializable but we assume the implementations are.
-    @SuppressFBWarnings("SE_BAD_FIELD")
-    private final NormalizedNode<?, ?> result;
-
-    private static Collection<RpcError> asCollection(final RpcError... errors) {
-        if (errors.length == 0) {
-            return Collections.emptyList();
-        } else {
-            return Arrays.asList(errors);
-        }
-    }
-
-    public DefaultDOMRpcResult(final NormalizedNode<?, ?> result, final RpcError... errors) {
-        this(result, asCollection(errors));
-    }
-
-    public DefaultDOMRpcResult(final RpcError... errors) {
-        this(null, asCollection(errors));
-    }
-
-    public DefaultDOMRpcResult(final NormalizedNode<?, ?> result) {
-        this(result, Collections.emptyList());
-    }
-
-    public DefaultDOMRpcResult(final NormalizedNode<?, ?> result,
-            final @NonNull Collection<? extends RpcError> errors) {
-        this.result = result;
-        this.errors = requireNonNull(errors);
-    }
-
-    public DefaultDOMRpcResult(final @NonNull Collection<RpcError> errors) {
-        this(null, errors);
-    }
-
-    @Override
-    public Collection<? extends RpcError> getErrors() {
-        return errors;
-    }
-
-    @Override
-    public NormalizedNode<?, ?> getResult() {
-        return result;
-    }
-
-    @Override
-    public int hashCode() {
-        int ret = errors.hashCode();
-        if (result != null) {
-            ret = 31 * ret + result.hashCode();
-        }
-        return ret;
-    }
-
-    @Override
-    public boolean equals(final Object obj) {
-        if (this == obj) {
-            return true;
-        }
-        if (!(obj instanceof DefaultDOMRpcResult)) {
-            return false;
-        }
-
-        final DefaultDOMRpcResult other = (DefaultDOMRpcResult) obj;
-        if (!errors.equals(other.errors)) {
-            return false;
-        }
-        return Objects.equals(result, other.result);
-    }
-}
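
For context, a minimal sketch of how an RPC implementation could have wrapped its output or errors in the removed DefaultDOMRpcResult; the helper class name RpcResults and its methods are illustrative only, and the errors are assumed to be pre-built RpcError instances:

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
    import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
    import org.opendaylight.yangtools.yang.common.RpcError;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class RpcResults {
        private RpcResults() {
            // static helpers only
        }

        // Successful invocation: wrap the output node in an immediately completed future.
        static ListenableFuture<DOMRpcResult> success(final NormalizedNode<?, ?> output) {
            return Futures.immediateFuture(new DefaultDOMRpcResult(output));
        }

        // Failed invocation: no output node, only the supplied errors.
        static ListenableFuture<DOMRpcResult> failure(final RpcError... errors) {
            return Futures.immediateFuture(new DefaultDOMRpcResult(errors));
        }
    }
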
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataBroker.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataBroker.java
deleted file mode 100644 (file)
index 89b8311..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import java.util.Map;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBrokerExtension;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-
-/**
- * Utility {@link DOMDataBroker} implementation which forwards all interface
- * method invocations to a delegate instance.
- */
-public abstract class ForwardingDOMDataBroker extends ForwardingObject implements DOMDataBroker {
-    @Override
-    protected abstract @NonNull DOMDataBroker delegate();
-
-    @Override
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        return delegate().newReadOnlyTransaction();
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        return delegate().newReadWriteTransaction();
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        return delegate().newWriteOnlyTransaction();
-    }
-
-    @Override
-    public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
-        return delegate().createTransactionChain(listener);
-    }
-
-    @Override
-    public Map<Class<? extends DOMDataBrokerExtension>, DOMDataBrokerExtension> getSupportedExtensions() {
-        return delegate().getSupportedExtensions();
-    }
-}
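
The same forwarding pattern applies to the other Forwarding* classes removed below: extend the class, supply delegate(), and override only the calls to be intercepted. A hedged sketch, where the CountingDOMDataBroker name and the counting behaviour are purely illustrative:

    import java.util.concurrent.atomic.LongAdder;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
    import org.opendaylight.controller.md.sal.dom.spi.ForwardingDOMDataBroker;

    final class CountingDOMDataBroker extends ForwardingDOMDataBroker {
        private final LongAdder readWriteCount = new LongAdder();
        private final DOMDataBroker delegate;

        CountingDOMDataBroker(final DOMDataBroker delegate) {
            this.delegate = delegate;
        }

        @Override
        protected DOMDataBroker delegate() {
            return delegate;
        }

        // Intercept one factory method; every other call still goes straight to the delegate.
        @Override
        public DOMDataReadWriteTransaction newReadWriteTransaction() {
            readWriteCount.increment();
            return super.newReadWriteTransaction();
        }

        long readWriteTransactionsAllocated() {
            return readWriteCount.sum();
        }
    }
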
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataReadOnlyTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataReadOnlyTransaction.java
deleted file mode 100644 (file)
index 8174ab3..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Utility {@link DOMDataReadOnlyTransaction} implementation which forwards all interface
- * method invocations to a delegate instance.
- */
-public abstract class ForwardingDOMDataReadOnlyTransaction extends ForwardingObject
-        implements DOMDataReadOnlyTransaction {
-    @Override
-    protected abstract @NonNull DOMDataReadOnlyTransaction delegate();
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return delegate().read(store, path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return delegate().exists(store, path);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate().getIdentifier();
-    }
-
-    @Override
-    public void close() {
-        delegate().close();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataReadWriteTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataReadWriteTransaction.java
deleted file mode 100644 (file)
index 69e7cc2..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.base.Optional;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.FluentFuture;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Utility {@link DOMDataReadWriteTransaction} implementation which forwards all interface
- * method invocations to a delegate instance.
- */
-public abstract class ForwardingDOMDataReadWriteTransaction extends ForwardingObject
-        implements DOMDataReadWriteTransaction {
-    @Override
-    protected abstract @NonNull DOMDataReadWriteTransaction delegate();
-
-    @Override
-    public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return delegate().read(store, path);
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final LogicalDatastoreType store,
-            final YangInstanceIdentifier path) {
-        return delegate().exists(store, path);
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return delegate().getIdentifier();
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        delegate().put(store, path, data);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        delegate().merge(store, path, data);
-    }
-
-    @Override
-    public boolean cancel() {
-        return delegate().cancel();
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        delegate().delete(store, path);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return delegate().commit();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataWriteTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMDataWriteTransaction.java
deleted file mode 100644 (file)
index b260937..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.FluentFuture;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Utility {@link DOMDataWriteTransaction} implementation which forwards all interface
- * method invocations to a delegate instance.
- */
-public abstract class ForwardingDOMDataWriteTransaction extends ForwardingObject implements DOMDataWriteTransaction {
-    @Override
-    protected abstract @NonNull DOMDataWriteTransaction delegate();
-
-    @Override
-    public Object getIdentifier() {
-        return delegate().getIdentifier();
-    }
-
-    @Override
-    public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        delegate().put(store, path, data);
-    }
-
-    @Override
-    public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        delegate().merge(store, path, data);
-    }
-
-    @Override
-    public boolean cancel() {
-        return delegate().cancel();
-    }
-
-    @Override
-    public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        delegate().delete(store, path);
-    }
-
-    @Override
-    public FluentFuture<? extends CommitInfo> commit() {
-        return delegate().commit();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMNotificationPublishService.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMNotificationPublishService.java
deleted file mode 100644 (file)
index 351e885..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotification;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationPublishService;
-
-/**
- * Utility implementation of {@link DOMNotificationPublishService} which forwards
- * all requests to a delegate instance.
- */
-public abstract class ForwardingDOMNotificationPublishService extends ForwardingObject
-        implements DOMNotificationPublishService {
-    @Override
-    protected abstract DOMNotificationPublishService delegate();
-
-    @Override
-    public ListenableFuture<?> putNotification(final DOMNotification notification) throws InterruptedException {
-        return delegate().putNotification(notification);
-    }
-
-    @Override
-    public ListenableFuture<?> offerNotification(final DOMNotification notification) {
-        return delegate().offerNotification(notification);
-    }
-
-    @Override
-    public ListenableFuture<?> offerNotification(final DOMNotification notification, final long timeout,
-            final TimeUnit unit) throws InterruptedException {
-        return delegate().offerNotification(notification, timeout, unit);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMNotificationService.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMNotificationService.java
deleted file mode 100644 (file)
index b40c428..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import java.util.Collection;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMNotificationService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Utility implementation of a {@link DOMNotificationService} which forwards all requests
- * to a delegate instance.
- */
-public abstract class ForwardingDOMNotificationService extends ForwardingObject implements DOMNotificationService {
-    @Override
-    protected abstract DOMNotificationService delegate();
-
-    @Override
-    public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener,
-            final Collection<SchemaPath> types) {
-        return delegate().registerNotificationListener(listener, types);
-    }
-
-    @Override
-    public <T extends DOMNotificationListener> ListenerRegistration<T> registerNotificationListener(final T listener,
-            final SchemaPath... types) {
-        return delegate().registerNotificationListener(listener, types);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcImplementation.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcImplementation.java
deleted file mode 100644 (file)
index a0b2075..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Utility implementation which implements {@link DOMRpcImplementation} by forwarding requests to
- * a backing delegate.
- */
-public abstract class ForwardingDOMRpcImplementation extends ForwardingObject implements DOMRpcImplementation {
-    @Override
-    protected abstract @NonNull DOMRpcImplementation delegate();
-
-    @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final DOMRpcIdentifier type,
-            final NormalizedNode<?, ?> input) {
-        return delegate().invokeRpc(type, input);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcProviderService.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcProviderService.java
deleted file mode 100644 (file)
index 8c1ac53..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import java.util.Set;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-
-/**
- * Utility class which implements {@link DOMRpcProviderService} by forwarding
- * requests to a backing instance.
- */
-public abstract class ForwardingDOMRpcProviderService extends ForwardingObject implements DOMRpcProviderService {
-    @Override
-    protected abstract @NonNull DOMRpcProviderService delegate();
-
-    @Override
-    public <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(
-            final T implementation, final DOMRpcIdentifier... types) {
-        return delegate().registerRpcImplementation(implementation, types);
-    }
-
-    @Override
-    public <T extends DOMRpcImplementation> DOMRpcImplementationRegistration<T> registerRpcImplementation(
-            final T implementation, final Set<DOMRpcIdentifier> types) {
-        return delegate().registerRpcImplementation(implementation, types);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcResult.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcResult.java
deleted file mode 100644 (file)
index d5e04e2..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import java.util.Collection;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * Utility class which implements {@link DOMRpcResult} by forwarding all methods
- * to a backing instance.
- */
-public abstract class ForwardingDOMRpcResult extends ForwardingObject implements DOMRpcResult {
-    @Override
-    protected abstract @NonNull DOMRpcResult delegate();
-
-    @Override
-    public Collection<? extends RpcError> getErrors() {
-        return delegate().getErrors();
-    }
-
-    @Override
-    public NormalizedNode<?, ?> getResult() {
-        return delegate().getResult();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcService.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMRpcService.java
deleted file mode 100644 (file)
index 887ae8a..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Utility {@link DOMRpcService} which forwards all requests to a backing delegate instance.
- */
-public abstract class ForwardingDOMRpcService extends ForwardingObject implements DOMRpcService {
-    @Override
-    protected abstract @NonNull DOMRpcService delegate();
-
-    @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final SchemaPath type,
-            final NormalizedNode<?, ?> input) {
-        return delegate().invokeRpc(type, input);
-    }
-
-    @Override
-    public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(final T listener) {
-        return delegate().registerRpcListener(listener);
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMTransactionChain.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/md/sal/dom/spi/ForwardingDOMTransactionChain.java
deleted file mode 100644 (file)
index 891079a..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.spi;
-
-import com.google.common.collect.ForwardingObject;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
-
-/**
- * Utility {@link DOMTransactionChain} implementation which forwards all interface
- * method invocations to a delegate instance.
- */
-public abstract class ForwardingDOMTransactionChain extends ForwardingObject implements DOMTransactionChain {
-    @Override
-    protected abstract @NonNull DOMTransactionChain delegate();
-
-    @Override
-    public void close() {
-        delegate().close();
-    }
-
-    @Override
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
-        return delegate().newReadOnlyTransaction();
-    }
-
-    @Override
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
-        return delegate().newReadWriteTransaction();
-    }
-
-    @Override
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
-        return delegate().newWriteOnlyTransaction();
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractDOMStoreTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractDOMStoreTransaction.java
deleted file mode 100644 (file)
index 20166a4..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import static java.util.Objects.requireNonNull;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.MoreObjects;
-import com.google.common.base.MoreObjects.ToStringHelper;
-import org.eclipse.jdt.annotation.NonNull;
-import org.eclipse.jdt.annotation.Nullable;
-
-/**
- * Abstract DOM Store Transaction.
- *
- * <p>
- * Convenience super implementation of a DOM Store transaction which provides
- * a common implementation of {@link #toString()} and {@link #getIdentifier()}.
- *
- * <p>
- * It can optionally capture the context where it was allocated.
- *
- * @param <T> identifier type
- */
-@Beta
-public abstract class AbstractDOMStoreTransaction<T> implements DOMStoreTransaction {
-    private final Throwable debugContext;
-    private final @NonNull T identifier;
-
-    protected AbstractDOMStoreTransaction(final @NonNull T identifier) {
-        this(identifier, false);
-    }
-
-    protected AbstractDOMStoreTransaction(final @NonNull T identifier, final boolean debug) {
-        this.identifier = requireNonNull(identifier, "Identifier must not be null.");
-        this.debugContext = debug ? new Throwable().fillInStackTrace() : null;
-    }
-
-    @Override
-    public final T getIdentifier() {
-        return identifier;
-    }
-
-    /**
-     * Return the context in which this transaction was allocated.
-     *
-     * @return The context in which this transaction was allocated, or null if the context was not recorded.
-     */
-    public final @Nullable Throwable getDebugContext() {
-        return debugContext;
-    }
-
-    @Override
-    public final String toString() {
-        return addToStringAttributes(MoreObjects.toStringHelper(this)).toString();
-    }
-
-    /**
-     * Add class-specific toString attributes.
-     *
-     * @param toStringHelper
-     *            ToStringHelper instance
-     * @return ToStringHelper instance which was passed in
-     */
-    protected ToStringHelper addToStringAttributes(final @NonNull ToStringHelper toStringHelper) {
-        return toStringHelper.add("id", identifier);
-    }
-}
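
As a rough illustration of what this base class bought a subclass (identifier handling, toString() and optional allocation-context capture), a hypothetical no-op transaction only has to provide close(); the NoopStoreTransaction name is invented for this sketch:

    import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;

    // Illustrative only: a transaction identified by a String that releases nothing on close.
    final class NoopStoreTransaction extends AbstractDOMStoreTransaction<String> {
        NoopStoreTransaction(final String id, final boolean debug) {
            // debug == true captures the allocation stack trace for later troubleshooting
            super(id, debug);
        }

        @Override
        public void close() {
            // nothing to release in this sketch
        }
    }
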
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractDOMStoreTreeChangePublisher.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractDOMStoreTreeChangePublisher.java
deleted file mode 100644 (file)
index 0ee2d79..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import java.util.Collection;
-import java.util.List;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
-import org.opendaylight.mdsal.dom.spi.AbstractRegistrationTree;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeNode;
-import org.opendaylight.mdsal.dom.spi.RegistrationTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Abstract base class for {@link DOMStoreTreeChangePublisher} implementations.
- */
-public abstract class AbstractDOMStoreTreeChangePublisher
-        extends AbstractRegistrationTree<AbstractDOMDataTreeChangeListenerRegistration<?>>
-        implements DOMStoreTreeChangePublisher {
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMStoreTreeChangePublisher.class);
-
-    /**
-     * Callback for subclass to notify specified registrations of a candidate at a specified path. This method is
-     * guaranteed to be only called from within {@link #processCandidateTree(DataTreeCandidate)}.
-     *
-     * @param registrations Registrations which are affected by the candidate node
-     * @param path Path of changed candidate node. Guaranteed to match the path specified by the registration
-     * @param node Candidate node
-     */
-    protected abstract void notifyListeners(@NonNull Collection<AbstractDOMDataTreeChangeListenerRegistration<?>>
-            registrations, @NonNull YangInstanceIdentifier path, @NonNull DataTreeCandidateNode node);
-
-    /**
-     * Callback notifying the subclass that the specified registration is being closed and its user no longer
-     * wishes to receive notifications. This notification is invoked while the
-     * {@link org.opendaylight.yangtools.concepts.ListenerRegistration#close()}
-     * method is executing. Subclasses can use this callback to properly remove any delayed notifications pending
-     * towards the registration.
-     *
-     * @param registration Registration which is being closed
-     */
-    protected abstract void registrationRemoved(@NonNull AbstractDOMDataTreeChangeListenerRegistration<?> registration);
-
-    /**
-     * Process a candidate tree with respect to registered listeners.
-     *
-     * @param candidate candidate tree which needs to be processed
-     */
-    protected final void processCandidateTree(final @NonNull DataTreeCandidate candidate) {
-        final DataTreeCandidateNode node = candidate.getRootNode();
-        if (node.getModificationType() == ModificationType.UNMODIFIED) {
-            LOG.debug("Skipping unmodified candidate {}", candidate);
-            return;
-        }
-
-        try (RegistrationTreeSnapshot<AbstractDOMDataTreeChangeListenerRegistration<?>> snapshot = takeSnapshot()) {
-            lookupAndNotify(candidate.getRootPath().getPathArguments(), 0, snapshot.getRootNode(), candidate);
-        }
-    }
-
-    @Override
-    public final <L extends DOMDataTreeChangeListener> AbstractDOMDataTreeChangeListenerRegistration<L>
-            registerTreeChangeListener(final YangInstanceIdentifier treeId, final L listener) {
-        // Take the write lock
-        takeLock();
-        try {
-            final RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> node =
-                    findNodeFor(treeId.getPathArguments());
-            final AbstractDOMDataTreeChangeListenerRegistration<L> reg =
-                new AbstractDOMDataTreeChangeListenerRegistration<L>(listener) {
-                    @Override
-                    protected void removeRegistration() {
-                        AbstractDOMStoreTreeChangePublisher.this.removeRegistration(node, this);
-                        registrationRemoved(this);
-                    }
-            };
-
-            addRegistration(node, reg);
-            return reg;
-        } finally {
-            // Always release the lock
-            releaseLock();
-        }
-    }
-
-    private void lookupAndNotify(final List<PathArgument> args, final int offset,
-            final RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> node,
-            final DataTreeCandidate candidate) {
-        if (args.size() != offset) {
-            final PathArgument arg = args.get(offset);
-
-            final RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> exactChild =
-                    node.getExactChild(arg);
-            if (exactChild != null) {
-                lookupAndNotify(args, offset + 1, exactChild, candidate);
-            }
-
-            for (RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> c :
-                    node.getInexactChildren(arg)) {
-                lookupAndNotify(args, offset + 1, c, candidate);
-            }
-        } else {
-            notifyNode(candidate.getRootPath(), node, candidate.getRootNode());
-        }
-    }
-
-    private void notifyNode(final YangInstanceIdentifier path,
-            final RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> regNode,
-            final DataTreeCandidateNode candNode) {
-        if (candNode.getModificationType() == ModificationType.UNMODIFIED) {
-            LOG.debug("Skipping unmodified candidate {}", path);
-            return;
-        }
-
-        final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> regs = regNode.getRegistrations();
-        if (!regs.isEmpty()) {
-            notifyListeners(regs, path, candNode);
-        }
-
-        for (DataTreeCandidateNode candChild : candNode.getChildNodes()) {
-            if (candChild.getModificationType() != ModificationType.UNMODIFIED) {
-                final RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> regChild =
-                        regNode.getExactChild(candChild.getIdentifier());
-                if (regChild != null) {
-                    notifyNode(path.node(candChild.getIdentifier()), regChild, candChild);
-                }
-
-                for (RegistrationTreeNode<AbstractDOMDataTreeChangeListenerRegistration<?>> rc :
-                        regNode.getInexactChildren(candChild.getIdentifier())) {
-                    notifyNode(path.node(candChild.getIdentifier()), rc, candChild);
-                }
-            }
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractSnapshotBackedTransactionChain.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractSnapshotBackedTransactionChain.java
deleted file mode 100644 (file)
index f80c800..0000000
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Preconditions;
-import java.util.AbstractMap.SimpleEntry;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Abstract implementation of the {@link DOMStoreTransactionChain} interface relying on a {@link DataTreeSnapshot}
- * supplier and a backend commit coordinator.
- *
- * @param <T> transaction identifier type
- */
-@Beta
-public abstract class AbstractSnapshotBackedTransactionChain<T> extends TransactionReadyPrototype<T>
-        implements DOMStoreTransactionChain {
-    private abstract static class State {
-        /**
-         * Allocate a new snapshot.
-         *
-         * @return A new snapshot
-         */
-        protected abstract DataTreeSnapshot getSnapshot(Object transactionId);
-    }
-
-    private static final class Idle extends State {
-        private final AbstractSnapshotBackedTransactionChain<?> chain;
-
-        Idle(final AbstractSnapshotBackedTransactionChain<?> chain) {
-            this.chain = Preconditions.checkNotNull(chain);
-        }
-
-        @Override
-        protected DataTreeSnapshot getSnapshot(Object transactionId) {
-            return chain.takeSnapshot();
-        }
-    }
-
-    /**
-     * We have a transaction out there.
-     */
-    private static final class Allocated extends State {
-        private static final AtomicReferenceFieldUpdater<Allocated, DataTreeSnapshot> SNAPSHOT_UPDATER =
-                AtomicReferenceFieldUpdater.newUpdater(Allocated.class, DataTreeSnapshot.class, "snapshot");
-        private final DOMStoreWriteTransaction transaction;
-        private volatile DataTreeSnapshot snapshot;
-
-        Allocated(final DOMStoreWriteTransaction transaction) {
-            this.transaction = Preconditions.checkNotNull(transaction);
-        }
-
-        public DOMStoreWriteTransaction getTransaction() {
-            return transaction;
-        }
-
-        @Override
-        protected DataTreeSnapshot getSnapshot(Object transactionId) {
-            final DataTreeSnapshot ret = snapshot;
-            Preconditions.checkState(ret != null,
-                    "Could not get snapshot for transaction %s - previous transaction %s is not ready yet",
-                    transactionId, transaction.getIdentifier());
-            return ret;
-        }
-
-        void setSnapshot(final DataTreeSnapshot snapshot) {
-            final boolean success = SNAPSHOT_UPDATER.compareAndSet(this, null, snapshot);
-            Preconditions.checkState(success, "Transaction %s has already been marked as ready",
-                    transaction.getIdentifier());
-        }
-    }
-
-    /**
-     * Chain is logically shut down, no further allocation allowed.
-     */
-    private static final class Shutdown extends State {
-        private final String message;
-
-        Shutdown(final String message) {
-            this.message = Preconditions.checkNotNull(message);
-        }
-
-        @Override
-        protected DataTreeSnapshot getSnapshot(Object transactionId) {
-            throw new IllegalStateException(message);
-        }
-    }
-
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<AbstractSnapshotBackedTransactionChain, State> STATE_UPDATER =
-            AtomicReferenceFieldUpdater.newUpdater(AbstractSnapshotBackedTransactionChain.class, State.class, "state");
-    private static final Logger LOG = LoggerFactory.getLogger(AbstractSnapshotBackedTransactionChain.class);
-    private static final Shutdown CLOSED = new Shutdown("Transaction chain is closed");
-    private static final Shutdown FAILED = new Shutdown("Transaction chain has failed");
-    private final Idle idleState;
-    private volatile State state;
-
-    protected AbstractSnapshotBackedTransactionChain() {
-        idleState = new Idle(this);
-        state = idleState;
-    }
-
-    private Entry<State, DataTreeSnapshot> getSnapshot(T transactionId) {
-        final State localState = state;
-        return new SimpleEntry<>(localState, localState.getSnapshot(transactionId));
-    }
-
-    private boolean recordTransaction(final State expected, final DOMStoreWriteTransaction transaction) {
-        final State localState = new Allocated(transaction);
-        return STATE_UPDATER.compareAndSet(this, expected, localState);
-    }
-
-    @Override
-    public final DOMStoreReadTransaction newReadOnlyTransaction() {
-        return newReadOnlyTransaction(nextTransactionIdentifier());
-    }
-
-    protected DOMStoreReadTransaction newReadOnlyTransaction(T transactionId) {
-        final Entry<State, DataTreeSnapshot> entry = getSnapshot(transactionId);
-        return SnapshotBackedTransactions.newReadTransaction(transactionId, getDebugTransactions(), entry.getValue());
-    }
-
-    @Override
-    public final DOMStoreReadWriteTransaction newReadWriteTransaction() {
-        return newReadWriteTransaction(nextTransactionIdentifier());
-    }
-
-    protected DOMStoreReadWriteTransaction newReadWriteTransaction(T transactionId) {
-        Entry<State, DataTreeSnapshot> entry;
-        DOMStoreReadWriteTransaction ret;
-
-        do {
-            entry = getSnapshot(transactionId);
-            ret = new SnapshotBackedReadWriteTransaction<>(transactionId, getDebugTransactions(), entry.getValue(),
-                    this);
-        } while (!recordTransaction(entry.getKey(), ret));
-
-        return ret;
-    }
-
-    @Override
-    public final DOMStoreWriteTransaction newWriteOnlyTransaction() {
-        return newWriteOnlyTransaction(nextTransactionIdentifier());
-    }
-
-    protected DOMStoreWriteTransaction newWriteOnlyTransaction(T transactionId) {
-        Entry<State, DataTreeSnapshot> entry;
-        DOMStoreWriteTransaction ret;
-
-        do {
-            entry = getSnapshot(transactionId);
-            ret = new SnapshotBackedWriteTransaction<>(transactionId, getDebugTransactions(), entry.getValue(), this);
-        } while (!recordTransaction(entry.getKey(), ret));
-
-        return ret;
-    }
-
-    @Override
-    protected final void transactionAborted(final SnapshotBackedWriteTransaction<T> tx) {
-        final State localState = state;
-        if (localState instanceof Allocated) {
-            final Allocated allocated = (Allocated)localState;
-            if (allocated.getTransaction().equals(tx)) {
-                final boolean success = STATE_UPDATER.compareAndSet(this, localState, idleState);
-                if (!success) {
-                    LOG.warn(
-                        "Transaction {} aborted, but chain {} state already transitioned from {} to {}, very strange",
-                        tx, this, localState, state);
-                }
-            }
-        }
-    }
-
-    @Override
-    protected final DOMStoreThreePhaseCommitCohort transactionReady(
-            final SnapshotBackedWriteTransaction<T> tx,
-            final DataTreeModification tree,
-            final Exception readyError) {
-
-        final State localState = state;
-
-        if (localState instanceof Allocated) {
-            final Allocated allocated = (Allocated)localState;
-            final DOMStoreWriteTransaction transaction = allocated.getTransaction();
-            Preconditions.checkState(tx.equals(transaction), "Mis-ordered ready transaction %s last allocated was %s",
-                    tx, transaction);
-            allocated.setSnapshot(tree);
-        } else {
-            LOG.debug("Ignoring transaction {} readiness due to state {}", tx, localState);
-        }
-
-        return createCohort(tx, tree, readyError);
-    }
-
-    @Override
-    public final void close() {
-        final State localState = state;
-
-        do {
-            Preconditions.checkState(!CLOSED.equals(localState), "Transaction chain %s has been closed", this);
-
-            if (FAILED.equals(localState)) {
-                LOG.debug("Ignoring user close in failed state");
-                return;
-            }
-        } while (!STATE_UPDATER.compareAndSet(this, localState, CLOSED));
-    }
-
-    /**
-     * Notify the base logic that a previously-submitted transaction has been committed successfully.
-     *
-     * @param transaction Transaction which completed successfully.
-     */
-    protected final void onTransactionCommited(final SnapshotBackedWriteTransaction<T> transaction) {
-        // If the committed transaction was the one we allocated last,
-        // we clear it and the ready snapshot, so the next transaction
-        // allocated refers to the data tree directly.
-        final State localState = state;
-
-        if (!(localState instanceof Allocated)) {
-            // This can legally happen if the chain is shut down before the transaction was committed
-            // by the backend.
-            LOG.debug("Ignoring successful transaction {} in state {}", transaction, localState);
-            return;
-        }
-
-        final Allocated allocated = (Allocated)localState;
-        final DOMStoreWriteTransaction tx = allocated.getTransaction();
-        if (!tx.equals(transaction)) {
-            LOG.debug("Ignoring non-latest successful transaction {} in state {}", transaction, allocated);
-            return;
-        }
-
-        if (!STATE_UPDATER.compareAndSet(this, localState, idleState)) {
-            LOG.debug("Transaction chain {} has already transitioned from {} to {}, not making it idle",
-                    this, localState, state);
-        }
-    }
-
-    /**
-     * Notify the base logic that a previously-submitted transaction has failed.
-     *
-     * @param transaction Transaction which failed.
-     * @param cause Failure cause
-     */
-    protected final void onTransactionFailed(final SnapshotBackedWriteTransaction<T> transaction,
-            final Throwable cause) {
-        LOG.debug("Transaction chain {} failed on transaction {}", this, transaction, cause);
-        state = FAILED;
-    }
-
-    /**
-     * Return the next transaction identifier.
-     *
-     * @return transaction identifier.
-     */
-    protected abstract T nextTransactionIdentifier();
-
-    /**
-     * Inquire as to whether transactions should record their allocation context.
-     *
-     * @return True if allocation context should be recorded.
-     */
-    protected abstract boolean getDebugTransactions();
-
-    /**
-     * Take a fresh {@link DataTreeSnapshot} from the backend.
-     *
-     * @return A new snapshot.
-     */
-    protected abstract DataTreeSnapshot takeSnapshot();
-
-    /**
-     * Create a cohort for driving the transaction through the commit process.
-     *
-     * @param transaction Transaction handle
-     * @param modification {@link DataTreeModification} which needs to be applied to the backend
-     * @param operationError Any previous error that could be reported through three phase commit
-     * @return A {@link DOMStoreThreePhaseCommitCohort} cohort.
-     */
-    protected abstract DOMStoreThreePhaseCommitCohort createCohort(SnapshotBackedWriteTransaction<T> transaction,
-                                                                   DataTreeModification modification,
-                                                                   Exception operationError);
-}
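
A concrete chain only has to supply identifiers, a snapshot source and a commit cohort; the state machine above handles the rest. A hedged sketch, where ChainBackend is a made-up interface standing in for whatever snapshot supplier and commit coordinator a real store would use:

    import java.util.concurrent.atomic.AtomicLong;
    import org.opendaylight.controller.sal.core.spi.data.AbstractSnapshotBackedTransactionChain;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
    import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
    import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
    import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;

    // Hypothetical backend contract: not part of the removed API, introduced here only to keep
    // the sketch self-contained.
    interface ChainBackend {
        DataTreeSnapshot takeSnapshot();

        DOMStoreThreePhaseCommitCohort createCohort(SnapshotBackedWriteTransaction<Long> tx,
            DataTreeModification modification, Exception operationError);
    }

    // Illustrative chain: Long transaction identifiers, optional debug capture, everything else
    // delegated to the backend.
    final class SimpleTransactionChain extends AbstractSnapshotBackedTransactionChain<Long> {
        private final AtomicLong nextId = new AtomicLong();
        private final ChainBackend backend;
        private final boolean debug;

        SimpleTransactionChain(final ChainBackend backend, final boolean debug) {
            this.backend = backend;
            this.debug = debug;
        }

        @Override
        protected Long nextTransactionIdentifier() {
            return nextId.incrementAndGet();
        }

        @Override
        protected boolean getDebugTransactions() {
            return debug;
        }

        @Override
        protected DataTreeSnapshot takeSnapshot() {
            return backend.takeSnapshot();
        }

        @Override
        protected DOMStoreThreePhaseCommitCohort createCohort(final SnapshotBackedWriteTransaction<Long> tx,
                final DataTreeModification modification, final Exception operationError) {
            return backend.createCohort(tx, modification, operationError);
        }
    }
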
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStore.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStore.java
deleted file mode 100644 (file)
index 8adebe1..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-/**
- * DOM Data Store.
- *
- * <p>
- * DOM Data Store provides transactional tree-like storage for YANG-modeled
- * entities described by YANG schema and represented by {@link NormalizedNode}.
- *
- * <p>
- * Read and write access to stored data is provided only via transactions
- * created using {@link #newReadOnlyTransaction()},
- * {@link #newWriteOnlyTransaction()} and {@link #newReadWriteTransaction()}, or
- * by creating {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain}.
- *
- */
-public interface DOMStore extends DOMStoreTransactionFactory {
-    /**
-     * Creates a new transaction chain.
-     *
-     * <p>
-     * Transactions in a chain need to be committed in sequence and each
-     * transaction should see the effects of previous transactions as if they
-     * happened.
-     *
-     * @see DOMStoreTransactionChain for more information.
-     * @return Newly created transaction chain.
-     */
-    DOMStoreTransactionChain createTransactionChain();
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreReadTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreReadTransaction.java
deleted file mode 100644 (file)
index ede5bae..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public interface DOMStoreReadTransaction extends DOMStoreTransaction {
-
-    /**
-     * Reads data from the logical data store located at the provided path.
-     *
-     * @param path
-     *            Path which uniquely identifies the subtree which the client wants to
-     *            read
-     * @return a CheckedFuture containing the result of the read. The Future blocks until the
-     *         read operation is complete. Once complete:
-     *         <ul>
-     *         <li>If the data at the supplied path exists, the Future returns an Optional object
-     *         containing the data.</li>
-     *         <li>If the data at the supplied path does not exist, the Future returns
-     *         Optional#absent().</li>
-     *         <li>If the read of the data fails, the Future will fail with a
-     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
-     *         </ul>
-     */
-    CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(YangInstanceIdentifier path);
-
-    /**
-     * Checks if data is available in the logical data store located at the provided path.
-     *
-     * <p>
-     * Note: a successful result from this method makes no guarantee that a subsequent call to {@link #read}
-     * will succeed. It is possible that the data resides in a data store on a remote node and, if that
-     * node goes down or a network failure occurs, a subsequent read would fail. Another scenario is if
-     * the data is deleted in between the calls to <code>exists</code> and <code>read</code>.
-     *
-     * @param path
-     *            Path which uniquely identifies the subtree which the client wants to
-     *            check the existence of
-     * @return a CheckedFuture containing the result of the check.
-     *         <ul>
-     *         <li>If the data at the supplied path exists, the Future returns a Boolean
-     *         whose value is true, false otherwise</li>
-     *         <li>If checking for the data fails, the Future will fail with a
-     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
-     *         </ul>
-     */
-    CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path);
-}
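
A small sketch of the blocking consumption pattern this CheckedFuture-based contract implied; the StoreReads helper is illustrative and simply rethrows failures as ReadFailedException via checkedGet():

    import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class StoreReads {
        private StoreReads() {
            // static helpers only
        }

        // Blocking read: returns the node at the path, or null when it is absent.
        static NormalizedNode<?, ?> readOrNull(final DOMStoreReadTransaction tx,
                final YangInstanceIdentifier path) throws ReadFailedException {
            return tx.read(path).checkedGet().orNull();
        }

        // Cheap existence probe; see the note above about its weak guarantees.
        static boolean exists(final DOMStoreReadTransaction tx, final YangInstanceIdentifier path)
                throws ReadFailedException {
            return tx.exists(path).checkedGet();
        }
    }
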
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreReadWriteTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreReadWriteTransaction.java
deleted file mode 100644 (file)
index 7277406..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-/**
- * Combination of a {@link DOMStoreReadTransaction} and {@link DOMStoreWriteTransaction}.
- */
-public interface DOMStoreReadWriteTransaction extends DOMStoreReadTransaction, DOMStoreWriteTransaction {
-
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index db2afd0..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import com.google.common.util.concurrent.ListenableFuture;
-
-/**
- * Interface implemented by the {@link DOMStore} and exposed for each {@link DOMStoreWriteTransaction}
- * upon its transition to Ready state. The frontend (DOMStore user) uses this interface to drive the
- * commit procedure across potentially multiple DOMStores using the Three-Phase-Commit (3PC) Protocol,
- * as described in <a href="https://en.wikipedia.org/wiki/Three-phase_commit">Three-Phase-Commit Protocol</a>.
- */
-public interface DOMStoreThreePhaseCommitCohort {
-
-    /**
-     * Sends the transaction associated with this three-phase commit instance to the
-     * participant, which votes on whether the transaction should be committed or
-     * aborted.
-     *
-     * @return ListenableFuture holding the participant's vote. The value returned by
-     *         {@link ListenableFuture#get()} is:
-     *         <ul>
-     *         <li>true if the transaction is approved by the data store,</li>
-     *         <li>false if the transaction is not approved by the data store and
-     *         should be aborted.</li>
-     *         </ul>
-     */
-    ListenableFuture<Boolean> canCommit();
-
-    /**
-     * Initiates the pre-commit phase of the associated transaction on the data store.
-     *
-     * <p>
-     * This message is valid if and only if the participant responded to the
-     * {@link #canCommit()} call with a positive response.
-     *
-     * @return ListenableFuture representing the participant's acknowledgment
-     *        that the pre-commit message was received and processed.
-     */
-    ListenableFuture<Void> preCommit();
-
-    /**
-     * Initiates the abort phase of the associated transaction on the data store.
-     *
-     * @return ListenableFuture representing the participant's acknowledgment
-     *        that the abort message was received.
-     */
-    ListenableFuture<Void> abort();
-
-    /**
-     * Initiates the commit phase of the associated transaction on the data store.
-     *
-     * @return ListenableFuture representing the participant's acknowledgment
-     *        that the commit message was received and the commit of the
-     *        transaction was processed.
-     */
-    ListenableFuture<Void> commit();
-}
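
A cohort only defines the four protocol calls; the ordering is driven by the caller. The following is a minimal sketch of the canonical sequence for a single cohort, assuming a blocking caller is acceptable; the helper names are illustrative.

    import com.google.common.util.concurrent.ListenableFuture;
    import java.util.concurrent.ExecutionException;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;

    final class CommitExample {
        private CommitExample() {
            // utility class
        }

        // Drives a single cohort through the 3PC sequence, aborting on a negative vote.
        static boolean commitBlocking(final DOMStoreThreePhaseCommitCohort cohort)
                throws InterruptedException, ExecutionException {
            final ListenableFuture<Boolean> vote = cohort.canCommit();
            if (!vote.get()) {
                cohort.abort().get();
                return false;
            }
            cohort.preCommit().get();
            cohort.commit().get();
            return true;
        }
    }
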
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransaction.java
deleted file mode 100644 (file)
index 86679d3..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import org.opendaylight.yangtools.concepts.Identifiable;
-
-/**
- * DOM Data Store transaction.
- *
- * @see DOMStoreReadTransaction
- * @see DOMStoreWriteTransaction
- * @see DOMStoreReadWriteTransaction
- */
-public interface DOMStoreTransaction extends AutoCloseable, Identifiable<Object> {
-    /**
-     * Unique identifier of the transaction.
-     */
-    @Override
-    Object getIdentifier();
-
-    @Override
-    void close();
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransactionChain.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransactionChain.java
deleted file mode 100644 (file)
index dfffd3e..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-/**
- * A chain of transactions. Transactions in a chain need to be committed in
- * sequence and each transaction must see the effects of previous transactions
- * as if they had happened. A chain makes no guarantee of atomicity; in fact,
- * transactions are committed as soon as possible.
- */
-public interface DOMStoreTransactionChain extends DOMStoreTransactionFactory, AutoCloseable {
-
-    /**
-     * Create a new read only transaction which will continue the chain. The
-     * previous write transaction has to be either READY or CANCELLED.
-     *
-     * <p>
-     * If the previous write transaction was already committed to the data store, the new
-     * read-only transaction is the same as one obtained via {@link DOMStore#newReadOnlyTransaction()}
-     * and contains the merged result of the previous one and the current state of the data store.
-     *
-     * <p>
-     * Otherwise the read-only transaction presents an isolated view as if the previous read-write
-     * transaction had been successful. State introduced by other transactions outside this
-     * transaction chain after the creation of the previous transaction is not visible.
-     *
-     * @return New transaction in the chain.
-     * @throws IllegalStateException
-     *             if the previous transaction was not READY or CANCELLED, or
-     *             if the chain has been closed.
-     */
-    @Override
-    DOMStoreReadTransaction newReadOnlyTransaction();
-
-    /**
-     * Create a new read-write transaction which will continue the chain. The
-     * previous read-write transaction has to be either COMMITTED or CANCELLED.
-     *
-     * <p>
-     * If the previous write transaction was already committed to the data store, the new
-     * read-write transaction is the same as one obtained via {@link DOMStore#newReadWriteTransaction()}
-     * and contains the merged result of the previous one and the current state of the data store.
-     *
-     * <p>
-     * Otherwise the read-write transaction presents an isolated view as if the previous read-write
-     * transaction had been successful. State introduced by other transactions outside this
-     * transaction chain after the creation of the previous transaction is not visible.
-     *
-     * @return New transaction in the chain.
-     * @throws IllegalStateException
-     *             if the previous transaction was not READY or CANCELLED, or
-     *             if the chain has been closed.
-     */
-    @Override
-    DOMStoreReadWriteTransaction newReadWriteTransaction();
-
-    /**
-     * Create a new write-only transaction which will continue the chain. The
-     * previous read-write transaction has to be either READY or CANCELLED.
-     *
-     * @return New transaction in the chain.
-     * @throws IllegalStateException
-     *             if the previous transaction was not READY or CANCELLED, or
-     *             if the chain has been closed.
-     */
-    @Override
-    DOMStoreWriteTransaction newWriteOnlyTransaction();
-
-    /**
-     * Closes Transaction Chain.
-     *
-     * <p>
-     * The close method of a transaction chain does not guarantee that
-     * the last allocated transaction is ready or was submitted.
-     *
-     * @throws IllegalStateException If any of the outstanding created transactions was not canceled or ready.
-     */
-    @Override
-    void close();
-}
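
The chaining semantics are easier to follow with a concrete sequence: allocate a write transaction, ready and commit it, then allocate a read transaction that observes its effects. The sketch below illustrates that flow under the assumption that blocking on the commit futures is acceptable; the helper names are illustrative.

    import java.util.concurrent.ExecutionException;
    import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class ChainExample {
        private ChainExample() {
            // utility class
        }

        // Writes 'data' at 'path' in one chained transaction and reads it back in the next one.
        static void writeThenRead(final DOMStoreTransactionChain chain, final YangInstanceIdentifier path,
                final NormalizedNode<?, ?> data)
                throws InterruptedException, ExecutionException, ReadFailedException {
            final DOMStoreWriteTransaction writeTx = chain.newWriteOnlyTransaction();
            writeTx.write(path, data);

            // Ready the transaction and drive it through the three-phase commit protocol.
            final DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
            if (!cohort.canCommit().get()) {
                cohort.abort().get();
                chain.close();
                return;
            }
            cohort.preCommit().get();
            cohort.commit().get();

            // The next transaction in the chain sees the effects of the previous one.
            try (DOMStoreReadTransaction readTx = chain.newReadOnlyTransaction()) {
                readTx.read(path).checkedGet();
            }

            chain.close();
        }
    }
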
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransactionFactory.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTransactionFactory.java
deleted file mode 100644 (file)
index f33c0a4..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-/**
- * Factory for DOM Store transactions.
- *
- * <p>
- * The factory provides methods to construct read-only, read-write and write-only
- * transactions, which may be used to retrieve and modify stored information in the
- * underlying {@link DOMStore} or {@link DOMStoreTransactionChain}.
- *
- * <p>
- * See DOMStore, DOMStoreTransactionChain for concrete variations of this factory.
- *
- * <p>
- * <b>Note:</b> This interface is used only to define common functionality
- * between {@link DOMStore} and {@link DOMStoreTransactionChain}, which
- * further specify behaviour of returned transactions.
- *
- */
-public interface DOMStoreTransactionFactory {
-
-    /**
-     * Creates a read only transaction.
-     *
-     * <p>
-     * Creates a new read-only transaction, which provides read access to
-     * snapshot of current state.
-     *
-     * @see DOMStoreReadTransaction for more information.
-     * @return new {@link DOMStoreReadTransaction}
-     * @throws IllegalStateException
-     *             If state of factory prevents allocating new transaction.
-     *
-     */
-    DOMStoreReadTransaction newReadOnlyTransaction();
-
-    /**
-     * Creates write only transaction.
-     *
-     * @see DOMStoreWriteTransaction for more information.
-     * @return new {@link DOMStoreWriteTransaction}
-     * @throws IllegalStateException If state of factory prevents allocating new transaction.
-     */
-    DOMStoreWriteTransaction newWriteOnlyTransaction();
-
-    /**
-     * Creates a read-write transaction.
-     *
-     * @see DOMStoreReadWriteTransaction for more information.
-     * @return new {@link DOMStoreReadWriteTransaction}
-     * @throws IllegalStateException If state of factory prevents allocating new transaction.
-     */
-    DOMStoreReadWriteTransaction newReadWriteTransaction();
-
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTreeChangePublisher.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreTreeChangePublisher.java
deleted file mode 100644 (file)
index 6665bd1..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-/**
- * Interface implemented by DOMStore implementations which allow registration
- * of {@link DOMDataTreeChangeListener} instances.
- */
-public interface DOMStoreTreeChangePublisher {
-    /**
-     * Registers a {@link DOMDataTreeChangeListener} to receive
-     * notifications when data changes under a given path in the conceptual data
-     * tree.
-     *
-     * <p>
-     * You are able to register for notifications for any node or subtree
-     * which can be represented using {@link YangInstanceIdentifier}.
-     *
-     * <p>
-     * You are able to register for data change notifications for a subtree or leaf
-     * even if it does not exist yet. You will receive a notification once that node
-     * is created.
-     *
-     * <p>
-     * If there is any pre-existing data in the data tree at the path for which you are
-     * registering, you will receive an initial data change event, which will
-     * contain all pre-existing data, marked as created.
-     *
-     * <p>
-     * This method returns a {@link ListenerRegistration} object. To
-     * "unregister" your listener for changes call the {@link ListenerRegistration#close()}
-     * method on this returned object.
-     *
-     * <p>
-     * You MUST explicitly unregister your listener when you no longer want to receive
-     * notifications. This is especially true in OSGi environments, where failure to
-     * do so during bundle shutdown can lead to stale listeners being still registered.
-     *
-     * @param treeId
-     *            Data tree identifier of the subtree which should be watched for
-     *            changes.
-     * @param listener
-     *            Listener instance which is being registered
-     * @return Listener registration object, which may be used to unregister
-     *         your listener using {@link ListenerRegistration#close()} to stop
-     *         delivery of change events.
-     */
-    <L extends DOMDataTreeChangeListener> @NonNull ListenerRegistration<L> registerTreeChangeListener(
-            @NonNull YangInstanceIdentifier treeId, @NonNull L listener);
-}
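
A minimal registration sketch follows. It assumes only what is documented above: the registration returned by registerTreeChangeListener stops delivery once close() is invoked. The helper class and method names are illustrative.

    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
    import org.opendaylight.yangtools.concepts.ListenerRegistration;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    final class ListenerExample {
        private ListenerExample() {
            // utility class
        }

        // Registers 'listener' for changes under 'subtree'; the caller must keep the registration
        // and close() it when notifications are no longer needed (e.g. on bundle shutdown).
        static <L extends DOMDataTreeChangeListener> ListenerRegistration<L> watch(
                final DOMStoreTreeChangePublisher publisher, final YangInstanceIdentifier subtree,
                final L listener) {
            return publisher.registerTreeChangeListener(subtree, listener);
        }

        // Stops delivery of change events for a previously registered listener.
        static void unwatch(final ListenerRegistration<?> registration) {
            registration.close();
        }
    }
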
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreWriteTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreWriteTransaction.java
deleted file mode 100644 (file)
index 3aaf006..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-public interface DOMStoreWriteTransaction extends DOMStoreTransaction {
-
-    /**
-     * Stores the provided data at the specified path. This acts as an add/replace
-     * operation, which is to say that the whole subtree rooted at the specified path
-     * will be replaced by the supplied data.
-     *
-     * <p>
-     * If you need to merge the current data with the specified data, use
-     * {@link #merge(YangInstanceIdentifier, NormalizedNode)}.
-     *
-     * @param path the path to write
-     * @param data data object to be written
-     * @throws IllegalStateException
-     *             if the client code has already sealed the transaction by invoking
-     *             {@link #ready()}
-     */
-    void write(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
-
-    /**
-     * Merges the provided data with the data currently stored at the specified path. Unlike
-     * {@link #write(YangInstanceIdentifier, NormalizedNode)}, pre-existing data that is not
-     * covered by the supplied data is retained.
-     *
-     * @param path the path to write
-     * @param data data object to be written
-     * @throws IllegalStateException
-     *             if the client code has already sealed the transaction by invoking
-     *             {@link #ready()}
-     */
-    void merge(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
-
-    /**
-     * Deletes data and whole subtree located at provided path.
-     *
-     * @param path path to delete
-     * @throws IllegalStateException
-     *             if the client code has already sealed the transaction by invoking
-     *             {@link #ready()}
-     */
-    void delete(YangInstanceIdentifier path);
-
-    /**
-     * Seals the transaction and returns the three-phase commit cohort associated
-     * with this transaction and DOM Store, to be driven by a commit coordinator.
-     *
-     * @return Three Phase Commit Cohort instance for this transaction.
-     */
-    DOMStoreThreePhaseCommitCohort ready();
-
-}
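
To make the write/merge distinction concrete, here is a small sketch that applies both kinds of modification and then seals the transaction; the helper class, method and parameter names are illustrative.

    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    final class WriteExample {
        private WriteExample() {
            // utility class
        }

        // Replaces one subtree, merges a patch into another, and seals the transaction.
        static DOMStoreThreePhaseCommitCohort applyChanges(final DOMStoreWriteTransaction tx,
                final YangInstanceIdentifier replacePath, final NormalizedNode<?, ?> replacement,
                final YangInstanceIdentifier mergePath, final NormalizedNode<?, ?> patch) {
            // write() replaces the whole subtree rooted at the path
            tx.write(replacePath, replacement);
            // merge() retains pre-existing data not covered by 'patch'
            tx.merge(mergePath, patch);
            // ready() seals the transaction; no further modifications are allowed
            return tx.ready();
        }
    }
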
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/ForwardingDOMStoreThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/ForwardingDOMStoreThreePhaseCommitCohort.java
deleted file mode 100644 (file)
index c6e70fe..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import com.google.common.annotations.Beta;
-import com.google.common.collect.ForwardingObject;
-import com.google.common.util.concurrent.ListenableFuture;
-
-/**
- * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} implementations,
- * which forward most of their functionality to a backend {@link #delegate()}.
- */
-@Beta
-public abstract class ForwardingDOMStoreThreePhaseCommitCohort extends ForwardingObject
-        implements DOMStoreThreePhaseCommitCohort {
-    @Override
-    protected abstract DOMStoreThreePhaseCommitCohort delegate();
-
-    @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return delegate().canCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return delegate().preCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegate().abort();
-    }
-
-    @Override
-    public ListenableFuture<Void> commit() {
-        return delegate().commit();
-    }
-}
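
The forwarding pattern is typically used to decorate a backend cohort with a cross-cutting concern. Below is a minimal sketch that adds debug logging around canCommit() while forwarding everything else; the class name is illustrative.

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
    import org.opendaylight.controller.sal.core.spi.data.ForwardingDOMStoreThreePhaseCommitCohort;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Decorates a backend cohort, adding debug logging while forwarding all protocol calls.
    final class LoggingCommitCohort extends ForwardingDOMStoreThreePhaseCommitCohort {
        private static final Logger LOG = LoggerFactory.getLogger(LoggingCommitCohort.class);

        private final DOMStoreThreePhaseCommitCohort backend;

        LoggingCommitCohort(final DOMStoreThreePhaseCommitCohort backend) {
            this.backend = backend;
        }

        @Override
        protected DOMStoreThreePhaseCommitCohort delegate() {
            return backend;
        }

        @Override
        public ListenableFuture<Boolean> canCommit() {
            LOG.debug("canCommit() invoked on {}", backend);
            return super.canCommit();
        }
    }
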
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedReadTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedReadTransaction.java
deleted file mode 100644 (file)
index 688d1e3..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of a read-only transaction backed by a {@link DataTreeSnapshot}.
- *
- * <p>
- * Most of its calls are delegated to equivalent methods provided by the underlying snapshot.
- *
- * @param <T> identifier type
- */
-@Beta
-public final class SnapshotBackedReadTransaction<T> extends AbstractDOMStoreTransaction<T>
-        implements DOMStoreReadTransaction {
-    private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadTransaction.class);
-    private volatile DataTreeSnapshot stableSnapshot;
-
-    /**
-     * Creates a new read-only transaction.
-     *
-     * @param identifier Transaction Identifier
-     * @param debug Enable transaction debugging
-     * @param snapshot Snapshot from which data will be read.
-     */
-    SnapshotBackedReadTransaction(final T identifier, final boolean debug, final DataTreeSnapshot snapshot) {
-        super(identifier, debug);
-        this.stableSnapshot = Preconditions.checkNotNull(snapshot);
-        LOG.debug("ReadOnly Tx: {} allocated with snapshot {}", identifier, snapshot);
-    }
-
-    @Override
-    public void close() {
-        LOG.debug("Store transaction: {} : Closed", getIdentifier());
-        stableSnapshot = null;
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(final YangInstanceIdentifier path) {
-        LOG.debug("Tx: {} Read: {}", getIdentifier(), path);
-        checkNotNull(path, "Path must not be null.");
-
-        final DataTreeSnapshot snapshot = stableSnapshot;
-        if (snapshot == null) {
-            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
-        }
-
-        try {
-            return Futures.immediateCheckedFuture(Optional.fromJavaUtil(snapshot.readNode(path)));
-        } catch (RuntimeException e) {
-            LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e);
-            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
-        }
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
-        LOG.debug("Tx: {} Exists: {}", getIdentifier(), path);
-        checkNotNull(path, "Path must not be null.");
-
-        try {
-            return Futures.immediateCheckedFuture(read(path).checkedGet().isPresent());
-        } catch (ReadFailedException e) {
-            return Futures.immediateFailedCheckedFuture(e);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedReadWriteTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedReadWriteTransaction.java
deleted file mode 100644 (file)
index 2d938fe..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import static com.google.common.base.Preconditions.checkNotNull;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of Read-Write transaction which is backed by {@link DataTreeSnapshot}
- * and executed according to {@link TransactionReadyPrototype}.
- *
- * @param <T> identifier type
- */
-@Beta
-public final class SnapshotBackedReadWriteTransaction<T> extends SnapshotBackedWriteTransaction<T>
-        implements DOMStoreReadWriteTransaction {
-    private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
-
-    SnapshotBackedReadWriteTransaction(final T identifier, final boolean debug,
-            final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
-        super(identifier, debug, snapshot, readyImpl);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(final YangInstanceIdentifier path) {
-        LOG.debug("Tx: {} Read: {}", getIdentifier(), path);
-        checkNotNull(path, "Path must not be null.");
-
-        final Optional<NormalizedNode<?, ?>> result;
-        try {
-            result = readSnapshotNode(path);
-        } catch (RuntimeException e) {
-            LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e);
-            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed", e));
-        }
-
-        if (result == null) {
-            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
-        } else {
-            return Futures.immediateCheckedFuture(result);
-        }
-    }
-
-    @Override
-    public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
-        try {
-            return Futures.immediateCheckedFuture(
-                read(path).checkedGet().isPresent());
-        } catch (ReadFailedException e) {
-            return Futures.immediateFailedCheckedFuture(e);
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedTransactions.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedTransactions.java
deleted file mode 100644 (file)
index a82a65e..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import com.google.common.annotations.Beta;
-import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-
-/**
- * Public utility class for instantiating snapshot-backed transactions.
- */
-@Beta
-public final class SnapshotBackedTransactions {
-    private SnapshotBackedTransactions() {
-        throw new UnsupportedOperationException("Utility class");
-    }
-
-    /**
-     * Creates a new read-only transaction.
-     *
-     * @param identifier Transaction Identifier
-     * @param debug Enable transaction debugging
-     * @param snapshot Snapshot from which data will be read.
-     */
-    public static <T> SnapshotBackedReadTransaction<T> newReadTransaction(final T identifier, final boolean debug,
-            final DataTreeSnapshot snapshot) {
-        return new SnapshotBackedReadTransaction<>(identifier, debug, snapshot);
-    }
-
-    /**
-     * Creates a new read-write transaction.
-     *
-     * @param identifier transaction Identifier
-     * @param debug Enable transaction debugging
-     * @param snapshot Snapshot which will be modified.
-     * @param readyImpl Implementation of ready method.
-     */
-    public static <T> SnapshotBackedReadWriteTransaction<T> newReadWriteTransaction(final T identifier,
-            final boolean debug, final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
-        return new SnapshotBackedReadWriteTransaction<>(identifier, debug, snapshot, readyImpl);
-    }
-
-    /**
-     * Creates a new write-only transaction.
-     *
-     * @param identifier transaction Identifier
-     * @param debug Enable transaction debugging
-     * @param snapshot Snapshot which will be modified.
-     * @param readyImpl Implementation of ready method.
-     */
-    public static <T> SnapshotBackedWriteTransaction<T> newWriteTransaction(final T identifier, final boolean debug,
-            final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
-        return new SnapshotBackedWriteTransaction<>(identifier, debug, snapshot, readyImpl);
-    }
-}
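
A short sketch of how these factory methods are typically invoked; the transaction identifiers ("tx-1", "tx-2") and helper names are arbitrary placeholders.

    import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedReadTransaction;
    import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedTransactions;
    import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
    import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
    import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;

    final class SnapshotTxExample {
        private SnapshotTxExample() {
            // utility class
        }

        // Allocates a read-only transaction over the supplied snapshot.
        static SnapshotBackedReadTransaction<String> readTx(final DataTreeSnapshot snapshot) {
            return SnapshotBackedTransactions.newReadTransaction("tx-1", false, snapshot);
        }

        // Allocates a write transaction; 'readyImpl' supplies the commit logic when ready() is invoked.
        static SnapshotBackedWriteTransaction<String> writeTx(final DataTreeSnapshot snapshot,
                final TransactionReadyPrototype<String> readyImpl) {
            return SnapshotBackedTransactions.newWriteTransaction("tx-2", false, snapshot, readyImpl);
        }
    }
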
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedWriteTransaction.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/SnapshotBackedWriteTransaction.java
deleted file mode 100644 (file)
index 01f4008..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
-
-import static com.google.common.base.Preconditions.checkState;
-
-import com.google.common.annotations.Beta;
-import com.google.common.base.MoreObjects.ToStringHelper;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implementation of Write transaction which is backed by
- * {@link DataTreeSnapshot} and executed according to
- * {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype}.
- *
- * @param <T> Identifier type
- */
-@Beta
-public class SnapshotBackedWriteTransaction<T> extends AbstractDOMStoreTransaction<T>
-        implements DOMStoreWriteTransaction {
-    private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype>
-        READY_UPDATER = AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class,
-                TransactionReadyPrototype.class, "readyImpl");
-    @SuppressWarnings("rawtypes")
-    private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification>
-        TREE_UPDATER = AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class,
-                DataTreeModification.class, "mutableTree");
-
-    // non-null when not ready
-    private volatile TransactionReadyPrototype<T> readyImpl;
-    // non-null when not committed/closed
-    private volatile DataTreeModification mutableTree;
-
-    SnapshotBackedWriteTransaction(final T identifier, final boolean debug,
-            final DataTreeSnapshot snapshot, final TransactionReadyPrototype<T> readyImpl) {
-        super(identifier, debug);
-        this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
-        mutableTree = snapshot.newModification();
-        LOG.debug("Write Tx: {} allocated with snapshot {}", identifier, snapshot);
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        checkNotReady();
-
-        final DataTreeModification tree = mutableTree;
-        LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
-
-        try {
-            tree.write(path, data);
-            // FIXME: Add checked exception
-        } catch (RuntimeException e) {
-            LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
-            // Rethrow original ones if they are subclasses of RuntimeException
-            // or Error
-            Throwables.propagateIfPossible(e);
-            // FIXME: Introduce proper checked exception
-            throw new IllegalArgumentException("Illegal input data.", e);
-        }
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        checkNotReady();
-
-        final DataTreeModification tree = mutableTree;
-        LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
-
-        try {
-            tree.merge(path, data);
-            // FIXME: Add checked exception
-        } catch (RuntimeException e) {
-            LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
-            // Rethrow original ones if they are subclasses of RuntimeException
-            // or Error
-            Throwables.propagateIfPossible(e);
-            // FIXME: Introduce proper checked exception
-            throw new IllegalArgumentException("Illegal input data.", e);
-        }
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public void delete(final YangInstanceIdentifier path) {
-        checkNotReady();
-
-        final DataTreeModification tree = mutableTree;
-        LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
-
-        try {
-            tree.delete(path);
-            // FIXME: Add checked exception
-        } catch (RuntimeException e) {
-            LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, tree, e);
-            // Rethrow original ones if they are subclasses of RuntimeException
-            // or Error
-            Throwables.propagateIfPossible(e);
-            // FIXME: Introduce proper checked exception
-            throw new IllegalArgumentException("Illegal path to delete.", e);
-        }
-    }
-
-    /**
-     * Exposed for {@link SnapshotBackedReadWriteTransaction}'s sake only. The contract does
-     * not allow data access after the transaction has been closed or readied.
-     *
-     * @param path Path to read
-     * @return null if the transaction has been closed or readied, the read result otherwise
-     */
-    final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
-        return readyImpl == null ? null : Optional.fromJavaUtil(mutableTree.readNode(path));
-    }
-
-    private void checkNotReady() {
-        checkState(readyImpl != null, "Transaction %s is no longer open. No further modifications allowed.",
-                getIdentifier());
-    }
-
-    @Override
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    public DOMStoreThreePhaseCommitCohort ready() {
-        @SuppressWarnings("unchecked")
-        final TransactionReadyPrototype<T> wasReady = READY_UPDATER.getAndSet(this, null);
-        checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
-
-        LOG.debug("Store transaction: {} : Ready", getIdentifier());
-
-        final DataTreeModification tree = mutableTree;
-        TREE_UPDATER.lazySet(this, null);
-        try {
-            tree.ready();
-            return wasReady.transactionReady(this, tree, null);
-        } catch (RuntimeException e) {
-            LOG.debug("Store transaction: {}: unexpected failure when readying", getIdentifier(), e);
-            return wasReady.transactionReady(this, tree, e);
-        }
-    }
-
-    @Override
-    public void close() {
-        @SuppressWarnings("unchecked")
-        final TransactionReadyPrototype<T> wasReady = READY_UPDATER.getAndSet(this, null);
-        if (wasReady != null) {
-            LOG.debug("Store transaction: {} : Closed", getIdentifier());
-            TREE_UPDATER.lazySet(this, null);
-            wasReady.transactionAborted(this);
-        } else {
-            LOG.debug("Store transaction: {} : Closed after submit", getIdentifier());
-        }
-    }
-
-    @Override
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("ready", readyImpl == null);
-    }
-
-    /**
-     * This class is intended to be implemented by transaction factories responsible for allocation of
-     * {@link org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction} instances and
-     * for providing the logic which applies their modifications.
-     *
-     * @param <T> identifier type
-     */
-    public abstract static class TransactionReadyPrototype<T> {
-        /**
-         * Called when a transaction is closed without being readied. This is not invoked for
-         * transactions which are ready.
-         *
-         * @param tx Transaction which got aborted.
-         */
-        protected abstract void transactionAborted(SnapshotBackedWriteTransaction<T> tx);
-
-        /**
-         * Returns a commit cohort associated with the supplied transaction.
-         *
-         * <p>
-         * This call must not fail.
-         *
-         * @param tx
-         *            Transaction on which ready was invoked.
-         * @param tree
-         *            Modified data tree which has been constructed.
-         * @param readyError
-         *            Any error that has already happened when readying.
-         * @return DOMStoreThreePhaseCommitCohort associated with transaction
-         */
-        protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction<T> tx,
-                                                                           DataTreeModification tree,
-                                                                           @Nullable Exception readyError);
-    }
-}
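
A TransactionReadyPrototype supplies the commit logic behind ready(). The sketch below is a deliberately trivial prototype that accepts every transaction and discards the prepared modification; a real implementation would hand the DataTreeModification to its data store. The class name and logging are illustrative.

    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
    import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction;
    import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
    import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // A no-op prototype: it accepts every transaction and discards the prepared modification.
    final class NoopReadyPrototype<T> extends TransactionReadyPrototype<T> {
        private static final Logger LOG = LoggerFactory.getLogger(NoopReadyPrototype.class);

        @Override
        protected void transactionAborted(final SnapshotBackedWriteTransaction<T> tx) {
            LOG.debug("Transaction {} aborted", tx.getIdentifier());
        }

        @Override
        protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction<T> tx,
                final DataTreeModification tree, final Exception readyError) {
            return new DOMStoreThreePhaseCommitCohort() {
                @Override
                public ListenableFuture<Boolean> canCommit() {
                    // vote to abort if readying the data tree already failed
                    return Futures.immediateFuture(readyError == null);
                }

                @Override
                public ListenableFuture<Void> preCommit() {
                    return Futures.immediateFuture(null);
                }

                @Override
                public ListenableFuture<Void> commit() {
                    LOG.debug("Transaction {} committed (changes discarded)", tx.getIdentifier());
                    return Futures.immediateFuture(null);
                }

                @Override
                public ListenableFuture<Void> abort() {
                    return Futures.immediateFuture(null);
                }
            };
        }
    }
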
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/package-info.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/package-info.java
deleted file mode 100644 (file)
index 8c8ee3d..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.core.spi.data;
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/statistics/DOMStoreStatsTracker.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/statistics/DOMStoreStatsTracker.java
deleted file mode 100644 (file)
index 4a6019e..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.sal.core.spi.data.statistics;
-
-import java.util.concurrent.ExecutorService;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
-
-/**
- * Interface for a class that tracks statistics for a data store.
- *
- * @author Thomas Pantelis
- */
-public interface DOMStoreStatsTracker {
-
-    /**
-     * Sets the executor used for DataChangeListener notifications.
-     *
-     * @param dclExecutor the executor
-     */
-    void setDataChangeListenerExecutor(@NonNull ExecutorService dclExecutor);
-
-    /**
-     * Sets the executor used internally by the data store.
-     *
-     * @param dsExecutor the executor
-     */
-    void setDataStoreExecutor(@NonNull ExecutorService dsExecutor);
-
-    /**
-     * Sets the QueuedNotificationManager used for DataChangeListener notifications.
-     *
-     * @param manager the manager
-     */
-    void setNotificationManager(@NonNull QueuedNotificationManager<?, ?> manager);
-}
index 52ea3f99dd547ff6d0b59b37b914679ce7311ced..7e1cffa2ed65f2304eee5090783d379fe4427114 100644 (file)
@@ -4,57 +4,14 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../parent</relativePath>
   </parent>
 
-  <groupId>org.opendaylight.controller</groupId>
   <artifactId>sal-dummy-distributed-datastore</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-actor_2.12</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-cluster_2.12</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-persistence_2.12</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-remote_2.12</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-testkit_2.12</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-slf4j_2.12</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>com.typesafe.akka</groupId>
-      <artifactId>akka-osgi_2.12</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-clustering-commons</artifactId>
-    </dependency>
-
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-akka-raft</artifactId>
@@ -67,9 +24,8 @@
 
     <!-- Test Dependencies -->
     <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-testkit_2.13</artifactId>
     </dependency>
 
     <dependency>
index 05f83368f6ec334e8c5ccc2bda7503c14f63bec8..3112c49d92d41b7823a0805952736af5bc86c30f 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.dummy.datastore;
 import akka.actor.Props;
 import akka.actor.UntypedAbstractActor;
 
-public class DummyShardManager extends UntypedAbstractActor {
+public final class DummyShardManager extends UntypedAbstractActor {
     public DummyShardManager(final Configuration configuration, final String memberName, final String[] shardNames,
             final String type) {
         new DummyShardsCreator(configuration, getContext(), memberName, shardNames, type).create();
index 28a35ecb4c8b55e642fb042e25e9a90dc6fb7a02..76b485724190f2025812cbfab54ceff9e544f63e 100644 (file)
@@ -36,8 +36,6 @@ odl-cluster-data {
     cluster {
       seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2553"]
 
-      auto-down-unreachable-after = 10s
-
       roles = [
         "member-2"
       ]
index f9afe04278c40309925cf0dc5b7192716359161f..c4254378019899b9ba4063de708a076285ca82f4 100644 (file)
@@ -36,8 +36,6 @@ odl-cluster-data {
     cluster {
       seed-nodes = ["akka://opendaylight-cluster-data@127.0.0.1:2550", "akka://opendaylight-cluster-data@127.0.0.1:2554"]
 
-      auto-down-unreachable-after = 10s
-
       roles = [
         "member-3"
       ]
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/pom.xml b/opendaylight/md-sal/sal-inmemory-datastore/pom.xml
deleted file mode 100644 (file)
index 94e7ecc..0000000
+++ /dev/null
@@ -1,132 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <relativePath>../parent</relativePath>
-  </parent>
-
-  <artifactId>sal-inmemory-datastore</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
-  <packaging>bundle</packaging>
-
-  <dependencies>
-    <!-- SAL Dependencies -->
-
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-api</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-common-util</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-core-compat</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>concepts</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>util</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>yang-binding</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-data-impl</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-dom-inmemory-datastore</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-api</artifactId>
-    </dependency>
-
-    <!-- Test Dependencies -->
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.slf4j</groupId>
-      <artifactId>slf4j-simple</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-generator-impl</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>mockito-configuration</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-test-model</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.opendaylight.yangtools</groupId>
-      <artifactId>yang-test-util</artifactId>
-    </dependency>
-  </dependencies>
-
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.felix</groupId>
-        <artifactId>maven-bundle-plugin</artifactId>
-        <extensions>true</extensions>
-        <configuration>
-          <instructions>
-            <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
-            <Export-Package>org.opendaylight.controller.md.sal.dom.store.impl.*</Export-Package>
-          </instructions>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Architecture:Clustering</url>
-  </scm>
-
-</project>
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java
deleted file mode 100644 (file)
index 5975502..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import java.util.concurrent.ExecutorService;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.sal.core.compat.DOMStoreAdapter;
-import org.opendaylight.yangtools.concepts.Identifiable;
-import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-
-/**
- * In-memory DOM Data Store providing Controller MD-SAL APIs on top of MD-SAL's
- * {@link org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore}.
- *
- * @deprecated Please use {@link org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore} instead.
- */
-@Deprecated
-public class InMemoryDOMDataStore
-        extends DOMStoreAdapter<org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore>
-        implements Identifiable<String>, SchemaContextListener, AutoCloseable {
-    private final org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore delegate;
-
-    public InMemoryDOMDataStore(final String name, final ExecutorService dataChangeListenerExecutor) {
-        this(name, LogicalDatastoreType.OPERATIONAL, dataChangeListenerExecutor,
-            InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE, false);
-    }
-
-    public InMemoryDOMDataStore(final String name, final LogicalDatastoreType type,
-            final ExecutorService dataChangeListenerExecutor,
-            final int maxDataChangeListenerQueueSize, final boolean debugTransactions) {
-        delegate = new org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore(name, type.toMdsal(),
-            dataChangeListenerExecutor, maxDataChangeListenerQueueSize, debugTransactions);
-    }
-
-    @Override
-    protected org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStore delegate() {
-        return delegate;
-    }
-
-    public void setCloseable(final AutoCloseable closeable) {
-        delegate.setCloseable(closeable);
-    }
-
-    public QueuedNotificationManager<?, ?> getDataChangeListenerNotificationManager() {
-        return delegate.getDataChangeListenerNotificationManager();
-    }
-
-    @Override
-    public final String getIdentifier() {
-        return delegate.getIdentifier();
-    }
-
-    @Override
-    public void onGlobalContextUpdated(final SchemaContext ctx) {
-        delegate.onGlobalContextUpdated(ctx);
-    }
-
-    @Override
-    public void close() {
-        delegate.close();
-    }
-
-    public final boolean getDebugTransactions() {
-        return delegate.getDebugTransactions();
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreConfigProperties.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreConfigProperties.java
deleted file mode 100644 (file)
index 7266fb7..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-/**
- * Holds configuration properties when creating an {@link InMemoryDOMDataStore} instance via the
- * {@link InMemoryDOMDataStoreFactory}.
- *
- * @author Thomas Pantelis
- * @see InMemoryDOMDataStoreFactory
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStoreConfigProperties} instead.
- */
-@Deprecated
-public final class InMemoryDOMDataStoreConfigProperties {
-
-    public static final int DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
-    public static final int DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
-    public static final int DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE = 1000;
-    public static final int DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
-
-    private static final InMemoryDOMDataStoreConfigProperties DEFAULT =
-            create(DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
-                    DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE,
-                    DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE,
-                    DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE);
-
-    private final int maxDataChangeExecutorQueueSize;
-    private final int maxDataChangeExecutorPoolSize;
-    private final int maxDataChangeListenerQueueSize;
-    private final int maxDataStoreExecutorQueueSize;
-
-    /**
-     * Constructs an instance with the given property values.
-     *
-     * @param maxDataChangeExecutorPoolSize
-     *            maximum thread pool size for the data change notification executor.
-     * @param maxDataChangeExecutorQueueSize
-     *            maximum queue size for the data change notification executor.
-     * @param maxDataChangeListenerQueueSize
-     *            maximum queue size for the data change listeners.
-     * @param maxDataStoreExecutorQueueSize
-     *            maximum queue size for the data store executor.
-     */
-    public static InMemoryDOMDataStoreConfigProperties create(int maxDataChangeExecutorPoolSize,
-            int maxDataChangeExecutorQueueSize, int maxDataChangeListenerQueueSize,
-            int maxDataStoreExecutorQueueSize) {
-        return new InMemoryDOMDataStoreConfigProperties(maxDataChangeExecutorPoolSize,
-                maxDataChangeExecutorQueueSize, maxDataChangeListenerQueueSize,
-                maxDataStoreExecutorQueueSize);
-    }
-
-    public static InMemoryDOMDataStoreConfigProperties create(int maxDataChangeExecutorPoolSize,
-            int maxDataChangeExecutorQueueSize, int maxDataChangeListenerQueueSize) {
-        return new InMemoryDOMDataStoreConfigProperties(maxDataChangeExecutorPoolSize,
-                maxDataChangeExecutorQueueSize, maxDataChangeListenerQueueSize,
-                DEFAULT_MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE);
-    }
-
-    /**
-     * Returns the InMemoryDOMDataStoreConfigProperties instance with default values.
-     */
-    public static InMemoryDOMDataStoreConfigProperties getDefault() {
-        return DEFAULT;
-    }
-
-    private InMemoryDOMDataStoreConfigProperties(int maxDataChangeExecutorPoolSize,
-            int maxDataChangeExecutorQueueSize, int maxDataChangeListenerQueueSize,
-            int maxDataStoreExecutorQueueSize) {
-        this.maxDataChangeExecutorQueueSize = maxDataChangeExecutorQueueSize;
-        this.maxDataChangeExecutorPoolSize = maxDataChangeExecutorPoolSize;
-        this.maxDataChangeListenerQueueSize = maxDataChangeListenerQueueSize;
-        this.maxDataStoreExecutorQueueSize = maxDataStoreExecutorQueueSize;
-    }
-
-    /**
-     * Returns the maximum queue size for the data change notification executor.
-     */
-    public int getMaxDataChangeExecutorQueueSize() {
-        return maxDataChangeExecutorQueueSize;
-    }
-
-    /**
-     * Returns the maximum thread pool size for the data change notification executor.
-     */
-    public int getMaxDataChangeExecutorPoolSize() {
-        return maxDataChangeExecutorPoolSize;
-    }
-
-    /**
-     * Returns the maximum queue size for the data change listeners.
-     */
-    public int getMaxDataChangeListenerQueueSize() {
-        return maxDataChangeListenerQueueSize;
-    }
-
-    /**
-     * Returns the maximum queue size for the data store executor.
-     */
-    public int getMaxDataStoreExecutorQueueSize() {
-        return maxDataStoreExecutorQueueSize;
-    }
-}
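
Tuning is done by constructing a property set up front and passing it to the factory. A minimal sketch, where the listener queue size of 2000 is an arbitrary example value and the helper names are illustrative:

    import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;

    final class ConfigExample {
        private ConfigExample() {
            // utility class
        }

        // Builds a property set with a larger listener queue, keeping the executor defaults.
        static InMemoryDOMDataStoreConfigProperties tunedProperties() {
            return InMemoryDOMDataStoreConfigProperties.create(
                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE,
                InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE,
                2000);
        }
    }
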
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java
deleted file mode 100644 (file)
index 6571c60..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import java.util.concurrent.ExecutorService;
-import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-
-/**
- * A factory for creating InMemoryDOMDataStore instances.
- *
- * @author Thomas Pantelis
- *
- * @deprecated Use {@link org.opendaylight.mdsal.dom.store.inmemory.InMemoryDOMDataStoreFactory} instead.
- */
-@Deprecated
-public final class InMemoryDOMDataStoreFactory {
-
-    private InMemoryDOMDataStoreFactory() {
-    }
-
-    public static InMemoryDOMDataStore create(final String name, final @Nullable DOMSchemaService schemaService) {
-        return create(name, schemaService, null);
-    }
-
-    /**
-     * Creates an InMemoryDOMDataStore instance.
-     *
-     * @param name the name of the data store
-     * @param schemaService the SchemaService to which to register the data store.
-     * @param properties configuration properties for the InMemoryDOMDataStore instance. If null,
-     *                   default property values are used.
-     * @return an InMemoryDOMDataStore instance
-     */
-    public static InMemoryDOMDataStore create(final String name, final @Nullable DOMSchemaService schemaService,
-            final @Nullable InMemoryDOMDataStoreConfigProperties properties) {
-        return create(name, LogicalDatastoreType.OPERATIONAL, schemaService, false, properties);
-    }
-
-    /**
-     * Creates an InMemoryDOMDataStore instance.
-     *
-     * @param name the name of the data store
-     * @param type Data store type
-     * @param schemaService the SchemaService to which to register the data store.
-     * @param debugTransactions enable transaction debugging
-     * @param properties configuration properties for the InMemoryDOMDataStore instance. If null,
-     *                   default property values are used.
-     * @return an InMemoryDOMDataStore instance
-     */
-    public static InMemoryDOMDataStore create(final String name, final LogicalDatastoreType type,
-            final @Nullable DOMSchemaService schemaService, final boolean debugTransactions,
-            final @Nullable InMemoryDOMDataStoreConfigProperties properties) {
-
-        InMemoryDOMDataStoreConfigProperties actualProperties = properties;
-        if (actualProperties == null) {
-            actualProperties = InMemoryDOMDataStoreConfigProperties.getDefault();
-        }
-
-        // For DataChangeListener notifications we use an executor that provides the fastest
-        // task execution time to get higher throughput as DataChangeListeners typically provide
-        // much of the business logic for a data model. If the executor queue size limit is reached,
-        // subsequently submitted notifications will block the calling thread.
-        int dclExecutorMaxQueueSize = actualProperties.getMaxDataChangeExecutorQueueSize();
-        int dclExecutorMaxPoolSize = actualProperties.getMaxDataChangeExecutorPoolSize();
-
-        ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
-                dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL", InMemoryDOMDataStore.class);
-
-        final InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name, type, dataChangeListenerExecutor,
-                actualProperties.getMaxDataChangeListenerQueueSize(), debugTransactions);
-
-        if (schemaService != null) {
-            schemaService.registerSchemaContextListener(dataStore);
-        }
-
-        return dataStore;
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMStoreTreeChangePublisher.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMStoreTreeChangePublisher.java
deleted file mode 100644 (file)
index e9b5594..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Optional;
-import java.util.concurrent.ExecutorService;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.spi.AbstractDOMDataTreeChangeListenerRegistration;
-import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
-import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager.Invoker;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidates;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-final class InMemoryDOMStoreTreeChangePublisher extends AbstractDOMStoreTreeChangePublisher {
-    private static final Invoker<AbstractDOMDataTreeChangeListenerRegistration<?>, DataTreeCandidate> MANAGER_INVOKER =
-        (listener, notification) -> {
-            // FIXME: this is inefficient, as we could grab the entire queue for the listener and post it
-            listener.getInstance().onDataTreeChanged(Collections.singletonList(notification));
-        };
-
-    private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMStoreTreeChangePublisher.class);
-    private final QueuedNotificationManager<AbstractDOMDataTreeChangeListenerRegistration<?>, DataTreeCandidate>
-            notificationManager;
-
-    InMemoryDOMStoreTreeChangePublisher(final ExecutorService listenerExecutor, final int maxQueueSize) {
-        notificationManager = new QueuedNotificationManager<>(listenerExecutor, MANAGER_INVOKER, maxQueueSize,
-                "DataTreeChangeListenerQueueMgr");
-    }
-
-    private InMemoryDOMStoreTreeChangePublisher(QueuedNotificationManager<
-            AbstractDOMDataTreeChangeListenerRegistration<?>, DataTreeCandidate> notificationManager) {
-        this.notificationManager = notificationManager;
-    }
-
-    QueuedNotificationManager<AbstractDOMDataTreeChangeListenerRegistration<?>, DataTreeCandidate>
-            getNotificationManager() {
-        return notificationManager;
-    }
-
-    @Override
-    protected void notifyListeners(final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> registrations,
-            final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
-        final DataTreeCandidate candidate = DataTreeCandidates.newDataTreeCandidate(path, node);
-
-        for (AbstractDOMDataTreeChangeListenerRegistration<?> reg : registrations) {
-            LOG.debug("Enqueueing candidate {} to registration {}", candidate, reg);
-            notificationManager.submitNotification(reg, candidate);
-        }
-    }
-
-    @Override
-    protected synchronized void registrationRemoved(
-            final AbstractDOMDataTreeChangeListenerRegistration<?> registration) {
-        LOG.debug("Closing registration {}", registration);
-
-        // FIXME: remove the queue for this registration and make sure we clear it
-    }
-
-    <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(
-            final YangInstanceIdentifier treeId, final L listener, final DataTreeSnapshot snapshot) {
-        final AbstractDOMDataTreeChangeListenerRegistration<L> reg = registerTreeChangeListener(treeId, listener);
-
-        final Optional<NormalizedNode<?, ?>> node = snapshot.readNode(YangInstanceIdentifier.EMPTY);
-        if (node.isPresent()) {
-            final DataTreeCandidate candidate = DataTreeCandidates.fromNormalizedNode(
-                    YangInstanceIdentifier.EMPTY, node.get());
-
-            InMemoryDOMStoreTreeChangePublisher publisher =
-                    new InMemoryDOMStoreTreeChangePublisher(notificationManager);
-            publisher.registerTreeChangeListener(treeId, listener);
-            publisher.publishChange(candidate);
-        }
-
-        return reg;
-    }
-
-    synchronized void publishChange(final @NonNull DataTreeCandidate candidate) {
-        // Runs synchronized with registrationRemoved()
-        processCandidateTree(candidate);
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java
deleted file mode 100644 (file)
index 7c80fd0..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl.jmx;
-
-import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
-import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
-import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.yangtools.concepts.AbstractRegistration;
-import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
-
-/**
- * Wrapper class for data store MXBeans.
- *
- * @author Thomas Pantelis
- */
-public class InMemoryDataStoreStats extends AbstractRegistration {
-
-    private final AbstractMXBean notificationExecutorStatsBean;
-    private final QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
-
-    public InMemoryDataStoreStats(final String beanType, final QueuedNotificationManager<?, ?> manager) {
-
-        notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
-                "notification-manager", beanType, null);
-        notificationManagerStatsBean.registerMBean();
-
-        notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
-                "notification-executor", beanType, null);
-        if (notificationExecutorStatsBean != null) {
-            notificationExecutorStatsBean.registerMBean();
-        }
-    }
-
-    public InMemoryDataStoreStats(final String name, final InMemoryDOMDataStore dataStore) {
-        this(name, dataStore.getDataChangeListenerNotificationManager());
-    }
-
-    @Override
-    protected void removeRegistration() {
-        if (notificationExecutorStatsBean != null) {
-            notificationExecutorStatsBean.unregisterMBean();
-        }
-
-        if (notificationManagerStatsBean != null) {
-            notificationManagerStatsBean.unregisterMBean();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/AbstractDataTreeChangeListenerTest.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/AbstractDataTreeChangeListenerTest.java
deleted file mode 100644 (file)
index 5dd1f55..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import java.util.Collection;
-import java.util.Map;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.WriteTransactionCustomizer;
-import org.opendaylight.mdsal.binding.generator.impl.ModuleInfoBackedContext;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedList;
-import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.api.schema.OrderedMapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeAttrBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.DataContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public abstract class AbstractDataTreeChangeListenerTest {
-
-    protected static final YangInstanceIdentifier TOP_LEVEL = YangInstanceIdentifier
-            .of(Top.QNAME);
-    private static final QName NAME_QNAME = QName.create(Top.QNAME, "name");
-    protected static final String FOO = "foo";
-    protected static final String BAR = "bar";
-    protected static final String BAZ = "baz";
-
-    private InMemoryDOMDataStore datastore;
-    private SchemaContext schemaContext;
-    private TestDCLExecutorService dclExecutorService;
-
-    @Before
-    public final void setup() throws Exception {
-        final YangModuleInfo moduleInfo = BindingReflections
-                .getModuleInfo(TwoLevelList.class);
-        final ModuleInfoBackedContext context = ModuleInfoBackedContext.create();
-        context.registerModuleInfo(moduleInfo);
-        this.schemaContext = context.tryToCreateSchemaContext().get();
-
-        this.dclExecutorService = new TestDCLExecutorService(
-                SpecialExecutors.newBlockingBoundedFastThreadPool(1, 10, "DCL",
-                    AbstractDataTreeChangeListenerTest.class));
-
-        this.datastore = new InMemoryDOMDataStore("TEST", this.dclExecutorService);
-        this.datastore.onGlobalContextUpdated(this.schemaContext);
-    }
-
-    @After
-    public void tearDown() {
-        if (this.dclExecutorService != null) {
-            this.dclExecutorService.shutdownNow();
-        }
-    }
-
-    /**
-     * Creates a new test task. The task will operate on the backing datastore
-     * and will use the proper background executor service.
-     *
-     * @return Test task initialized to clean up {@link #TOP_LEVEL} and its
-     *         children.
-     */
-    public final DatastoreTestTask newTestTask() {
-        return new DatastoreTestTask(this.datastore, this.dclExecutorService).cleanup(DatastoreTestTask
-                .simpleDelete(TOP_LEVEL));
-    }
-
-
-    public static final YangInstanceIdentifier path(final String topName,
-            final String nestedName) {
-        return path(topName).node(NestedList.QNAME).node(
-                new NodeIdentifierWithPredicates(NestedList.QNAME, NAME_QNAME,
-                        nestedName));
-    }
-
-    public static final YangInstanceIdentifier path(final String topName) {
-        return TOP_LEVEL.node(TopLevelList.QNAME).node(
-                new NodeIdentifierWithPredicates(TopLevelList.QNAME,
-                        NAME_QNAME, topName));
-    }
-
-    protected static DataContainerNodeAttrBuilder<NodeIdentifier, ContainerNode> top() {
-        return Builders.containerBuilder().withNodeIdentifier(
-                new NodeIdentifier(Top.QNAME));
-    }
-
-
-
-    protected static void assertEmpty(final Collection<?> set) {
-        Assert.assertTrue(set.isEmpty());
-    }
-
-    protected static void assertEmpty(final Map<?,?> set) {
-        Assert.assertTrue(set.isEmpty());
-    }
-
-    protected static <K> void assertContains(final Collection<K> set, final K... values) {
-        for (final K key : values) {
-            Assert.assertTrue(set.contains(key));
-        }
-
-    }
-
-    protected static <K> void assertContains(final Map<K,?> map, final K... values) {
-        for (final K key : values) {
-            Assert.assertTrue(map.containsKey(key));
-        }
-    }
-
-    protected static <K> void assertNotContains(final Collection<K> set, final K... values) {
-        for (final K key : values) {
-            Assert.assertFalse(set.contains(key));
-        }
-    }
-
-    protected static <K> void assertNotContains(final Map<K,?> map, final K... values) {
-        for (final K key : values) {
-            Assert.assertFalse(map.containsKey(key));
-        }
-    }
-
-    protected static CollectionNodeBuilder<MapEntryNode, MapNode> topLevelMap() {
-        return ImmutableNodes.mapNodeBuilder(TopLevelList.QNAME);
-    }
-
-    protected static CollectionNodeBuilder<MapEntryNode, OrderedMapNode> nestedMap() {
-        return Builders.orderedMapBuilder().withNodeIdentifier(new NodeIdentifier(NestedList.QNAME));
-    }
-
-    public static DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode> topLevelList(
-            final String key) {
-        return ImmutableNodes.mapEntryBuilder(TopLevelList.QNAME, NAME_QNAME,
-                key);
-    }
-
-    public static DataContainerNodeBuilder<NodeIdentifierWithPredicates, MapEntryNode> nestedList(
-            final String key) {
-        return ImmutableNodes
-                .mapEntryBuilder(NestedList.QNAME, NAME_QNAME, key);
-    }
-
-    public static final WriteTransactionCustomizer writeOneTopMultipleNested(
-            final String topName, final String... nestedName) {
-        final CollectionNodeBuilder<MapEntryNode, OrderedMapNode> nestedMapBuilder = nestedMap();
-        for (final String nestedItem : nestedName) {
-            nestedMapBuilder.addChild(nestedList(nestedItem).build());
-        }
-
-        final ContainerNode data = top().addChild(
-                topLevelMap().addChild(
-                        topLevelList(topName)
-                                .addChild(nestedMapBuilder.build()).build())
-                        .build()).build();
-
-        return DatastoreTestTask.simpleWrite(TOP_LEVEL, data);
-    }
-
-    public static final  WriteTransactionCustomizer deleteNested(final String topName,
-            final String nestedName) {
-        return DatastoreTestTask.simpleDelete(path(topName, nestedName));
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DatastoreTestTask.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DatastoreTestTask.java
deleted file mode 100644 (file)
index a62c0ba..0000000
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.SettableFuture;
-import com.google.common.util.concurrent.Uninterruptibles;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.function.Function;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.sal.core.spi.data.DOMStore;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.ModificationType;
-
-public class DatastoreTestTask {
-
-    private final DOMStore store;
-
-    private WriteTransactionCustomizer setup;
-    private WriteTransactionCustomizer write;
-    private ReadTransactionVerifier read;
-    private WriteTransactionCustomizer cleanup;
-    private YangInstanceIdentifier changePath;
-    private DOMStoreTreeChangePublisher storeTreeChangePublisher;
-    private ChangeEventListener internalListener;
-    private final TestDCLExecutorService dclExecutorService;
-
-    public DatastoreTestTask(final DOMStore datastore, final TestDCLExecutorService dclExecutorService) {
-        this.store = datastore;
-        this.dclExecutorService = dclExecutorService;
-    }
-
-    @SafeVarargs
-    public final DatastoreTestTask changeListener(final YangInstanceIdentifier path,
-            Function<DataTreeCandidate, Boolean>... matchers) {
-        assertTrue(store instanceof DOMStoreTreeChangePublisher);
-        this.storeTreeChangePublisher = (DOMStoreTreeChangePublisher)store;
-        this.changePath = path;
-        this.internalListener = new ChangeEventListener(matchers);
-        return this;
-    }
-
-    public static Function<DataTreeCandidate, Boolean> added(YangInstanceIdentifier path) {
-        return candidate -> candidate.getRootNode().getModificationType() == ModificationType.WRITE
-                && path.equals(candidate.getRootPath()) && !candidate.getRootNode().getDataBefore().isPresent()
-                && candidate.getRootNode().getDataAfter().isPresent();
-    }
-
-    public static Function<DataTreeCandidate, Boolean> replaced(YangInstanceIdentifier path) {
-        return candidate -> candidate.getRootNode().getModificationType() == ModificationType.WRITE
-                && path.equals(candidate.getRootPath()) && candidate.getRootNode().getDataBefore().isPresent()
-                && candidate.getRootNode().getDataAfter().isPresent();
-    }
-
-    public static Function<DataTreeCandidate, Boolean> deleted(YangInstanceIdentifier path) {
-        return candidate -> candidate.getRootNode().getModificationType() == ModificationType.DELETE
-                && path.equals(candidate.getRootPath()) && candidate.getRootNode().getDataBefore().isPresent()
-                && !candidate.getRootNode().getDataAfter().isPresent();
-    }
-
-    public static Function<DataTreeCandidate, Boolean> subtreeModified(YangInstanceIdentifier path) {
-        return candidate -> candidate.getRootNode().getModificationType() == ModificationType.SUBTREE_MODIFIED
-                && path.equals(candidate.getRootPath()) && candidate.getRootNode().getDataBefore().isPresent()
-                && candidate.getRootNode().getDataAfter().isPresent();
-    }
-
-    public DatastoreTestTask setup(final WriteTransactionCustomizer customizer) {
-        this.setup = customizer;
-        return this;
-    }
-
-    public DatastoreTestTask test(final WriteTransactionCustomizer customizer) {
-        this.write = customizer;
-        return this;
-    }
-
-    public DatastoreTestTask read(final ReadTransactionVerifier customizer) {
-        this.read = customizer;
-        return this;
-    }
-
-    public DatastoreTestTask cleanup(final WriteTransactionCustomizer customizer) {
-        this.cleanup = customizer;
-        return this;
-    }
-
-    public void run() throws Exception {
-        if (setup != null) {
-            execute(setup);
-        }
-        ListenerRegistration<ChangeEventListener> registration = null;
-        if (changePath != null) {
-            registration = storeTreeChangePublisher.registerTreeChangeListener(changePath, internalListener);
-        }
-
-        Preconditions.checkState(write != null, "Write Transaction must be set.");
-
-        dclExecutorService.afterTestSetup();
-
-        execute(write);
-        if (registration != null) {
-            registration.close();
-        }
-
-        if (read != null) {
-            read.verify(store.newReadOnlyTransaction());
-        }
-        if (cleanup != null) {
-            execute(cleanup);
-        }
-    }
-
-    public void verifyChangeEvents() {
-        internalListener.verifyChangeEvents();
-    }
-
-    public void verifyNoChangeEvent() {
-        internalListener.verifyNoChangeEvent();
-    }
-
-    private void execute(final WriteTransactionCustomizer writeCustomizer) throws InterruptedException,
-            ExecutionException {
-        DOMStoreReadWriteTransaction tx = store.newReadWriteTransaction();
-        writeCustomizer.customize(tx);
-        DOMStoreThreePhaseCommitCohort cohort = tx.ready();
-        assertTrue(cohort.canCommit().get());
-        cohort.preCommit().get();
-        cohort.commit().get();
-    }
-
-    public interface WriteTransactionCustomizer {
-        void customize(DOMStoreReadWriteTransaction tx);
-    }
-
-    public interface ReadTransactionVerifier {
-        void verify(DOMStoreReadTransaction tx);
-    }
-
-    private final class ChangeEventListener implements DOMDataTreeChangeListener {
-
-        final SettableFuture<Collection<DataTreeCandidate>> future = SettableFuture.create();
-        final Collection<DataTreeCandidate> accumulatedChanges = new ArrayList<>();
-        final Function<DataTreeCandidate, Boolean>[] matchers;
-        final int expChangeCount;
-
-        ChangeEventListener(Function<DataTreeCandidate, Boolean>[] matchers) {
-            this.expChangeCount = matchers.length;
-            this.matchers = matchers;
-        }
-
-        Collection<DataTreeCandidate> changes() {
-            try {
-                Collection<DataTreeCandidate> changes = internalListener.future.get(10, TimeUnit.SECONDS);
-                Uninterruptibles.sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
-                return changes;
-            } catch (TimeoutException e) {
-                throw new AssertionError(String.format(
-                        "Data tree change notifications not received. Expected: %s. Actual: %s - %s",
-                        expChangeCount, accumulatedChanges.size(), accumulatedChanges), e);
-            } catch (InterruptedException | ExecutionException e) {
-                throw new AssertionError("Data tree change notifications failed", e);
-            }
-        }
-
-        void verifyChangeEvents() {
-            Collection<DataTreeCandidate> changes = new ArrayList<>(changes());
-            Iterator<DataTreeCandidate> iter = changes.iterator();
-            while (iter.hasNext()) {
-                DataTreeCandidate dataTreeModification = iter.next();
-                for (Function<DataTreeCandidate, Boolean> matcher: matchers) {
-                    if (matcher.apply(dataTreeModification)) {
-                        iter.remove();
-                        break;
-                    }
-                }
-            }
-
-            if (!changes.isEmpty()) {
-                DataTreeCandidate mod = changes.iterator().next();
-                fail(String.format("Received unexpected notification: type: %s, path: %s, before: %s, after: %s",
-                        mod.getRootNode().getModificationType(), mod.getRootPath(),
-                        mod.getRootNode().getDataBefore(), mod.getRootNode().getDataAfter()));
-            }
-        }
-
-        void verifyNoChangeEvent() {
-            try {
-                Object unexpected = internalListener.future.get(500, TimeUnit.MILLISECONDS);
-                fail("Got unexpected Data tree change notifications: " + unexpected);
-            } catch (TimeoutException e) {
-                // Expected
-            } catch (InterruptedException | ExecutionException e) {
-                throw new AssertionError("Data tree change notifications failed", e);
-            }
-        }
-
-        @Override
-        public void onDataTreeChanged(Collection<DataTreeCandidate> changes) {
-            synchronized (accumulatedChanges) {
-                accumulatedChanges.addAll(changes);
-                if (expChangeCount == accumulatedChanges.size()) {
-                    future.set(new ArrayList<>(accumulatedChanges));
-                }
-            }
-        }
-    }
-
-    public static final WriteTransactionCustomizer simpleWrite(final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        return tx -> tx.write(path, data);
-    }
-
-    public static final WriteTransactionCustomizer simpleMerge(final YangInstanceIdentifier path,
-            final NormalizedNode<?, ?> data) {
-        return tx -> tx.merge(path, data);
-    }
-
-    public static final WriteTransactionCustomizer simpleDelete(final YangInstanceIdentifier path) {
-        return tx -> tx.delete(path);
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DefaultDataTreeChangeListenerTestSuite.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DefaultDataTreeChangeListenerTestSuite.java
deleted file mode 100644 (file)
index 2e92044..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import org.junit.Test;
-
-/**
- * Base template for a test suite for testing DataTreeChangeListener functionality.
- */
-public abstract class DefaultDataTreeChangeListenerTestSuite extends AbstractDataTreeChangeListenerTest {
-
-    protected static final String FOO_SIBLING = "foo-sibling";
-
-    @Test
-    public final void putTopLevelOneNested() throws Exception {
-
-        DatastoreTestTask task = newTestTask().test(writeOneTopMultipleNested(FOO, BAR));
-        putTopLevelOneNestedSetup(task);
-        task.run();
-        putTopLevelOneNestedVerify(task);
-    }
-
-    protected abstract void putTopLevelOneNestedSetup(DatastoreTestTask task);
-
-    protected abstract void putTopLevelOneNestedVerify(DatastoreTestTask task);
-
-    @Test
-    public final void existingTopWriteSibling() throws Exception {
-        DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO)).test(
-            tx -> tx.write(path(FOO_SIBLING), topLevelList(FOO_SIBLING).build()));
-        existingTopWriteSiblingSetup(task);
-        task.run();
-        existingTopWriteSiblingVerify(task);
-    }
-
-    protected abstract void existingTopWriteSiblingSetup(DatastoreTestTask task);
-
-    protected abstract void existingTopWriteSiblingVerify(DatastoreTestTask task);
-
-    @Test
-    public final void existingTopWriteTwoNested() throws Exception {
-        DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO)).test(
-            tx -> {
-                tx.write(path(FOO,BAR), nestedList(BAR).build());
-                tx.write(path(FOO,BAZ), nestedList(BAZ).build());
-            });
-        existingTopWriteTwoNestedSetup(task);
-        task.run();
-        existingTopWriteTwoNestedVerify(task);
-    }
-
-    protected abstract void existingTopWriteTwoNestedSetup(DatastoreTestTask task);
-
-    protected abstract void existingTopWriteTwoNestedVerify(DatastoreTestTask task);
-
-
-    @Test
-    public final void existingOneNestedWriteAdditionalNested() throws Exception {
-        DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR)).test(
-            tx -> tx.write(path(FOO,BAZ), nestedList(BAZ).build()));
-        existingOneNestedWriteAdditionalNestedSetup(task);
-        task.run();
-        existingOneNestedWriteAdditionalNestedVerify(task);
-    }
-
-    protected abstract void existingOneNestedWriteAdditionalNestedSetup(DatastoreTestTask task);
-
-    protected abstract void existingOneNestedWriteAdditionalNestedVerify(DatastoreTestTask task);
-
-    @Test
-    public final void replaceTopLevelNestedChanged() throws Exception {
-        DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR)).test(
-                writeOneTopMultipleNested(FOO, BAZ));
-        replaceTopLevelNestedSetup(task);
-        task.run();
-        replaceTopLevelNestedVerify(task);
-    }
-
-    protected abstract void replaceTopLevelNestedSetup(DatastoreTestTask task);
-
-    protected abstract void replaceTopLevelNestedVerify(DatastoreTestTask task);
-
-    @Test
-    public final void putTopLevelWithTwoNested() throws Exception {
-
-        DatastoreTestTask task = newTestTask().test(writeOneTopMultipleNested(FOO, BAR, BAZ));
-        putTopLevelWithTwoNestedSetup(task);
-        task.run();
-        putTopLevelWithTwoNestedVerify(task);
-    }
-
-    protected abstract void putTopLevelWithTwoNestedSetup(DatastoreTestTask task);
-
-    protected abstract void putTopLevelWithTwoNestedVerify(DatastoreTestTask task);
-
-    @Test
-    public final void twoNestedExistsOneIsDeleted() throws Exception {
-
-        DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR, BAZ)).test(
-                deleteNested(FOO, BAZ));
-        twoNestedExistsOneIsDeletedSetup(task);
-        task.run();
-        twoNestedExistsOneIsDeletedVerify(task);
-    }
-
-    protected abstract void twoNestedExistsOneIsDeletedSetup(DatastoreTestTask task);
-
-    protected abstract void twoNestedExistsOneIsDeletedVerify(DatastoreTestTask task);
-
-    @Test
-    public final void nestedListExistsRootDeleted() throws Exception {
-
-        DatastoreTestTask task = newTestTask().cleanup(null).setup(writeOneTopMultipleNested(FOO, BAR, BAZ))
-                .test(DatastoreTestTask.simpleDelete(TOP_LEVEL));
-        nestedListExistsRootDeletedSetup(task);
-        task.run();
-        nestedListExistsRootDeletedVerify(task);
-    }
-
-    protected abstract void nestedListExistsRootDeletedSetup(DatastoreTestTask task);
-
-    protected abstract void nestedListExistsRootDeletedVerify(DatastoreTestTask task);
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDataStoreTest.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDataStoreTest.java
deleted file mode 100644 (file)
index 4b80a16..0000000
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.ExecutionException;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedTransactions;
-import org.opendaylight.controller.sal.core.spi.data.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class InMemoryDataStoreTest {
-
-    private SchemaContext schemaContext;
-    private InMemoryDOMDataStore domStore;
-
-    @Before
-    public void setupStore() {
-        domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.newDirectExecutorService());
-        schemaContext = TestModel.createTestContext();
-        domStore.onGlobalContextUpdated(schemaContext);
-    }
-
-    @Test
-    public void testTransactionIsolation() throws InterruptedException, ExecutionException {
-
-        assertNotNull(domStore);
-
-        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
-        assertNotNull(readTx);
-
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        assertNotNull(writeTx);
-
-        /*
-         * Writes /test in writeTx
-         */
-        NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        writeTx.write(TestModel.TEST_PATH, testNode);
-
-        /*
-         * Reads /test from writeTx; the read should return the container.
-         */
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = writeTx.read(TestModel.TEST_PATH);
-        assertEquals("read: isPresent", true, writeTxContainer.get().isPresent());
-        assertEquals("read: data", testNode, writeTxContainer.get().get());
-
-        /*
-         * Reads /test from readTx; the read should return absent.
-         */
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> readTxContainer = readTx.read(TestModel.TEST_PATH);
-        assertEquals("read: isPresent", false, readTxContainer.get().isPresent());
-    }
-
-    @Test
-    public void testTransactionCommit() throws InterruptedException, ExecutionException {
-
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        assertNotNull(writeTx);
-
-        /*
-         * Writes /test in writeTx
-         */
-        NormalizedNode<?, ?> testNode = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-        writeTx.write(TestModel.TEST_PATH, testNode);
-
-        /*
-         * Reads /test from writeTx; the read should return the container.
-         */
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = writeTx.read(TestModel.TEST_PATH);
-        assertEquals("read: isPresent", true, writeTxContainer.get().isPresent());
-        assertEquals("read: data", testNode, writeTxContainer.get().get());
-
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-
-        assertThreePhaseCommit(cohort);
-
-        Optional<NormalizedNode<?, ?>> afterCommitRead = domStore.newReadOnlyTransaction().read(TestModel.TEST_PATH)
-                .get();
-        assertEquals("After commit read: isPresent", true, afterCommitRead.isPresent());
-        assertEquals("After commit read: data", testNode, afterCommitRead.get());
-    }
-
-    @Test
-    public void testDelete() throws Exception {
-
-        DOMStoreWriteTransaction writeTx = domStore.newWriteOnlyTransaction();
-        assertNotNull(writeTx);
-
-        // Write /test and commit
-
-        writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        assertThreePhaseCommit(writeTx.ready());
-
-        Optional<NormalizedNode<?, ?>> afterCommitRead = domStore.newReadOnlyTransaction().read(TestModel.TEST_PATH)
-                .get();
-        assertEquals("After commit read: isPresent", true, afterCommitRead.isPresent());
-
-        // Delete /test and verify
-
-        writeTx = domStore.newWriteOnlyTransaction();
-
-        writeTx.delete(TestModel.TEST_PATH);
-
-        assertThreePhaseCommit(writeTx.ready());
-
-        afterCommitRead = domStore.newReadOnlyTransaction().read(TestModel.TEST_PATH).get();
-        assertEquals("After commit read: isPresent", false, afterCommitRead.isPresent());
-    }
-
-    @Test
-    public void testMerge() throws Exception {
-
-        DOMStoreWriteTransaction writeTx = domStore.newWriteOnlyTransaction();
-        assertNotNull(writeTx);
-
-        ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
-                .addChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
-                        .addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)).build())
-                .build();
-
-        writeTx.merge(TestModel.TEST_PATH, containerNode);
-
-        assertThreePhaseCommit(writeTx.ready());
-
-        Optional<NormalizedNode<?, ?>> afterCommitRead = domStore.newReadOnlyTransaction().read(TestModel.TEST_PATH)
-                .get();
-        assertEquals("After commit read: isPresent", true, afterCommitRead.isPresent());
-        assertEquals("After commit read: data", containerNode, afterCommitRead.get());
-
-        // Merge a new list entry node
-
-        writeTx = domStore.newWriteOnlyTransaction();
-        assertNotNull(writeTx);
-
-        containerNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
-                .addChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
-                        .addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1))
-                        .addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 2)).build())
-                .build();
-
-        writeTx.merge(TestModel.TEST_PATH, containerNode);
-
-        assertThreePhaseCommit(writeTx.ready());
-
-        afterCommitRead = domStore.newReadOnlyTransaction().read(TestModel.TEST_PATH).get();
-        assertEquals("After commit read: isPresent", true, afterCommitRead.isPresent());
-        assertEquals("After commit read: data", containerNode, afterCommitRead.get());
-    }
-
-
-    @Test
-    public void testExistsForExistingData() throws Exception {
-
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        assertNotNull(writeTx);
-
-        ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
-                .addChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
-                        .addChild(ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)).build())
-                .build();
-
-        writeTx.merge(TestModel.TEST_PATH, containerNode);
-
-        CheckedFuture<Boolean, ReadFailedException> exists =
-            writeTx.exists(TestModel.TEST_PATH);
-
-        assertEquals(true, exists.checkedGet());
-
-        DOMStoreThreePhaseCommitCohort ready = writeTx.ready();
-
-        ready.preCommit().get();
-
-        ready.commit().get();
-
-        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
-        assertNotNull(readTx);
-
-        exists =
-            readTx.exists(TestModel.TEST_PATH);
-
-        assertEquals(true, exists.checkedGet());
-    }
-
-    @Test
-    public void testExistsForNonExistingData() throws Exception {
-
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        assertNotNull(writeTx);
-
-        CheckedFuture<Boolean, ReadFailedException> exists =
-            writeTx.exists(TestModel.TEST_PATH);
-
-        assertEquals(false, exists.checkedGet());
-
-        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
-        assertNotNull(readTx);
-
-        exists =
-            readTx.exists(TestModel.TEST_PATH);
-
-        assertEquals(false, exists.checkedGet());
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testExistsThrowsReadFailedException() throws Exception {
-
-        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
-        assertNotNull(readTx);
-
-        readTx.close();
-
-        readTx.exists(TestModel.TEST_PATH).checkedGet();
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testReadWithReadOnlyTransactionClosed() throws Exception {
-
-        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
-        assertNotNull(readTx);
-
-        readTx.close();
-
-        doReadAndThrowEx(readTx);
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testReadWithReadOnlyTransactionFailure() throws Exception {
-
-        DataTreeSnapshot mockSnapshot = Mockito.mock(DataTreeSnapshot.class);
-        Mockito.doThrow(new RuntimeException("mock ex")).when(mockSnapshot)
-                .readNode(Mockito.any(YangInstanceIdentifier.class));
-
-        DOMStoreReadTransaction readTx = SnapshotBackedTransactions.newReadTransaction("1", true, mockSnapshot);
-
-        doReadAndThrowEx(readTx);
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testReadWithReadWriteTransactionClosed() throws Exception {
-
-        DOMStoreReadTransaction readTx = domStore.newReadWriteTransaction();
-        assertNotNull(readTx);
-
-        readTx.close();
-
-        doReadAndThrowEx(readTx);
-    }
-
-    @Test(expected = ReadFailedException.class)
-    public void testReadWithReadWriteTransactionFailure() throws Exception {
-
-        DataTreeSnapshot mockSnapshot = Mockito.mock(DataTreeSnapshot.class);
-        DataTreeModification mockModification = Mockito.mock(DataTreeModification.class);
-        Mockito.doThrow(new RuntimeException("mock ex")).when(mockModification)
-                .readNode(Mockito.any(YangInstanceIdentifier.class));
-        Mockito.doReturn(mockModification).when(mockSnapshot).newModification();
-        @SuppressWarnings("unchecked")
-        TransactionReadyPrototype<String> mockReady = Mockito.mock(TransactionReadyPrototype.class);
-        DOMStoreReadTransaction readTx = SnapshotBackedTransactions.newReadWriteTransaction("1", false, mockSnapshot,
-                mockReady);
-
-        doReadAndThrowEx(readTx);
-    }
-
-    private static void doReadAndThrowEx(final DOMStoreReadTransaction readTx) throws ReadFailedException  {
-        readTx.read(TestModel.TEST_PATH).checkedGet();
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testWriteWithTransactionReady() {
-
-        DOMStoreWriteTransaction writeTx = domStore.newWriteOnlyTransaction();
-
-        writeTx.ready();
-
-        // Should throw ex
-        writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-    }
-
-    @Test(expected = IllegalStateException.class)
-    public void testReadyWithTransactionAlreadyReady() {
-
-        DOMStoreWriteTransaction writeTx = domStore.newWriteOnlyTransaction();
-
-        writeTx.ready();
-
-        // Should throw ex
-        writeTx.ready();
-    }
-
-    @Test
-    public void testReadyWithMissingMandatoryData() throws InterruptedException {
-        DOMStoreWriteTransaction writeTx = domStore.newWriteOnlyTransaction();
-        NormalizedNode<?, ?> testNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(TestModel.MANDATORY_DATA_TEST_QNAME))
-                .addChild(ImmutableNodes.leafNode(TestModel.OPTIONAL_QNAME, "data"))
-                .build();
-        writeTx.write(TestModel.MANDATORY_DATA_TEST_PATH, testNode);
-        DOMStoreThreePhaseCommitCohort ready = writeTx.ready();
-        try {
-            ready.canCommit().get();
-            Assert.fail("Expected exception on canCommit");
-        } catch (ExecutionException e) {
-            // nop
-        }
-    }
-
-    @Test
-    public void testTransactionAbort() throws InterruptedException, ExecutionException {
-
-        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
-        assertNotNull(writeTx);
-
-        assertTestContainerWrite(writeTx);
-
-        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
-
-        assertTrue(cohort.canCommit().get().booleanValue());
-        cohort.preCommit().get();
-        cohort.abort().get();
-
-        Optional<NormalizedNode<?, ?>> afterCommitRead = domStore.newReadOnlyTransaction().read(TestModel.TEST_PATH)
-                .get();
-        assertFalse(afterCommitRead.isPresent());
-    }
-
-    @Test
-    public void testTransactionChain() throws InterruptedException, ExecutionException {
-        DOMStoreTransactionChain txChain = domStore.createTransactionChain();
-        assertNotNull(txChain);
-
-        /*
-         * We allocate a new read-write transaction and write /test.
-         */
-        DOMStoreReadWriteTransaction firstTx = txChain.newReadWriteTransaction();
-        assertTestContainerWrite(firstTx);
-
-        /*
-         * Once the first transaction is marked as ready, we are able to allocate
-         * chained transactions.
-         */
-        final DOMStoreThreePhaseCommitCohort firstWriteTxCohort = firstTx.ready();
-
-        /*
-         * We allocate a chained read-only transaction; note the first one is
-         * still not committed to the datastore.
-         */
-        DOMStoreReadTransaction secondReadTx = txChain.newReadOnlyTransaction();
-
-        /*
-         * We test whether we are able to read data from the transaction; the read
-         * should not fail since we are using a chained transaction.
-         */
-        assertTestContainerExists(secondReadTx);
-
-        /*
-         * We allocate the next transaction, which is still based on the first
-         * one, but is read-write.
-         */
-        DOMStoreReadWriteTransaction thirdDeleteTx = txChain.newReadWriteTransaction();
-
-        /*
-         * We test the existence of /test in the third transaction; the container
-         * should still be visible from the first one (which is still uncommitted).
-         */
-        assertTestContainerExists(thirdDeleteTx);
-
-        /*
-         * We delete the node in the third transaction.
-         */
-        thirdDeleteTx.delete(TestModel.TEST_PATH);
-
-        /*
-         * The third transaction is sealed.
-         */
-        DOMStoreThreePhaseCommitCohort thirdDeleteTxCohort = thirdDeleteTx.ready();
-
-        /*
-         * We commit the first transaction.
-         */
-        assertThreePhaseCommit(firstWriteTxCohort);
-
-        // Allocates a store transaction.
-        DOMStoreReadTransaction storeReadTx = domStore.newReadOnlyTransaction();
-
-        /*
-         * We verify the transaction is committed to the store; the container
-         * should exist in the datastore.
-         */
-        assertTestContainerExists(storeReadTx);
-
-        /*
-         * We commit the third transaction.
-         */
-        assertThreePhaseCommit(thirdDeleteTxCohort);
-    }
-
-    @Test
-    @Ignore
-    public void testTransactionConflict() throws InterruptedException, ExecutionException {
-        DOMStoreReadWriteTransaction txOne = domStore.newReadWriteTransaction();
-        DOMStoreReadWriteTransaction txTwo = domStore.newReadWriteTransaction();
-        assertTestContainerWrite(txOne);
-        assertTestContainerWrite(txTwo);
-
-        /*
-         * Commits txOne.
-         */
-        assertThreePhaseCommit(txOne.ready());
-
-        /*
-         * Asserts that txTwo could not be committed.
-         */
-        assertFalse(txTwo.ready().canCommit().get());
-    }
-
-    private static void assertThreePhaseCommit(final DOMStoreThreePhaseCommitCohort cohort)
-            throws InterruptedException, ExecutionException {
-        assertTrue(cohort.canCommit().get().booleanValue());
-        cohort.preCommit().get();
-        cohort.commit().get();
-    }
-
-    private static Optional<NormalizedNode<?, ?>> assertTestContainerWrite(final DOMStoreReadWriteTransaction writeTx)
-            throws InterruptedException, ExecutionException {
-        /*
-         * Writes /test in writeTx
-         */
-        writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
-
-        return assertTestContainerExists(writeTx);
-    }
-
-    /**
-     * Reads /test from readTx; the read should return the container.
-     */
-    private static Optional<NormalizedNode<?, ?>> assertTestContainerExists(final DOMStoreReadTransaction readTx)
-            throws InterruptedException, ExecutionException {
-
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> writeTxContainer = readTx.read(TestModel.TEST_PATH);
-        assertTrue(writeTxContainer.get().isPresent());
-        return writeTxContainer.get();
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/RootScopeSubtreeTest.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/RootScopeSubtreeTest.java
deleted file mode 100644 (file)
index c5af406..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import static org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.added;
-import static org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.deleted;
-import static org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.replaced;
-import static org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.subtreeModified;
-
-public class RootScopeSubtreeTest extends DefaultDataTreeChangeListenerTestSuite {
-
-    @Override
-    protected void putTopLevelOneNestedSetup(final DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL));
-    }
-
-    @Override
-    protected void putTopLevelOneNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void replaceTopLevelNestedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL), replaced(TOP_LEVEL));
-    }
-
-    @Override
-    protected void replaceTopLevelNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void putTopLevelWithTwoNestedSetup(final DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL));
-    }
-
-    @Override
-    protected void putTopLevelWithTwoNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void twoNestedExistsOneIsDeletedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL), subtreeModified(TOP_LEVEL));
-    }
-
-    @Override
-    protected void twoNestedExistsOneIsDeletedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void nestedListExistsRootDeletedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL), deleted(TOP_LEVEL));
-    }
-
-    @Override
-    protected void nestedListExistsRootDeletedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void existingOneNestedWriteAdditionalNestedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL), subtreeModified(TOP_LEVEL));
-    }
-
-    @Override
-    protected void existingOneNestedWriteAdditionalNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void existingTopWriteTwoNestedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL), subtreeModified(TOP_LEVEL));
-    }
-
-    @Override
-    protected void existingTopWriteTwoNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void existingTopWriteSiblingSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL, added(TOP_LEVEL), subtreeModified(TOP_LEVEL));
-    }
-
-    @Override
-    protected void existingTopWriteSiblingVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/SchemaUpdateForTransactionTest.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/SchemaUpdateForTransactionTest.java
deleted file mode 100644 (file)
index 3054e93..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import static org.junit.Assert.assertNotNull;
-
-import com.google.common.util.concurrent.MoreExecutors;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.mdsal.binding.generator.impl.ModuleInfoBackedContext;
-import org.opendaylight.mdsal.binding.spec.reflect.BindingReflections;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.bi.ba.rpcservice.rev140701.RockTheHouseInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
-import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-public class SchemaUpdateForTransactionTest {
-
-    private static final YangInstanceIdentifier TOP_PATH = YangInstanceIdentifier.of(Top.QNAME);
-    private SchemaContext schemaContext;
-    private InMemoryDOMDataStore domStore;
-
-    @Before
-    public void setupStore() throws Exception {
-        this.domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.newDirectExecutorService());
-        loadSchemas(RockTheHouseInput.class);
-    }
-
-    public void loadSchemas(final Class<?>... classes) throws Exception {
-        final ModuleInfoBackedContext context = ModuleInfoBackedContext.create();
-        for (final Class<?> clz : classes) {
-            YangModuleInfo moduleInfo = BindingReflections.getModuleInfo(clz);
-
-            context.registerModuleInfo(moduleInfo);
-        }
-        this.schemaContext = context.tryToCreateSchemaContext().get();
-        this.domStore.onGlobalContextUpdated(this.schemaContext);
-    }
-
-    /**
-     * Tests allocating a transaction while the schema context does not yet
-     * contain the module necessary for the client write, then triggering an
-     * update of the global schema context, and then performing the write
-     * (according to the new module).
-     *
-     * <p>
-     * If the transaction was not modified between its allocation and the
-     * schema context update, it is safe to switch it to the new schema
-     * context (i.e. the result is the same as if it had been allocated after
-     * the update).
-     */
-    @Test
-    public void testTransactionSchemaUpdate() throws Exception {
-
-        assertNotNull(this.domStore);
-
-        // We allocate transaction, initial schema context does not
-        // contain Lists model
-        final DOMStoreReadWriteTransaction writeTx = this.domStore.newReadWriteTransaction();
-        assertNotNull(writeTx);
-
-        // we trigger schema context update to contain Lists model
-        loadSchemas(RockTheHouseInput.class, Top.class);
-
-        /*
-         * Writes /test in writeTx, this write should not fail
-         * with IllegalArgumentException since /test is in
-         * schema context.
-         */
-        writeTx.write(TOP_PATH, ImmutableNodes.containerNode(Top.QNAME));
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestDCLExecutorService.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestDCLExecutorService.java
deleted file mode 100644 (file)
index cbeff79..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-import java.util.concurrent.ExecutorService;
-
-/**
- * A forwarding Executor used by unit tests for DataChangeListener notifications.
- *
- * @author Thomas Pantelis
- */
-public class TestDCLExecutorService extends ForwardingExecutorService {
-
-    // Start with a same thread executor to avoid timing issues during test setup.
-    private volatile ExecutorService currentExecutor = MoreExecutors.newDirectExecutorService();
-
-    // The real executor to use when test setup is complete.
-    private final ExecutorService postSetupExecutor;
-
-
-    public TestDCLExecutorService(final ExecutorService postSetupExecutor) {
-        this.postSetupExecutor = postSetupExecutor;
-    }
-
-    @Override
-    protected ExecutorService delegate() {
-        return currentExecutor;
-    }
-
-    public void afterTestSetup() {
-        // Test setup complete - switch to the real executor.
-        currentExecutor = postSetupExecutor;
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestModel.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestModel.java
deleted file mode 100644 (file)
index 8e36a8c..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
-
-public final class TestModel {
-
-    public static final QName TEST_QNAME =
-            QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13", "test");
-    public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
-    public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
-    public static final QName OUTER_CHOICE_QNAME = QName.create(TEST_QNAME, "outer-choice");
-    public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
-    public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
-    public static final QName VALUE_QNAME = QName.create(TEST_QNAME, "value");
-    public static final QName TWO_QNAME = QName.create(TEST_QNAME, "two");
-    public static final QName THREE_QNAME = QName.create(TEST_QNAME, "three");
-    public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
-    public static final YangInstanceIdentifier OUTER_LIST_PATH =
-            YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
-
-    public static final QName MANDATORY_DATA_TEST_QNAME =
-            QName.create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test",
-                    "2014-03-13",
-                    "mandatory-data-test");
-    public static final QName OPTIONAL_QNAME = QName.create(MANDATORY_DATA_TEST_QNAME, "optional-data");
-    public static final QName MANDATORY_QNAME = QName.create(MANDATORY_DATA_TEST_QNAME, "mandatory-data");
-    public static final YangInstanceIdentifier MANDATORY_DATA_TEST_PATH =
-            YangInstanceIdentifier.of(MANDATORY_DATA_TEST_QNAME);
-
-    private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
-
-    private TestModel() {
-    }
-
-    public static SchemaContext createTestContext() {
-        return YangParserTestUtils.parseYangResource(DATASTORE_TEST_YANG);
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/WildcardedScopeBaseTest.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/WildcardedScopeBaseTest.java
deleted file mode 100644 (file)
index 23c35f3..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import static org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.added;
-import static org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.deleted;
-import static org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.replaced;
-
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class WildcardedScopeBaseTest extends DefaultDataTreeChangeListenerTestSuite {
-
-    private static final YangInstanceIdentifier TOP_LEVEL_LIST_ALL = TOP_LEVEL.node(TopLevelList.QNAME).node(
-            TopLevelList.QNAME);
-
-    @Override
-    protected void putTopLevelOneNestedSetup(final DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)));
-    }
-
-    @Override
-    protected void putTopLevelOneNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void replaceTopLevelNestedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)), replaced(path(FOO)));
-    }
-
-    @Override
-    protected void replaceTopLevelNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void putTopLevelWithTwoNestedSetup(final DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)));
-    }
-
-    @Override
-    protected void putTopLevelWithTwoNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void twoNestedExistsOneIsDeletedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)));
-    }
-
-    @Override
-    protected void twoNestedExistsOneIsDeletedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void nestedListExistsRootDeletedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)), deleted(path(FOO)));
-    }
-
-    @Override
-    protected void nestedListExistsRootDeletedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void existingOneNestedWriteAdditionalNestedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)));
-    }
-
-    @Override
-    protected void existingOneNestedWriteAdditionalNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void existingTopWriteTwoNestedSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)));
-    }
-
-    @Override
-    protected void existingTopWriteTwoNestedVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-
-    @Override
-    protected void existingTopWriteSiblingSetup(DatastoreTestTask task) {
-        task.changeListener(TOP_LEVEL_LIST_ALL, added(path(FOO)), added(path(FOO_SIBLING)));
-    }
-
-    @Override
-    protected void existingTopWriteSiblingVerify(final DatastoreTestTask task) {
-        task.verifyChangeEvents();
-    }
-}
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/resources/odl-datastore-test.yang b/opendaylight/md-sal/sal-inmemory-datastore/src/test/resources/odl-datastore-test.yang
deleted file mode 100644 (file)
index ffc8fe0..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-module odl-datastore-test {
-    yang-version 1;
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
-    prefix "store-test";
-    
-    revision "2014-03-13" {
-        description "Initial revision.";
-    }
-
-    container test {
-        presence "needs to be present when empty";
-
-        list outer-list {
-            key id;
-            leaf id {
-                type uint16;
-            }
-            choice outer-choice {
-                case one {
-                    leaf one {
-                        type string;
-                    }
-                }
-                case two-three {
-                    leaf two {
-                        type string;
-                    }
-                    leaf three {
-                        type string;
-                    }
-               }
-           }
-           list inner-list {
-                key name;
-                leaf name {
-                    type string;
-                }
-                leaf value {
-                    type string;
-                }
-            }
-        }
-    }
-
-    container mandatory-data-test {
-        presence "needs to be present when empty";
-
-        leaf optional-data {
-            type string;
-        }
-        leaf mandatory-data {
-            type string;
-            mandatory true;
-        }
-    }
-}
index 12be41506f8d276446f93b9497a03951b616e82f..c98a44d8eb5f99d0c55f7c27bf8f63928f64089a 100644 (file)
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>sal-remoterpc-connector</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
     <packaging>bundle</packaging>
 
     <dependencies>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-actor_2.12</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-cluster_2.12</artifactId>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-remote_2.12</artifactId>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-testkit_2.12</artifactId>
-            <scope>test</scope>
+            <groupId>com.typesafe</groupId>
+            <artifactId>config</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-osgi_2.12</artifactId>
+            <groupId>org.eclipse.jdt</groupId>
+            <artifactId>org.eclipse.jdt.annotation</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-slf4j_2.12</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>concepts</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.typesafe.akka</groupId>
-            <artifactId>akka-persistence_2.12</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
         </dependency>
-        <!-- SAL Dependencies -->
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-util</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-           <artifactId>mdsal-dom-api</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-codec-binfmt</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.mdsal</groupId>
-           <artifactId>mdsal-dom-spi</artifactId>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-model-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-impl</artifactId>
+            <artifactId>repackaged-akka</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-clustering-commons</artifactId>
+            <artifactId>sal-common-util</artifactId>
         </dependency>
-        <!-- Yang tools-->
         <dependency>
-            <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-data-api</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-common-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-model-api</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-data-impl</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-spi</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.yangtools</groupId>
-            <artifactId>yang-common</artifactId>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-clustering-commons</artifactId>
         </dependency>
         <dependency>
             <groupId>org.osgi</groupId>
-            <artifactId>org.osgi.core</artifactId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.slf4j</groupId>
-            <artifactId>slf4j-api</artifactId>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.metatype.annotations</artifactId>
         </dependency>
         <dependency>
             <groupId>org.scala-lang</groupId>
             <artifactId>scala-library</artifactId>
         </dependency>
+
         <!-- Test Dependencies -->
         <dependency>
-            <groupId>org.mockito</groupId>
-            <artifactId>mockito-core</artifactId>
-            <scope>test</scope>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-testkit_2.13</artifactId>
         </dependency>
         <dependency>
             <groupId>org.slf4j</groupId>
             <version>1.0</version>
             <scope>test</scope>
         </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>util</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-impl</artifactId>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.yangtools</groupId>
             <artifactId>yang-test-util</artifactId>
             <type>test-jar</type>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>commons-lang</groupId>
-            <artifactId>commons-lang</artifactId>
-            <scope>test</scope>
-        </dependency>
     </dependencies>
 
     <build>
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractRemoteFuture.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractRemoteFuture.java
new file mode 100644 (file)
index 0000000..7fd8598
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.dispatch.OnComplete;
+import com.google.common.util.concurrent.AbstractFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.eclipse.jdt.annotation.Nullable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.ExecutionContext;
+import scala.concurrent.Future;
+
+abstract class AbstractRemoteFuture<T, O, E extends Exception> extends AbstractFuture<O> {
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractRemoteFuture.class);
+
+    private final @NonNull T type;
+
+    AbstractRemoteFuture(final @NonNull T type, final Future<Object> requestFuture) {
+        this.type = requireNonNull(type);
+        requestFuture.onComplete(new FutureUpdater(), ExecutionContext.Implicits$.MODULE$.global());
+    }
+
+    @Override
+    public final O get() throws InterruptedException, ExecutionException {
+        try {
+            return super.get();
+        } catch (ExecutionException e) {
+            throw mapException(e);
+        }
+    }
+
+    @Override
+    public final O get(final long timeout, final TimeUnit unit)
+            throws InterruptedException, ExecutionException, TimeoutException {
+        try {
+            return super.get(timeout, unit);
+        } catch (final ExecutionException e) {
+            throw mapException(e);
+        }
+    }
+
+    @Override
+    protected final boolean set(final O value) {
+        final boolean ret = super.set(value);
+        if (ret) {
+            LOG.debug("Future {} for operation {} successfully completed", this, type);
+        }
+        return ret;
+    }
+
+    final void failNow(final Throwable error) {
+        LOG.debug("Failing future {} for operation {}", this, type, error);
+        setException(error);
+    }
+
+    abstract @Nullable O processReply(Object reply);
+
+    abstract @NonNull Class<E> exceptionClass();
+
+    abstract @NonNull E wrapCause(Throwable cause);
+
+    private ExecutionException mapException(final ExecutionException ex) {
+        final Throwable cause = ex.getCause();
+        return exceptionClass().isInstance(cause) ? ex : new ExecutionException(ex.getMessage(), wrapCause(cause));
+    }
+
+    private final class FutureUpdater extends OnComplete<Object> {
+        @Override
+        public void onComplete(final Throwable error, final Object reply) {
+            if (error == null) {
+                final O result = processReply(reply);
+                if (result != null) {
+                    LOG.debug("Received response for operation {}: result is {}", type, result);
+                    set(result);
+                } else {
+                    failNow(new IllegalStateException("Incorrect reply type " + reply + " from Akka"));
+                }
+            } else {
+                failNow(error);
+            }
+        }
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractRemoteImplementation.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractRemoteImplementation.java
new file mode 100644 (file)
index 0000000..838252b
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.pattern.Patterns;
+import akka.util.Timeout;
+import org.opendaylight.controller.remote.rpc.messages.AbstractExecute;
+import scala.concurrent.Future;
+
+/**
+ * An abstract base class for remote RPC/action implementations.
+ */
+abstract class AbstractRemoteImplementation<T extends AbstractExecute<?, ?>> {
+    // 0 for local, 1 for binding, 2 for remote
+    static final long COST = 2;
+
+    private final ActorRef remoteInvoker;
+    private final Timeout askDuration;
+
+    AbstractRemoteImplementation(final ActorRef remoteInvoker, final RemoteOpsProviderConfig config) {
+        this.remoteInvoker = requireNonNull(remoteInvoker);
+        this.askDuration = config.getAskDuration();
+    }
+
+    final Future<Object> ask(final T message) {
+        return Patterns.ask(remoteInvoker, requireNonNull(message), askDuration);
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OSGiRemoteOpsProvider.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OSGiRemoteOpsProvider.java
new file mode 100644 (file)
index 0000000..9de0152
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
+import org.opendaylight.controller.cluster.ActorSystemProvider;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.Designate;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Component(configurationPid = "org.opendaylight.controller.remoterpc")
+@Designate(ocd = OSGiRemoteOpsProvider.Config.class)
+public final class OSGiRemoteOpsProvider {
+    @ObjectClassDefinition()
+    public @interface Config {
+        @AttributeDefinition(name = "enable-metric-capture")
+        boolean metricCapture() default true;
+        @AttributeDefinition(name = "bounded-mailbox-capacity")
+        int boundedMailboxCapacity() default 1000;
+    }
+
+    private static final Logger LOG = LoggerFactory.getLogger(OSGiRemoteOpsProvider.class);
+
+    private ActorRef opsManager;
+
+    @Activate
+    public OSGiRemoteOpsProvider(@Reference final ActorSystemProvider actorSystemProvider,
+            @Reference final DOMRpcProviderService rpcProviderService, @Reference final DOMRpcService rpcService,
+            @Reference final DOMActionProviderService actionProviderService,
+            @Reference final DOMActionService actionService, final Config config) {
+        LOG.info("Remote Operations service starting");
+        final ActorSystem actorSystem = actorSystemProvider.getActorSystem();
+        final RemoteOpsProviderConfig opsConfig = RemoteOpsProviderConfig.newInstance(actorSystem.name(),
+            config.metricCapture(), config.boundedMailboxCapacity());
+
+        opsManager = actorSystem.actorOf(OpsManager.props(rpcProviderService, rpcService, opsConfig,
+                actionProviderService, actionService), opsConfig.getRpcManagerName());
+        LOG.debug("Ops Manager started at {}", opsManager);
+        LOG.info("Remote Operations service started");
+    }
+
+    @Deactivate
+    void deactivate() {
+        LOG.info("Remote Operations service stopping");
+        LOG.debug("Stopping Ops Manager at {}", opsManager);
+        opsManager.tell(PoisonPill.getInstance(), ActorRef.noSender());
+        opsManager = null;
+        LOG.info("Remote Operations services stopped");
+        LOG.info("Remote Operations service stopped");
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsInvoker.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsInvoker.java
new file mode 100644 (file)
index 0000000..dcb930e
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.actor.Status.Failure;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Collection;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.remote.rpc.messages.ActionResponse;
+import org.opendaylight.controller.remote.rpc.messages.ExecuteAction;
+import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
+import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
+import org.opendaylight.mdsal.dom.api.DOMActionResult;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+
+/**
+ * Actor receiving invocation requests from remote nodes, routing them to
+ * {@link DOMRpcService#invokeRpc(QName, ContainerNode)} and
+ * {@link DOMActionService#invokeAction(Absolute, DOMDataTreeIdentifier, ContainerNode)}.
+ *
+ * <p>
+ * Note that while the two interfaces are very similar, invocation strategies are slightly different due to historic
+ * behavior of RPCs:
+ * <ul>
+ *   <li>RPCs allow both null input and output, and this is passed to the infrastructure. Furthermore any invocation
+ *       which results in errors being reported drops the output content, even if it is present -- which is wrong, as
+ *       'errors' in this case can also be just warnings.</li>
+ *   <li>Actions do not allow null input, but allow null output. If the output is present, it is passed along with any
+ *       errors reported.</li>
+ * </ul>
+ */
+final class OpsInvoker extends AbstractUntypedActor {
+    private final DOMRpcService rpcService;
+    private final DOMActionService actionService;
+
+    private OpsInvoker(final DOMRpcService rpcService, final DOMActionService actionService) {
+        this.rpcService = requireNonNull(rpcService);
+        this.actionService = requireNonNull(actionService);
+    }
+
+    public static Props props(final DOMRpcService rpcService, final DOMActionService actionService) {
+        return Props.create(OpsInvoker.class,
+            requireNonNull(rpcService, "DOMRpcService can not be null"),
+            requireNonNull(actionService, "DOMActionService can not be null"));
+    }
+
+    @Override
+    protected void handleReceive(final Object message) {
+        if (message instanceof ExecuteRpc executeRpc) {
+            LOG.debug("Handling ExecuteRpc message");
+            execute(executeRpc);
+        } else if (message instanceof ExecuteAction executeAction) {
+            execute(executeAction);
+        } else {
+            unknownMessage(message);
+        }
+    }
+
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    private void execute(final ExecuteRpc msg) {
+        LOG.debug("Executing RPC {}", msg.getType());
+        final ActorRef sender = getSender();
+
+        final ListenableFuture<? extends DOMRpcResult> future;
+        try {
+            future = rpcService.invokeRpc(msg.getType(), msg.getInput());
+        } catch (final RuntimeException e) {
+            LOG.debug("Failed to invoke RPC {}", msg.getType(), e);
+            sender.tell(new Failure(e), self());
+            return;
+        }
+
+        Futures.addCallback(future, new AbstractCallback<QName, DOMRpcResult>(getSender(), msg.getType()) {
+            @Override
+            Object nullResponse(final QName type) {
+                LOG.warn("Execution of {} resulted in null result", type);
+                return new RpcResponse(null);
+            }
+
+            @Override
+            Object response(final QName type, final DOMRpcResult result) {
+                final Collection<? extends RpcError> errors = result.errors();
+                return errors.isEmpty() ? new RpcResponse(result.value())
+                        // This is legacy (wrong) behavior, which ignores the fact that errors may be just warnings,
+                        // discarding any output
+                        : new Failure(new RpcErrorsException(String.format("Execution of rpc %s failed", type),
+                            errors));
+            }
+        }, MoreExecutors.directExecutor());
+    }
+
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    private void execute(final ExecuteAction msg) {
+        LOG.debug("Executing Action {}", msg.getType());
+
+        final ActorRef sender = getSender();
+
+        final ListenableFuture<? extends DOMActionResult> future;
+        try {
+            future = actionService.invokeAction(msg.getType(), msg.getPath(), msg.getInput());
+        } catch (final RuntimeException e) {
+            LOG.debug("Failed to invoke action {}", msg.getType(), e);
+            sender.tell(new Failure(e), self());
+            return;
+        }
+
+        Futures.addCallback(future, new AbstractCallback<Absolute, DOMActionResult>(getSender(), msg.getType()) {
+            @Override
+            Object nullResponse(final Absolute type) {
+                throw new IllegalStateException("Null invocation result of action " + type);
+            }
+
+            @Override
+            Object response(final Absolute type, final DOMActionResult result) {
+                final Collection<? extends RpcError> errors = result.getErrors();
+                return errors.isEmpty() ? new ActionResponse(result.getOutput(), result.getErrors())
+                    // This is legacy (wrong) behavior, which ignores the fact that errors may be just warnings,
+                    // discarding any output
+                    : new Failure(new RpcErrorsException(String.format("Execution of action %s failed", type),
+                        errors));
+            }
+        }, MoreExecutors.directExecutor());
+    }
+
+    private abstract class AbstractCallback<T, R> implements FutureCallback<R> {
+        private final ActorRef replyTo;
+        private final T type;
+
+        AbstractCallback(final ActorRef replyTo, final T type) {
+            this.replyTo = requireNonNull(replyTo);
+            this.type = requireNonNull(type);
+        }
+
+        @Override
+        public final void onSuccess(final R result) {
+            final Object response;
+            if (result == null) {
+                // This shouldn't happen but the FutureCallback annotates the result param with Nullable so handle null
+                // here to avoid FindBugs warning.
+                response = nullResponse(type);
+            } else {
+                response = response(type, result);
+            }
+
+            LOG.debug("Sending response for execution of {} : {}", type, response);
+            replyTo.tell(response, self());
+        }
+
+        @Override
+        public final void onFailure(final Throwable failure) {
+            LOG.debug("Failed to execute operation {}", type, failure);
+            LOG.error("Failed to execute operation {} due to {}. More details are available on DEBUG level.", type,
+                Throwables.getRootCause(failure).getMessage());
+            replyTo.tell(new Failure(failure), self());
+        }
+
+        abstract @NonNull Object nullResponse(@NonNull T type);
+
+        abstract @NonNull Object response(@NonNull T type, @NonNull R result);
+    }
+}
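
Editor's note on the class comment above: the two reply paths differ in how they treat a null result (RPCs tolerate it, actions treat it as a bug), while both still follow the legacy rule that any reported error, even a mere warning, discards the output in favour of a failure. The following stand-alone sketch restates that decision logic with plain JDK types; Reply and the method names are illustrative stand-ins, not part of this patch or of the mdsal API.

    import java.util.List;

    // Editor's sketch: hypothetical stand-in for the DOMRpcResult/DOMActionResult mapping above.
    final class ReplyMappingSketch {
        record Reply(Object output, List<String> errors) { }

        // Shared legacy rule: any reported error, even a warning, discards the output.
        static Object toResponse(final Reply reply) {
            return reply.errors().isEmpty()
                ? reply.output()
                : new IllegalStateException("execution failed: " + reply.errors());
        }

        // Divergent null handling: RPCs tolerate a null result, actions treat it as a programming error.
        static Object onNullResult(final boolean isAction, final String type) {
            if (isAction) {
                throw new IllegalStateException("Null invocation result of action " + type);
            }
            return null; // stands in for new RpcResponse(null)
        }
    }

The real code returns RpcResponse/ActionResponse or an akka Status.Failure instead of these placeholder objects.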
@@ -12,9 +12,14 @@ import static java.util.Objects.requireNonNull;
 
 import akka.actor.ActorRef;
 import java.util.Collection;
+import java.util.Set;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
+import org.opendaylight.mdsal.dom.api.DOMActionAvailabilityExtension;
+import org.opendaylight.mdsal.dom.api.DOMActionImplementation;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
 import org.opendaylight.mdsal.dom.api.DOMRpcAvailabilityListener;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
@@ -22,17 +27,20 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * A {@link DOMRpcAvailabilityListener} reacting to RPC implementations different than {@link RemoteRpcImplementation}.
- * The knowledge of such implementations is forwarded to {@link RpcRegistry}, which is responsible for advertising
- * their presence to other nodes.
+ * A {@link DOMRpcAvailabilityListener} reacting to RPC implementations different than
+ * {@link RemoteRpcImplementation} or {@link RemoteActionImplementation}.
+ * The knowledge of such implementations is forwarded to {@link RpcRegistry} and {@link ActionRegistry},
+ * which is responsible for advertising their presence to other nodes.
  */
-final class RpcListener implements DOMRpcAvailabilityListener {
-    private static final Logger LOG = LoggerFactory.getLogger(RpcListener.class);
+final class OpsListener implements DOMRpcAvailabilityListener, DOMActionAvailabilityExtension.AvailabilityListener {
+    private static final Logger LOG = LoggerFactory.getLogger(OpsListener.class);
 
     private final ActorRef rpcRegistry;
+    private final ActorRef actionRegistry;
 
-    RpcListener(final ActorRef rpcRegistry) {
+    OpsListener(final ActorRef rpcRegistry, final ActorRef actionRegistry) {
         this.rpcRegistry = requireNonNull(rpcRegistry);
+        this.actionRegistry = requireNonNull(actionRegistry);
     }
 
     @Override
@@ -55,4 +63,16 @@ final class RpcListener implements DOMRpcAvailabilityListener {
     public boolean acceptsImplementation(final DOMRpcImplementation impl) {
         return !(impl instanceof RemoteRpcImplementation);
     }
+
+    @Override
+    public boolean acceptsImplementation(final DOMActionImplementation impl) {
+        return !(impl instanceof RemoteActionImplementation);
+    }
+
+    @Override
+    public void onActionsChanged(final Set<DOMActionInstance> removed, final Set<DOMActionInstance> added) {
+        LOG.debug("adding registration for [{}]", added);
+        LOG.debug("removing registration for [{}]", removed);
+        actionRegistry.tell(new ActionRegistry.Messages.UpdateActions(added, removed), ActorRef.noSender());
+    }
 }
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsManager.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsManager.java
new file mode 100644 (file)
index 0000000..ee0adbc
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.actor.OneForOneStrategy;
+import akka.actor.Props;
+import akka.actor.SupervisorStrategy;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yangtools.concepts.Registration;
+import scala.concurrent.duration.FiniteDuration;
+
+/**
+ * This class acts as a supervisor: it creates all the actors and resumes them if an exception is thrown. It also
+ * registers {@link OpsListener} with the local {@link DOMRpcService}.
+ */
+public class OpsManager extends AbstractUntypedActor {
+    private final DOMRpcProviderService rpcProvisionRegistry;
+    private final RemoteOpsProviderConfig config;
+    private final DOMRpcService rpcServices;
+    private final DOMActionProviderService actionProvisionRegistry;
+    private final DOMActionService actionService;
+
+    private Registration listenerReg;
+    private ActorRef opsInvoker;
+    private ActorRef actionRegistry;
+    private ActorRef rpcRegistry;
+    private ActorRef opsRegistrar;
+
+    OpsManager(final DOMRpcProviderService rpcProvisionRegistry, final DOMRpcService rpcServices,
+               final RemoteOpsProviderConfig config, final DOMActionProviderService actionProviderService,
+               final DOMActionService actionService) {
+        this.rpcProvisionRegistry = requireNonNull(rpcProvisionRegistry);
+        this.rpcServices = requireNonNull(rpcServices);
+        this.config = requireNonNull(config);
+        actionProvisionRegistry = requireNonNull(actionProviderService);
+        this.actionService = requireNonNull(actionService);
+    }
+
+    public static Props props(final DOMRpcProviderService rpcProvisionRegistry, final DOMRpcService rpcServices,
+                              final RemoteOpsProviderConfig config,
+                              final DOMActionProviderService actionProviderService,
+                              final DOMActionService actionService) {
+        requireNonNull(rpcProvisionRegistry, "RpcProviderService can not be null!");
+        requireNonNull(rpcServices, "RpcService can not be null!");
+        requireNonNull(config, "RemoteOpsProviderConfig can not be null!");
+        requireNonNull(actionProviderService, "ActionProviderService can not be null!");
+        requireNonNull(actionService, "ActionService can not be null!");
+        return Props.create(OpsManager.class, rpcProvisionRegistry, rpcServices, config,
+                actionProviderService, actionService);
+    }
+
+    @Override
+    public void preStart() throws Exception {
+        super.preStart();
+
+        opsInvoker = getContext().actorOf(OpsInvoker.props(rpcServices, actionService)
+                .withMailbox(config.getMailBoxName()), config.getRpcBrokerName());
+        LOG.debug("Listening for RPC invocation requests with {}", opsInvoker);
+
+        opsRegistrar = getContext().actorOf(OpsRegistrar.props(config, rpcProvisionRegistry, actionProvisionRegistry)
+                .withMailbox(config.getMailBoxName()), config.getRpcRegistrarName());
+        LOG.debug("Registering remote RPCs with {}", opsRegistrar);
+
+        rpcRegistry = getContext().actorOf(RpcRegistry.props(config, opsInvoker, opsRegistrar)
+                .withMailbox(config.getMailBoxName()), config.getRpcRegistryName());
+        LOG.debug("Propagating RPC information with {}", rpcRegistry);
+
+        actionRegistry = getContext().actorOf(ActionRegistry.props(config, opsInvoker, opsRegistrar)
+                .withMailbox(config.getMailBoxName()), config.getActionRegistryName());
+        LOG.debug("Propagating action information with {}", actionRegistry);
+
+        final OpsListener opsListener = new OpsListener(rpcRegistry, actionRegistry);
+        LOG.debug("Registering local availability listener {}", opsListener);
+        listenerReg = rpcServices.registerRpcListener(opsListener);
+    }
+
+    @Override
+    public void postStop() throws Exception {
+        if (listenerReg != null) {
+            listenerReg.close();
+            listenerReg = null;
+        }
+
+        super.postStop();
+    }
+
+    @Override
+    protected void handleReceive(final Object message) {
+        unknownMessage(message);
+    }
+
+    @Override
+    public SupervisorStrategy supervisorStrategy() {
+        return new OneForOneStrategy(10, FiniteDuration.create(1, TimeUnit.MINUTES), t -> {
+            LOG.error("An exception occurred, the actor will be resumed", t);
+            return SupervisorStrategy.resume();
+        });
+    }
+}
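
Editor's note: as the class comment above implies, OpsManager is the only actor a deployment has to start explicitly; the invoker, registrar and both registries are spawned from its preStart(). A minimal bootstrap sketch, assuming the same services that OSGiRemoteOpsProvider injects and a caller in the same package (class and variable names here are illustrative, not part of the patch):

    import akka.actor.ActorRef;
    import akka.actor.ActorSystem;
    import akka.actor.PoisonPill;
    import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
    import org.opendaylight.mdsal.dom.api.DOMActionService;
    import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
    import org.opendaylight.mdsal.dom.api.DOMRpcService;

    // Editor's sketch of the wiring OSGiRemoteOpsProvider performs; not part of the patch.
    final class OpsManagerBootstrapSketch {
        static ActorRef start(final ActorSystem system, final DOMRpcProviderService rpcProvider,
                final DOMRpcService rpcService, final DOMActionProviderService actionProvider,
                final DOMActionService actionService) {
            final RemoteOpsProviderConfig config = RemoteOpsProviderConfig.newInstance(system.name(), true, 1000);
            // OpsManager.preStart() creates the invoker, registrar and both registries.
            return system.actorOf(
                OpsManager.props(rpcProvider, rpcService, config, actionProvider, actionService),
                config.getRpcManagerName());
        }

        static void stop(final ActorRef opsManager) {
            // Same shutdown signal OSGiRemoteOpsProvider.deactivate() sends.
            opsManager.tell(PoisonPill.getInstance(), ActorRef.noSender());
        }
    }

In production this wiring is done by the OSGiRemoteOpsProvider component above; the sketch only makes the call sequence explicit.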
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsRegistrar.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/OpsRegistrar.java
new file mode 100644 (file)
index 0000000..4d11a54
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.Address;
+import akka.actor.Props;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Optional;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.Messages.UpdateRemoteActionEndpoints;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.RemoteActionEndpoint;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.RemoteRpcEndpoint;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.yangtools.concepts.Registration;
+
+/**
+ * Actor handling registration of RPCs and Actions available on remote nodes with the local
+ * {@link DOMRpcProviderService} and {@link DOMActionProviderService}.
+ */
+final class OpsRegistrar extends AbstractUntypedActor {
+    private final Map<Address, Registration> rpcRegs = new HashMap<>();
+    private final Map<Address, Registration> actionRegs = new HashMap<>();
+    private final DOMRpcProviderService rpcProviderService;
+    private final RemoteOpsProviderConfig config;
+    private final DOMActionProviderService actionProviderService;
+
+    OpsRegistrar(final RemoteOpsProviderConfig config, final DOMRpcProviderService rpcProviderService,
+                 final DOMActionProviderService actionProviderService) {
+        this.config = requireNonNull(config);
+        this.rpcProviderService = requireNonNull(rpcProviderService);
+        this.actionProviderService = requireNonNull(actionProviderService);
+    }
+
+    public static Props props(final RemoteOpsProviderConfig config, final DOMRpcProviderService rpcProviderService,
+                              final DOMActionProviderService actionProviderService) {
+        return Props.create(OpsRegistrar.class, requireNonNull(config),
+            requireNonNull(rpcProviderService, "DOMRpcProviderService cannot be null"),
+            requireNonNull(actionProviderService, "DOMActionProviderService cannot be null"));
+    }
+
+    @Override
+    public void postStop() throws Exception {
+        rpcRegs.values().forEach(Registration::close);
+        rpcRegs.clear();
+        actionRegs.values().forEach(Registration::close);
+        actionRegs.clear();
+
+        super.postStop();
+    }
+
+    @Override
+    protected void handleReceive(final Object message) {
+        if (message instanceof UpdateRemoteEndpoints updateEndpoints) {
+            LOG.debug("Handling updateRemoteEndpoints message");
+            updateRemoteRpcEndpoints(updateEndpoints.getRpcEndpoints());
+        } else if (message instanceof UpdateRemoteActionEndpoints updateEndpoints) {
+            LOG.debug("Handling updateRemoteActionEndpoints message");
+            updateRemoteActionEndpoints(updateEndpoints.getActionEndpoints());
+        } else {
+            unknownMessage(message);
+        }
+    }
+
+    private void updateRemoteRpcEndpoints(final Map<Address, Optional<RemoteRpcEndpoint>> rpcEndpoints) {
+        /*
+         * Updating RPC providers is a two-step process. We first add the newly-discovered RPCs and then close
+         * the old registrations. This minimizes churn observed by listeners, as they will not observe RPC
+         * unavailability, which would occur if we were to do it the other way around.
+         *
+         * Note that when an RPC moves from one remote node to another, we also do not want to expose the gap,
+         * hence we register all new implementations before closing all registrations.
+         */
+        final var prevRegs = new ArrayList<Registration>(rpcEndpoints.size());
+
+        for (Entry<Address, Optional<RemoteRpcEndpoint>> e : rpcEndpoints.entrySet()) {
+            LOG.debug("Updating RPC registrations for {}", e.getKey());
+
+            final Registration prevReg;
+            final Optional<RemoteRpcEndpoint> maybeEndpoint = e.getValue();
+            if (maybeEndpoint.isPresent()) {
+                final RemoteRpcEndpoint endpoint = maybeEndpoint.orElseThrow();
+                final RemoteRpcImplementation impl = new RemoteRpcImplementation(endpoint.getRouter(), config);
+                prevReg = rpcRegs.put(e.getKey(), rpcProviderService.registerRpcImplementation(impl,
+                    endpoint.getRpcs()));
+            } else {
+                prevReg = rpcRegs.remove(e.getKey());
+            }
+
+            if (prevReg != null) {
+                prevRegs.add(prevReg);
+            }
+        }
+
+        prevRegs.forEach(Registration::close);
+    }
+
+    /**
+     * Updates the action endpoints, adding new registrations before removing the previous ones.
+     */
+    private void updateRemoteActionEndpoints(final Map<Address, Optional<RemoteActionEndpoint>> actionEndpoints) {
+        /*
+         * Updating action providers is a two-step process. We first add the newly-discovered actions and then close
+         * the old registrations. This minimizes churn observed by listeners, as they will not observe action
+         * unavailability, which would occur if we were to do it the other way around.
+         *
+         * Note that when an Action moves from one remote node to another, we also do not want to expose the gap,
+         * hence we register all new implementations before closing all registrations.
+         */
+        final var prevRegs = new ArrayList<Registration>(actionEndpoints.size());
+
+        for (Entry<Address, Optional<RemoteActionEndpoint>> e : actionEndpoints.entrySet()) {
+            LOG.debug("Updating action registrations for {}", e.getKey());
+
+            final Registration prevReg;
+            final Optional<RemoteActionEndpoint> maybeEndpoint = e.getValue();
+            if (maybeEndpoint.isPresent()) {
+                final RemoteActionEndpoint endpoint = maybeEndpoint.orElseThrow();
+                final RemoteActionImplementation impl = new RemoteActionImplementation(endpoint.getRouter(), config);
+                prevReg = actionRegs.put(e.getKey(), actionProviderService.registerActionImplementation(impl,
+                    endpoint.getActions()));
+            } else {
+                prevReg = actionRegs.remove(e.getKey());
+            }
+
+            if (prevReg != null) {
+                prevRegs.add(prevReg);
+            }
+        }
+
+        prevRegs.forEach(Registration::close);
+    }
+}
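
Both update methods above follow the same make-before-break ordering: every replacement registration is installed before any superseded registration is closed, so consumers never observe a window with no implementation. The stand-alone sketch below distills just that ordering; the class and its names are illustrative and not part of this change, with AutoCloseable standing in for Registration.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;

final class MakeBeforeBreakTable<K, V> {
    private final Map<K, AutoCloseable> registrations = new HashMap<>();

    // For each key: a present value installs a fresh registration, an absent value removes the current one.
    // Superseded registrations are closed only after the entire update has been applied.
    void update(final Map<K, Optional<V>> update, final Function<V, AutoCloseable> register) throws Exception {
        final List<AutoCloseable> superseded = new ArrayList<>(update.size());

        for (var entry : update.entrySet()) {
            final AutoCloseable prev = entry.getValue().isPresent()
                ? registrations.put(entry.getKey(), register.apply(entry.getValue().orElseThrow()))
                : registrations.remove(entry.getKey());
            if (prev != null) {
                superseded.add(prev);
            }
        }

        // Tear-down happens last, after all new registrations are visible.
        for (var prev : superseded) {
            prev.close();
        }
    }
}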
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteActionImplementation.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteActionImplementation.java
new file mode 100644 (file)
index 0000000..2b78806
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import akka.actor.ActorRef;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.remote.rpc.messages.ExecuteAction;
+import org.opendaylight.mdsal.dom.api.DOMActionImplementation;
+import org.opendaylight.mdsal.dom.api.DOMActionResult;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A {@link DOMActionImplementation} which routes invocation requests to a remote invoker actor.
+ */
+final class RemoteActionImplementation extends AbstractRemoteImplementation<ExecuteAction>
+        implements DOMActionImplementation {
+    private static final Logger LOG = LoggerFactory.getLogger(RemoteActionImplementation.class);
+
+    RemoteActionImplementation(final ActorRef remoteInvoker, final RemoteOpsProviderConfig config) {
+        super(remoteInvoker, config);
+    }
+
+    /**
+     * Routes an action request to a remote invoker, which executes the action and returns the result.
+     */
+    @Override
+    public ListenableFuture<DOMActionResult> invokeAction(final Absolute type, final DOMDataTreeIdentifier path,
+                                                          final ContainerNode input) {
+        LOG.debug("invoking action {} with path {}", type, path);
+        return new RemoteDOMActionFuture(type, ask(ExecuteAction.from(type, path, input)));
+    }
+
+    @Override
+    public long invocationCost() {
+        return COST;
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMActionException.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMActionException.java
new file mode 100644 (file)
index 0000000..b866499
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import org.opendaylight.mdsal.dom.api.DOMActionException;
+
+public class RemoteDOMActionException extends DOMActionException {
+    private static final long serialVersionUID = 1L;
+
+    RemoteDOMActionException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMActionFuture.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteDOMActionFuture.java
new file mode 100644 (file)
index 0000000..9d9e29a
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.remote.rpc.messages.ActionResponse;
+import org.opendaylight.mdsal.dom.api.DOMActionException;
+import org.opendaylight.mdsal.dom.api.DOMActionResult;
+import org.opendaylight.mdsal.dom.spi.SimpleDOMActionResult;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+import scala.concurrent.Future;
+
+final class RemoteDOMActionFuture extends AbstractRemoteFuture<Absolute, DOMActionResult, DOMActionException> {
+    RemoteDOMActionFuture(final @NonNull Absolute type, final @NonNull Future<Object> requestFuture) {
+        super(type, requestFuture);
+    }
+
+    @Override
+    DOMActionResult processReply(final Object reply) {
+        if (reply instanceof ActionResponse actionReply) {
+            final ContainerNode output = actionReply.getOutput();
+            return output == null ? new SimpleDOMActionResult(actionReply.getErrors())
+                    : new SimpleDOMActionResult(output, actionReply.getErrors());
+        }
+
+        return null;
+    }
+
+    @Override
+    Class<DOMActionException> exceptionClass() {
+        return DOMActionException.class;
+    }
+
+    @Override
+    DOMActionException wrapCause(final Throwable cause) {
+        return new RemoteDOMActionException("Exception during invoking ACTION", cause);
+    }
+}
index d5e46a9a84f8b800af5c979f06def05ec08d3d34..e112d1f9818411067b072516a034b993f04bc237 100644 (file)
@@ -7,94 +7,31 @@
  */
 package org.opendaylight.controller.remote.rpc;
 
-import static java.util.Objects.requireNonNull;
-
-import akka.dispatch.OnComplete;
-import com.google.common.util.concurrent.AbstractFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
 import org.opendaylight.mdsal.dom.api.DOMRpcException;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
 import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.ExecutionContext;
 import scala.concurrent.Future;
 
-final class RemoteDOMRpcFuture extends AbstractFuture<DOMRpcResult> {
-
-    private static final Logger LOG = LoggerFactory.getLogger(RemoteDOMRpcFuture.class);
-
-    private final QName rpcName;
-
-    private RemoteDOMRpcFuture(final QName rpcName) {
-        this.rpcName = requireNonNull(rpcName, "rpcName");
-    }
-
-    public static RemoteDOMRpcFuture create(final QName rpcName) {
-        return new RemoteDOMRpcFuture(rpcName);
-    }
-
-    protected void failNow(final Throwable error) {
-        LOG.debug("Failing future {} for rpc {}", this, rpcName, error);
-        setException(error);
-    }
-
-    protected void completeWith(final Future<Object> future) {
-        future.onComplete(new FutureUpdater(), ExecutionContext.Implicits$.MODULE$.global());
+final class RemoteDOMRpcFuture extends AbstractRemoteFuture<QName, DOMRpcResult, DOMRpcException> {
+    RemoteDOMRpcFuture(final @NonNull QName type, final @NonNull Future<Object> requestFuture) {
+        super(type, requestFuture);
     }
 
     @Override
-    public DOMRpcResult get() throws InterruptedException, ExecutionException {
-        try {
-            return super.get();
-        } catch (ExecutionException e) {
-            throw mapException(e);
-        }
+    DOMRpcResult processReply(final Object reply) {
+        return reply instanceof RpcResponse response ? new DefaultDOMRpcResult(response.getOutput()) : null;
     }
 
     @Override
-    public DOMRpcResult get(final long timeout, final TimeUnit unit)
-            throws InterruptedException, ExecutionException, TimeoutException {
-        try {
-            return super.get(timeout, unit);
-        } catch (final ExecutionException e) {
-            throw mapException(e);
-        }
-    }
-
-    private static ExecutionException mapException(final ExecutionException ex) {
-        final Throwable cause = ex.getCause();
-        if (cause instanceof DOMRpcException) {
-            return ex;
-        }
-        return new ExecutionException(ex.getMessage(),
-                new RemoteDOMRpcException("Exception during invoking RPC", ex.getCause()));
+    Class<DOMRpcException> exceptionClass() {
+        return DOMRpcException.class;
     }
 
-    private final class FutureUpdater extends OnComplete<Object> {
-
-        @Override
-        public void onComplete(final Throwable error, final Object reply) {
-            if (error != null) {
-                RemoteDOMRpcFuture.this.failNow(error);
-            } else if (reply instanceof RpcResponse) {
-                final RpcResponse rpcReply = (RpcResponse) reply;
-                final NormalizedNode<?, ?> result = rpcReply.getResultNormalizedNode();
-
-                LOG.debug("Received response for rpc {}: result is {}", rpcName, result);
-
-                RemoteDOMRpcFuture.this.set(new DefaultDOMRpcResult(result));
-
-                LOG.debug("Future {} for rpc {} successfully completed", RemoteDOMRpcFuture.this, rpcName);
-            } else {
-                RemoteDOMRpcFuture.this.failNow(new IllegalStateException("Incorrect reply type " + reply
-                        + "from Akka"));
-            }
-        }
+    @Override
+    DOMRpcException wrapCause(final Throwable cause) {
+        return new RemoteDOMRpcException("Exception during invoking RPC", cause);
     }
 }
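
RemoteDOMRpcFuture above and RemoteDOMActionFuture earlier now push the Akka completion plumbing into a shared AbstractRemoteFuture base class whose source is not part of this hunk. Pieced together from the removed code and the three hooks the subclasses override, it plausibly looks something like the sketch below; every detail here is an assumption for orientation, not the actual file.

package org.opendaylight.controller.remote.rpc;

import static java.util.Objects.requireNonNull;

import akka.dispatch.OnComplete;
import com.google.common.util.concurrent.AbstractFuture;
import java.util.concurrent.ExecutionException;
import org.eclipse.jdt.annotation.NonNull;
import org.eclipse.jdt.annotation.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;

// Hypothetical reconstruction of the shared base class referenced by the two futures.
abstract class AbstractRemoteFuture<T, O, E extends Exception> extends AbstractFuture<O> {
    private static final Logger LOG = LoggerFactory.getLogger(AbstractRemoteFuture.class);

    private final @NonNull T type;

    AbstractRemoteFuture(final @NonNull T type, final Future<Object> requestFuture) {
        this.type = requireNonNull(type);
        requestFuture.onComplete(new OnComplete<Object>() {
            @Override
            public void onComplete(final Throwable failure, final Object reply) {
                if (failure != null) {
                    LOG.debug("Failing future for {}", type, failure);
                    setException(failure);
                    return;
                }

                final O result = processReply(reply);
                if (result != null) {
                    LOG.debug("Future for {} completed with {}", type, result);
                    set(result);
                } else {
                    setException(new IllegalStateException("Incorrect reply type " + reply + " from Akka"));
                }
            }
        }, ExecutionContext.Implicits$.MODULE$.global());
    }

    @Override
    public final O get() throws InterruptedException, ExecutionException {
        try {
            return super.get();
        } catch (ExecutionException e) {
            throw mapException(e);
        }
    }

    private ExecutionException mapException(final ExecutionException ex) {
        // Surface domain exceptions as-is, wrap anything else via the subclass-provided wrapper.
        final Throwable cause = ex.getCause();
        return exceptionClass().isInstance(cause) ? ex : new ExecutionException(ex.getMessage(), wrapCause(cause));
    }

    abstract @Nullable O processReply(Object reply);

    abstract Class<E> exceptionClass();

    abstract E wrapCause(Throwable cause);
}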
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteOpsProvider.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteOpsProvider.java
new file mode 100644 (file)
index 0000000..dabe5be
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This is the base class which initializes all the actors, listeners and
+ * default implementations needed for remote invocation of RPCs and actions.
+ */
+public class RemoteOpsProvider implements AutoCloseable {
+
+    private static final Logger LOG = LoggerFactory.getLogger(RemoteOpsProvider.class);
+
+    private final DOMRpcProviderService rpcProvisionRegistry;
+    private final RemoteOpsProviderConfig config;
+    private final ActorSystem actorSystem;
+    private final DOMRpcService rpcService;
+    private final DOMActionProviderService actionProvisionRegistry;
+    private final DOMActionService actionService;
+
+    private ActorRef opsManager;
+
+    public RemoteOpsProvider(final ActorSystem actorSystem, final DOMRpcProviderService rpcProvisionRegistry,
+                             final DOMRpcService rpcService, final RemoteOpsProviderConfig config,
+                             final DOMActionProviderService actionProviderService,
+                             final DOMActionService actionService) {
+        this.actorSystem = requireNonNull(actorSystem);
+        this.rpcProvisionRegistry = requireNonNull(rpcProvisionRegistry);
+        this.rpcService = requireNonNull(rpcService);
+        this.config = requireNonNull(config);
+        this.actionProvisionRegistry = requireNonNull(actionProviderService);
+        this.actionService = requireNonNull(actionService);
+    }
+
+    @Override
+    public void close() {
+        if (opsManager != null) {
+            LOG.info("Stopping Ops Manager at {}", opsManager);
+            opsManager.tell(PoisonPill.getInstance(), ActorRef.noSender());
+            opsManager = null;
+        }
+    }
+
+    public void start() {
+        LOG.info("Starting Remote Ops service...");
+        opsManager = actorSystem.actorOf(OpsManager.props(rpcProvisionRegistry, rpcService, config,
+                actionProvisionRegistry, actionService), config.getRpcManagerName());
+        LOG.debug("Ops Manager started at {}", opsManager);
+    }
+}
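
As a rough orientation for how this class is meant to be used (the actual wiring lives in the surrounding dependency injection, outside this patch), startup and shutdown would look roughly like this; the service and actor-system references are placeholders.

// Placeholder wiring; rpcProviderService, rpcService, actionProviderService, actionService
// and actorSystem are assumed to be provided by the surrounding dependency injection.
final RemoteOpsProviderConfig config = RemoteOpsProviderConfig.newInstance(
        actorSystem.name(), /* metricCaptureEnabled */ false, /* mailboxCapacity */ 1000);

final RemoteOpsProvider provider = RemoteOpsProviderFactory.createInstance(
        rpcProviderService, rpcService, actorSystem, config, actionProviderService, actionService);

provider.start();   // spawns the OpsManager actor under the configured rpc-manager-name
// ... later, on shutdown:
provider.close();   // stops the OpsManager with a PoisonPill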
@@ -14,25 +14,28 @@ import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.common.actor.CommonConfig;
 import scala.concurrent.duration.FiniteDuration;
 
-public class RemoteRpcProviderConfig extends CommonConfig {
+public class RemoteOpsProviderConfig extends CommonConfig {
 
     protected static final String TAG_RPC_BROKER_NAME = "rpc-broker-name";
     protected static final String TAG_RPC_REGISTRAR_NAME = "rpc-registrar-name";
     protected static final String TAG_RPC_REGISTRY_NAME = "registry-name";
+    protected static final String TAG_ACTION_REGISTRY_NAME = "action-registry-name";
     protected static final String TAG_RPC_MGR_NAME = "rpc-manager-name";
     protected static final String TAG_RPC_BROKER_PATH = "rpc-broker-path";
     protected static final String TAG_RPC_REGISTRY_PATH = "rpc-registry-path";
+    protected static final String TAG_ACTION_REGISTRY_PATH = "action-registry-path";
     protected static final String TAG_RPC_MGR_PATH = "rpc-manager-path";
     protected static final String TAG_ASK_DURATION = "ask-duration";
 
     private static final String TAG_GOSSIP_TICK_INTERVAL = "gossip-tick-interval";
     private static final String TAG_RPC_REGISTRY_PERSISTENCE_ID = "rpc-registry-persistence-id";
+    private static final String TAG_ACTION_REGISTRY_PERSISTENCE_ID = "action-registry-persistence-id";
 
     //locally cached values
     private Timeout cachedAskDuration;
     private FiniteDuration cachedGossipTickInterval;
 
-    public RemoteRpcProviderConfig(final Config config) {
+    public RemoteOpsProviderConfig(final Config config) {
         super(config);
     }
 
@@ -48,6 +51,10 @@ public class RemoteRpcProviderConfig extends CommonConfig {
         return get().getString(TAG_RPC_REGISTRY_NAME);
     }
 
+    public String getActionRegistryName() {
+        return get().getString(TAG_ACTION_REGISTRY_NAME);
+    }
+
     public String getRpcManagerName() {
         return get().getString(TAG_RPC_MGR_NAME);
     }
@@ -64,6 +71,14 @@ public class RemoteRpcProviderConfig extends CommonConfig {
         return get().getString(TAG_RPC_REGISTRY_PERSISTENCE_ID);
     }
 
+    public String getActionRegistryPath() {
+        return get().getString(TAG_ACTION_REGISTRY_PATH);
+    }
+
+    public String getActionRegistryPersistenceId() {
+        return get().getString(TAG_ACTION_REGISTRY_PERSISTENCE_ID);
+    }
+
     public String getRpcManagerPath() {
         return get().getString(TAG_RPC_MGR_PATH);
     }
@@ -95,10 +110,10 @@ public class RemoteRpcProviderConfig extends CommonConfig {
      */
     @SuppressFBWarnings(value = "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE",
             justification = "Findbugs flags this as an unconfirmed cast of return value but the build method clearly "
-                + "returns RemoteRpcProviderConfig. Perhaps it's confused b/c the build method is overloaded and "
+                + "returns RemoteOpsProviderConfig. Perhaps it's confused b/c the build method is overloaded and "
                 + "and differs in return type from the base class.")
-    public static RemoteRpcProviderConfig newInstance(final String actorSystemName, final boolean metricCaptureEnabled,
-            final int mailboxCapacity) {
+    public static RemoteOpsProviderConfig newInstance(final String actorSystemName, final boolean metricCaptureEnabled,
+                                                      final int mailboxCapacity) {
         return new Builder(actorSystemName).metricCaptureEnabled(metricCaptureEnabled)
                 .mailboxCapacity(mailboxCapacity).build();
     }
@@ -112,11 +127,13 @@ public class RemoteRpcProviderConfig extends CommonConfig {
             configHolder.put(TAG_RPC_BROKER_NAME, "broker");
             configHolder.put(TAG_RPC_REGISTRAR_NAME, "registrar");
             configHolder.put(TAG_RPC_REGISTRY_NAME, "registry");
+            configHolder.put(TAG_ACTION_REGISTRY_NAME, "action-registry");
             configHolder.put(TAG_RPC_MGR_NAME, "rpc");
 
             //Actor paths
             configHolder.put(TAG_RPC_BROKER_PATH, "/user/rpc/broker");
             configHolder.put(TAG_RPC_REGISTRY_PATH, "/user/rpc/registry");
+            configHolder.put(TAG_ACTION_REGISTRY_PATH, "/user/action/registry");
             configHolder.put(TAG_RPC_MGR_PATH, "/user/rpc");
 
             //durations
@@ -125,6 +142,7 @@ public class RemoteRpcProviderConfig extends CommonConfig {
 
             // persistence
             configHolder.put(TAG_RPC_REGISTRY_PERSISTENCE_ID, "remote-rpc-registry");
+            configHolder.put(TAG_ACTION_REGISTRY_PERSISTENCE_ID, "remote-action-registry");
         }
 
         public Builder gossipTickInterval(final String interval) {
@@ -133,8 +151,8 @@ public class RemoteRpcProviderConfig extends CommonConfig {
         }
 
         @Override
-        public RemoteRpcProviderConfig build() {
-            return new RemoteRpcProviderConfig(merge());
+        public RemoteOpsProviderConfig build() {
+            return new RemoteOpsProviderConfig(merge());
         }
     }
 }
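
Assuming the Builder is visible to callers, as the newInstance() factory above suggests, a configuration with non-default gossip settings could be assembled along these lines; the actor-system name and interval value are placeholders.

// Placeholder values; the interval string is interpreted by the underlying config (assumed).
final RemoteOpsProviderConfig config = new RemoteOpsProviderConfig.Builder("odl-cluster-rpc")
        .gossipTickInterval("500ms")
        .metricCaptureEnabled(true)
        .mailboxCapacity(1000)
        .build();

final String actionRegistryPath = config.getActionRegistryPath();   // "/user/action/registry" by default
final Timeout askTimeout = config.getAskDuration();                 // akka.util.Timeout backed by ask-duration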
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderFactory.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderFactory.java
new file mode 100644 (file)
index 0000000..35043c3
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import akka.actor.ActorSystem;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+
+public final class RemoteOpsProviderFactory {
+    private RemoteOpsProviderFactory() {
+
+    }
+
+    public static RemoteOpsProvider createInstance(final DOMRpcProviderService rpcProviderService,
+                                                   final DOMRpcService rpcService, final ActorSystem actorSystem,
+                                                   final RemoteOpsProviderConfig config,
+                                                   final DOMActionProviderService actionProviderService,
+                                                   final DOMActionService actionService) {
+
+        return new RemoteOpsProvider(actorSystem, rpcProviderService, rpcService, config,
+                actionProviderService, actionService);
+    }
+}
index 45a847f975cfe0debcaa457a59ae862f86e18fd2..3046f7e6dde0284fefa3f6e5bbbc07d3e1a9a374 100644 (file)
@@ -5,43 +5,29 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc;
 
 import akka.actor.ActorRef;
-import akka.pattern.Patterns;
-import akka.util.Timeout;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
 /**
  * A {@link DOMRpcImplementation} which routes invocation requests to a remote invoker actor.
  *
  * @author Robert Varga
  */
-final class RemoteRpcImplementation implements DOMRpcImplementation {
-    // 0 for local, 1 for binding, 2 for remote
-    private static final long COST = 2;
-
-    private final ActorRef remoteInvoker;
-    private final Timeout askDuration;
-
-    RemoteRpcImplementation(final ActorRef remoteInvoker, final RemoteRpcProviderConfig config) {
-        this.remoteInvoker = Preconditions.checkNotNull(remoteInvoker);
-        this.askDuration = config.getAskDuration();
+final class RemoteRpcImplementation extends AbstractRemoteImplementation<ExecuteRpc> implements DOMRpcImplementation {
+    RemoteRpcImplementation(final ActorRef remoteInvoker, final RemoteOpsProviderConfig config) {
+        super(remoteInvoker, config);
     }
 
     @Override
-    public FluentFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc,
-            final NormalizedNode<?, ?> input) {
-        final RemoteDOMRpcFuture ret = RemoteDOMRpcFuture.create(rpc.getType().getLastComponent());
-        ret.completeWith(Patterns.ask(remoteInvoker, ExecuteRpc.from(rpc, input), askDuration));
-        return ret;
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
+        return new RemoteDOMRpcFuture(rpc.getType(), ask(ExecuteRpc.from(rpc, input)));
     }
 
     @Override
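
Both RemoteRpcImplementation and RemoteActionImplementation now lean on AbstractRemoteImplementation for the invoker reference, the ask timeout and the COST constant; that base class is not shown in this hunk. Judging from the fields removed above and the deleted "0 for local, 1 for binding, 2 for remote" comment, it plausibly amounts to something like the following sketch, which is an assumption rather than the actual file.

package org.opendaylight.controller.remote.rpc;

import static java.util.Objects.requireNonNull;

import akka.actor.ActorRef;
import akka.pattern.Patterns;
import akka.util.Timeout;
import scala.concurrent.Future;

// Hypothetical sketch of the shared base class used by Remote{Rpc,Action}Implementation.
abstract class AbstractRemoteImplementation<T> {
    // 0 for local, 1 for binding, 2 for remote -- carried over from the removed RemoteRpcImplementation.
    static final long COST = 2;

    private final ActorRef remoteInvoker;
    private final Timeout askDuration;

    AbstractRemoteImplementation(final ActorRef remoteInvoker, final RemoteOpsProviderConfig config) {
        this.remoteInvoker = requireNonNull(remoteInvoker);
        this.askDuration = config.getAskDuration();
    }

    // Send the request to the remote invoker actor and hand back the Scala future of its reply.
    final Future<Object> ask(final T message) {
        return Patterns.ask(remoteInvoker, message, askDuration);
    }
}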
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProvider.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProvider.java
deleted file mode 100644 (file)
index 6bf84a9..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.PoisonPill;
-import com.google.common.base.Preconditions;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This is the base class which initialize all the actors, listeners and
- * default RPc implementation so remote invocation of rpcs.
- */
-public class RemoteRpcProvider implements AutoCloseable {
-
-    private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcProvider.class);
-
-    private final DOMRpcProviderService rpcProvisionRegistry;
-    private final RemoteRpcProviderConfig config;
-    private final ActorSystem actorSystem;
-    private final DOMRpcService rpcService;
-
-    private ActorRef rpcManager;
-
-    public RemoteRpcProvider(final ActorSystem actorSystem, final DOMRpcProviderService rpcProvisionRegistry,
-            final DOMRpcService rpcService, final RemoteRpcProviderConfig config) {
-        this.actorSystem = Preconditions.checkNotNull(actorSystem);
-        this.rpcProvisionRegistry = Preconditions.checkNotNull(rpcProvisionRegistry);
-        this.rpcService = Preconditions.checkNotNull(rpcService);
-        this.config = Preconditions.checkNotNull(config);
-    }
-
-    @Override
-    public void close() {
-        if (rpcManager != null) {
-            LOG.info("Stopping RPC Manager at {}", rpcManager);
-            rpcManager.tell(PoisonPill.getInstance(), ActorRef.noSender());
-            rpcManager = null;
-        }
-    }
-
-    public void start() {
-        LOG.info("Starting Remote RPC service...");
-        rpcManager = actorSystem.actorOf(RpcManager.props(rpcProvisionRegistry, rpcService, config),
-                config.getRpcManagerName());
-        LOG.debug("RPC Manager started at {}", rpcManager);
-    }
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java
deleted file mode 100644 (file)
index 58a7001..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.ActorSystem;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-
-public final class RemoteRpcProviderFactory {
-    private RemoteRpcProviderFactory() {
-
-    }
-
-    public static RemoteRpcProvider createInstance(final DOMRpcProviderService rpcProviderService,
-            final DOMRpcService rpcService, final ActorSystem actorSystem, final RemoteRpcProviderConfig config) {
-
-        return new RemoteRpcProvider(actorSystem, rpcProviderService, rpcService, config);
-    }
-}
index 01e5da8f3a2582d41acdcc1f8404185ffffdcec3..d75805c3fe30cc8b1daf716b7ce9709b6c5437e0 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc;
 
 import java.io.Serializable;
@@ -13,9 +12,10 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import org.opendaylight.mdsal.dom.api.DOMRpcException;
+import org.opendaylight.yangtools.yang.common.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 
 /**
@@ -24,21 +24,22 @@ import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
  * @author Thomas Pantelis
  */
 public class RpcErrorsException extends DOMRpcException {
-
+    @java.io.Serial
     private static final long serialVersionUID = 1L;
 
     private static class RpcErrorData implements Serializable {
+        @java.io.Serial
         private static final long serialVersionUID = 1L;
 
         final ErrorSeverity severity;
         final ErrorType errorType;
-        final String tag;
+        final ErrorTag tag;
         final String applicationTag;
         final String message;
         final String info;
         final Throwable cause;
 
-        RpcErrorData(final ErrorSeverity severity, final ErrorType errorType, final String tag,
+        RpcErrorData(final ErrorSeverity severity, final ErrorType errorType, final ErrorTag tag,
                 final String applicationTag, final String message, final String info, final Throwable cause) {
             this.severity = severity;
             this.errorType = errorType;
@@ -55,7 +56,7 @@ public class RpcErrorsException extends DOMRpcException {
     public RpcErrorsException(final String message, final Iterable<? extends RpcError> rpcErrors) {
         super(message);
 
-        for (final RpcError rpcError: rpcErrors) {
+        for (var rpcError : rpcErrors) {
             rpcErrorDataList.add(new RpcErrorData(rpcError.getSeverity(), rpcError.getErrorType(),
                     rpcError.getTag(), rpcError.getApplicationTag(), rpcError.getMessage(),
                     rpcError.getInfo(), rpcError.getCause()));
@@ -63,8 +64,8 @@ public class RpcErrorsException extends DOMRpcException {
     }
 
     public Collection<RpcError> getRpcErrors() {
-        final Collection<RpcError> rpcErrors = new ArrayList<>();
-        for (final RpcErrorData ed: rpcErrorDataList) {
+        final var rpcErrors = new ArrayList<RpcError>();
+        for (var ed : rpcErrorDataList) {
             final RpcError rpcError = ed.severity == ErrorSeverity.ERROR
                     ? RpcResultBuilder.newError(ed.errorType, ed.tag, ed.message, ed.applicationTag,
                             ed.info, ed.cause) :
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcInvoker.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcInvoker.java
deleted file mode 100644 (file)
index 4d3a66c..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
-import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
-import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Actor receiving invocation requests from remote nodes, routing them to
- * {@link DOMRpcService#invokeRpc(SchemaPath, NormalizedNode)}.
- */
-final class RpcInvoker extends AbstractUntypedActor {
-    private final DOMRpcService rpcService;
-
-    private RpcInvoker(final DOMRpcService rpcService) {
-        this.rpcService = Preconditions.checkNotNull(rpcService);
-    }
-
-    public static Props props(final DOMRpcService rpcService) {
-        Preconditions.checkNotNull(rpcService, "DOMRpcService can not be null");
-        return Props.create(RpcInvoker.class, rpcService);
-    }
-
-    @Override
-    protected void handleReceive(final Object message) {
-        if (message instanceof ExecuteRpc) {
-            executeRpc((ExecuteRpc) message);
-        } else {
-            unknownMessage(message);
-        }
-    }
-
-    @SuppressWarnings("checkstyle:IllegalCatch")
-    private void executeRpc(final ExecuteRpc msg) {
-        LOG.debug("Executing rpc {}", msg.getRpc());
-        final SchemaPath schemaPath = SchemaPath.create(true, msg.getRpc());
-        final ActorRef sender = getSender();
-        final ActorRef self = self();
-
-        final ListenableFuture<DOMRpcResult> future;
-        try {
-            future = rpcService.invokeRpc(schemaPath, msg.getInputNormalizedNode());
-        } catch (final RuntimeException e) {
-            LOG.debug("Failed to invoke RPC {}", msg.getRpc(), e);
-            sender.tell(new akka.actor.Status.Failure(e), sender);
-            return;
-        }
-
-        Futures.addCallback(future, new FutureCallback<DOMRpcResult>() {
-            @Override
-            public void onSuccess(final DOMRpcResult result) {
-                if (result == null) {
-                    // This shouldn't happen but the FutureCallback annotates the result param with Nullable so
-                    // handle null here to avoid FindBugs warning.
-                    LOG.debug("Got null DOMRpcResult - sending null response for execute rpc : {}", msg.getRpc());
-                    sender.tell(new RpcResponse(null), self);
-                    return;
-                }
-
-                if (!result.getErrors().isEmpty()) {
-                    final String message = String.format("Execution of RPC %s failed", msg.getRpc());
-                    sender.tell(new akka.actor.Status.Failure(new RpcErrorsException(message, result.getErrors())),
-                        self);
-                } else {
-                    LOG.debug("Sending response for execute rpc : {}", msg.getRpc());
-                    sender.tell(new RpcResponse(result.getResult()), self);
-                }
-            }
-
-            @Override
-            public void onFailure(final Throwable failure) {
-                LOG.debug("Failed to execute RPC {}", msg.getRpc(), failure);
-                LOG.error("Failed to execute RPC {} due to {}. More details are available on DEBUG level.",
-                    msg.getRpc(), Throwables.getRootCause(failure).getMessage());
-                sender.tell(new akka.actor.Status.Failure(failure), self);
-            }
-        }, MoreExecutors.directExecutor());
-    }
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcManager.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcManager.java
deleted file mode 100644 (file)
index 5dbc1cd..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.ActorRef;
-import akka.actor.OneForOneStrategy;
-import akka.actor.Props;
-import akka.actor.SupervisorStrategy;
-import com.google.common.base.Preconditions;
-import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
-import scala.concurrent.duration.FiniteDuration;
-
-/**
- * This class acts as a supervisor, creates all the actors, resumes them, if an exception is thrown. It also registers
- * {@link RpcListener} with the local {@link DOMRpcService}.
- */
-public class RpcManager extends AbstractUntypedActor {
-    private final DOMRpcProviderService rpcProvisionRegistry;
-    private final RemoteRpcProviderConfig config;
-    private final DOMRpcService rpcServices;
-
-    private ListenerRegistration<RpcListener> listenerReg;
-    private ActorRef rpcInvoker;
-    private ActorRef rpcRegistry;
-    private ActorRef rpcRegistrar;
-
-    RpcManager(final DOMRpcProviderService rpcProvisionRegistry, final DOMRpcService rpcServices,
-            final RemoteRpcProviderConfig config) {
-        this.rpcProvisionRegistry = Preconditions.checkNotNull(rpcProvisionRegistry);
-        this.rpcServices = Preconditions.checkNotNull(rpcServices);
-        this.config = Preconditions.checkNotNull(config);
-    }
-
-    public static Props props(final DOMRpcProviderService rpcProvisionRegistry, final DOMRpcService rpcServices,
-            final RemoteRpcProviderConfig config) {
-        Preconditions.checkNotNull(rpcProvisionRegistry, "RpcProviderService can not be null!");
-        Preconditions.checkNotNull(rpcServices, "RpcService can not be null!");
-        Preconditions.checkNotNull(config, "RemoteRpcProviderConfig can not be null!");
-        return Props.create(RpcManager.class, rpcProvisionRegistry, rpcServices, config);
-    }
-
-    @Override
-    public void preStart() throws Exception {
-        super.preStart();
-
-        rpcInvoker = getContext().actorOf(RpcInvoker.props(rpcServices)
-            .withMailbox(config.getMailBoxName()), config.getRpcBrokerName());
-        LOG.debug("Listening for RPC invocation requests with {}", rpcInvoker);
-
-        rpcRegistrar = getContext().actorOf(RpcRegistrar.props(config, rpcProvisionRegistry)
-            .withMailbox(config.getMailBoxName()), config.getRpcRegistrarName());
-        LOG.debug("Registering remote RPCs with {}", rpcRegistrar);
-
-        rpcRegistry = getContext().actorOf(RpcRegistry.props(config, rpcInvoker, rpcRegistrar)
-                .withMailbox(config.getMailBoxName()), config.getRpcRegistryName());
-        LOG.debug("Propagating RPC information with {}", rpcRegistry);
-
-        final RpcListener rpcListener = new RpcListener(rpcRegistry);
-        LOG.debug("Registering local availabitility listener {}", rpcListener);
-        listenerReg = rpcServices.registerRpcListener(rpcListener);
-    }
-
-    @Override
-    public void postStop() throws Exception {
-        if (listenerReg != null) {
-            listenerReg.close();
-            listenerReg = null;
-        }
-
-        super.postStop();
-    }
-
-    @Override
-    protected void handleReceive(final Object message) {
-        unknownMessage(message);
-    }
-
-    @Override
-    public SupervisorStrategy supervisorStrategy() {
-        return new OneForOneStrategy(10, FiniteDuration.create(1, TimeUnit.MINUTES), t -> {
-            LOG.error("An exception happened actor will be resumed", t);
-            return SupervisorStrategy.resume();
-        });
-    }
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcRegistrar.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcRegistrar.java
deleted file mode 100644 (file)
index 5fc088d..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.Address;
-import akka.actor.Props;
-import com.google.common.base.Preconditions;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Optional;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.RemoteRpcEndpoint;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-
-/**
- * Actor handling registration of RPCs available on remote nodes with the local {@link DOMRpcProviderService}.
- *
- * @author Robert Varga
- */
-final class RpcRegistrar extends AbstractUntypedActor {
-    private final Map<Address, DOMRpcImplementationRegistration<?>> regs = new HashMap<>();
-    private final DOMRpcProviderService rpcProviderService;
-    private final RemoteRpcProviderConfig config;
-
-    RpcRegistrar(final RemoteRpcProviderConfig config, final DOMRpcProviderService rpcProviderService) {
-        this.config = Preconditions.checkNotNull(config);
-        this.rpcProviderService = Preconditions.checkNotNull(rpcProviderService);
-    }
-
-    public static Props props(final RemoteRpcProviderConfig config, final DOMRpcProviderService rpcProviderService) {
-        Preconditions.checkNotNull(rpcProviderService, "DOMRpcProviderService cannot be null");
-        return Props.create(RpcRegistrar.class, config, rpcProviderService);
-    }
-
-    @Override
-    public void postStop() throws Exception {
-        regs.values().forEach(DOMRpcImplementationRegistration::close);
-        regs.clear();
-
-        super.postStop();
-    }
-
-    @Override
-    protected void handleReceive(final Object message) {
-        if (message instanceof UpdateRemoteEndpoints) {
-            updateRemoteEndpoints(((UpdateRemoteEndpoints) message).getEndpoints());
-        } else {
-            unknownMessage(message);
-        }
-    }
-
-    private void updateRemoteEndpoints(final Map<Address, Optional<RemoteRpcEndpoint>> endpoints) {
-        /*
-         * Updating RPC providers is a two-step process. We first add the newly-discovered RPCs and then close
-         * the old registration. This minimizes churn observed by listeners, as they will not observe RPC
-         * unavailability which would occur if we were to do it the other way around.
-         *
-         * Note that when an RPC moves from one remote node to another, we also do not want to expose the gap,
-         * hence we register all new implementations before closing all registrations.
-         */
-        final Collection<DOMRpcImplementationRegistration<?>> prevRegs = new ArrayList<>(endpoints.size());
-
-        for (Entry<Address, Optional<RemoteRpcEndpoint>> e : endpoints.entrySet()) {
-            LOG.debug("Updating RPC registrations for {}", e.getKey());
-
-            final DOMRpcImplementationRegistration<?> prevReg;
-            final Optional<RemoteRpcEndpoint> maybeEndpoint = e.getValue();
-            if (maybeEndpoint.isPresent()) {
-                final RemoteRpcEndpoint endpoint = maybeEndpoint.get();
-                final RemoteRpcImplementation impl = new RemoteRpcImplementation(endpoint.getRouter(), config);
-                prevReg = regs.put(e.getKey(), rpcProviderService.registerRpcImplementation(impl,
-                    endpoint.getRpcs()));
-            } else {
-                prevReg = regs.remove(e.getKey());
-            }
-
-            if (prevReg != null) {
-                prevRegs.add(prevReg);
-            }
-        }
-
-        for (DOMRpcImplementationRegistration<?> r : prevRegs) {
-            r.close();
-        }
-    }
-}
index 6590941e39e85d638550e6f391dc11081fae52ea..a79a4e45d421f2aad2f008e6a41d92ff115c1592 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc;
 
 import akka.actor.Terminated;
@@ -21,12 +20,11 @@ public class TerminationMonitor extends UntypedAbstractActor {
         LOG.debug("Created TerminationMonitor");
     }
 
-    @Override public void onReceive(Object message) {
-        if (message instanceof Terminated) {
-            Terminated terminated = (Terminated) message;
+    @Override
+    public void onReceive(final Object message) {
+        if (message instanceof Terminated terminated) {
             LOG.debug("Actor terminated : {}", terminated.actor());
-        } else if (message instanceof Monitor) {
-            Monitor monitor = (Monitor) message;
+        } else if (message instanceof Monitor monitor) {
             getContext().watch(monitor.getActorRef());
         }
     }
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/AbstractExecute.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/AbstractExecute.java
new file mode 100644 (file)
index 0000000..613d986
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.messages;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects;
+import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * An abstract base class for invocation requests. Specialized via {@link ExecuteAction} and {@link ExecuteRpc}.
+ */
+public abstract class AbstractExecute<T, I extends NormalizedNode> implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final transient @NonNull T type;
+    private final transient I input;
+
+    AbstractExecute(final @NonNull T type, final I input) {
+        this.type = requireNonNull(type);
+        this.input = input;
+    }
+
+    public final @NonNull T getType() {
+        return type;
+    }
+
+    public final I getInput() {
+        return input;
+    }
+
+    @Override
+    public final String toString() {
+        // We want 'type' to always come first
+        return addToStringAttributes(MoreObjects.toStringHelper(this).omitNullValues().add("type", type)).toString();
+    }
+
+    ToStringHelper addToStringAttributes(final ToStringHelper helper) {
+        return helper.add("input", input);
+    }
+
+    abstract Object writeReplace();
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/AbstractResponse.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/AbstractResponse.java
new file mode 100644 (file)
index 0000000..b184493
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.messages;
+
+import java.io.Serializable;
+import org.eclipse.jdt.annotation.Nullable;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+/**
+ * An abstract base class for invocation responses. Specialized via {@link ActionResponse} and {@link RpcResponse}.
+ */
+public abstract class AbstractResponse<T extends NormalizedNode> implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final transient @Nullable T output;
+
+    public AbstractResponse(final @Nullable T output) {
+        this.output = output;
+    }
+
+    public final @Nullable T getOutput() {
+        return output;
+    }
+
+    abstract Object writeReplace();
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ActionResponse.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ActionResponse.java
new file mode 100644 (file)
index 0000000..145f86f
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.messages;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableList;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Collection;
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+@SuppressFBWarnings({"SE_TRANSIENT_FIELD_NOT_RESTORED", "DMI_NONSERIALIZABLE_OBJECT_WRITTEN"})
+public class ActionResponse extends AbstractResponse<ContainerNode> {
+    private static final long serialVersionUID = 1L;
+
+    private final transient @NonNull ImmutableList<@NonNull RpcError> errors;
+
+    public ActionResponse(final @NonNull Optional<ContainerNode> output, @NonNull final Collection<RpcError> errors) {
+        super(output.orElse(null));
+        this.errors = ImmutableList.copyOf(errors);
+    }
+
+    public @NonNull ImmutableList<@NonNull RpcError> getErrors() {
+        return errors;
+    }
+
+    @Override
+    Object writeReplace() {
+        return new Proxy(this);
+    }
+
+    private static class Proxy implements Externalizable {
+        private static final long serialVersionUID = 1L;
+
+        private ActionResponse actionResponse;
+
+        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
+        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
+        @SuppressWarnings("checkstyle:RedundantModifier")
+        public Proxy() {
+        }
+
+        Proxy(final ActionResponse actionResponse) {
+            this.actionResponse = requireNonNull(actionResponse);
+        }
+
+        @Override
+        public void writeExternal(final ObjectOutput out) throws IOException {
+            out.writeObject(actionResponse.getErrors());
+            SerializationUtils.writeNormalizedNode(out, actionResponse.getOutput());
+        }
+
+        @Override
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            @SuppressWarnings("unchecked")
+            final ImmutableList<RpcError> errors = (ImmutableList<RpcError>) in.readObject();
+            final Optional<NormalizedNode> output = SerializationUtils.readNormalizedNode(in);
+            actionResponse = new ActionResponse(output.map(ContainerNode.class::cast), errors);
+        }
+
+        private Object readResolve() {
+            return actionResponse;
+        }
+    }
+}
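
ActionResponse, like the other messages in this package, keeps its state in transient fields and funnels Java serialization through the writeReplace()/readResolve() proxy, so the bytes on the wire are produced by SerializationUtils rather than by default field serialization. A small self-contained round-trip, purely illustrative (class name and empty payload are placeholders), shows the pair in action:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.List;
import java.util.Optional;
import org.opendaylight.controller.remote.rpc.messages.ActionResponse;

public final class ActionResponseRoundTrip {
    public static void main(final String[] args) throws Exception {
        // Serializing goes through ActionResponse.writeReplace(), which substitutes the Proxy...
        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(new ActionResponse(Optional.empty(), List.of()));
        }

        // ...and deserializing reads the Proxy, whose readResolve() hands back an ActionResponse.
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
            final ActionResponse copy = (ActionResponse) ois.readObject();
            System.out.println(copy.getOutput() == null && copy.getErrors().isEmpty());
        }
    }
}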
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ExecuteAction.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/ExecuteAction.java
new file mode 100644 (file)
index 0000000..c462f7b
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.messages;
+
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.base.MoreObjects.ToStringHelper;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+
+public final class ExecuteAction extends AbstractExecute<Absolute, @NonNull ContainerNode> {
+    private static final long serialVersionUID = 1128904894827335676L;
+
+    private final @NonNull DOMDataTreeIdentifier path;
+
+    private ExecuteAction(final @NonNull Absolute type, final @NonNull DOMDataTreeIdentifier path,
+            final @NonNull ContainerNode input) {
+        super(type, requireNonNull(input));
+        this.path = requireNonNull(path);
+    }
+
+    public static @NonNull ExecuteAction from(final @NonNull Absolute type, @NonNull final DOMDataTreeIdentifier path,
+            final @NonNull ContainerNode input) {
+        return new ExecuteAction(type, path, input);
+    }
+
+    public @NonNull DOMDataTreeIdentifier getPath() {
+        return path;
+    }
+
+    @Override
+    ToStringHelper addToStringAttributes(final ToStringHelper helper) {
+        return super.addToStringAttributes(helper.add("path", path));
+    }
+
+    @Override
+    Object writeReplace() {
+        return new Proxy(this);
+    }
+
+    private static final class Proxy implements Externalizable {
+        private static final long serialVersionUID = 1L;
+
+        private ExecuteAction executeAction;
+
+        // checkstyle flags the public modifier as redundant which really doesn't make sense since it clearly isn't
+        // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
+        @SuppressWarnings("checkstyle:RedundantModifier")
+        public Proxy() {
+
+        }
+
+        Proxy(final ExecuteAction executeAction) {
+            this.executeAction = requireNonNull(executeAction);
+        }
+
+        @Override
+        public void writeExternal(final ObjectOutput out) throws IOException {
+            try (var stream = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
+                stream.writeSchemaNodeIdentifier(executeAction.getType());
+                executeAction.getPath().datastore().writeTo(out);
+                stream.writeYangInstanceIdentifier(executeAction.getPath().path());
+                stream.writeOptionalNormalizedNode(executeAction.getInput());
+            }
+        }
+
+        @Override
+        public void readExternal(final ObjectInput in) throws IOException {
+            final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
+            final SchemaNodeIdentifier sni = stream.readSchemaNodeIdentifier();
+            if (!(sni instanceof Absolute absolute)) {
+                throw new InvalidObjectException("Non-absolute type " + sni);
+            }
+
+            final LogicalDatastoreType type = LogicalDatastoreType.readFrom(in);
+            final YangInstanceIdentifier path = stream.readYangInstanceIdentifier();
+            final ContainerNode input = (ContainerNode) stream.readOptionalNormalizedNode().orElse(null);
+
+            executeAction = new ExecuteAction(absolute, DOMDataTreeIdentifier.of(type, path), input);
+        }
+
+        private Object readResolve() {
+            return verifyNotNull(executeAction);
+        }
+    }
+}
index dca81dc0b7bb29a069d514dca28f0650923011fb..d9f6a67b5f2ac7efcd53fab50d258ff8484b96ef 100644 (file)
@@ -9,59 +9,37 @@ package org.opendaylight.controller.remote.rpc.messages;
 
 import static java.util.Objects.requireNonNull;
 
-import com.google.common.base.MoreObjects;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.io.Serializable;
 import org.eclipse.jdt.annotation.NonNull;
 import org.eclipse.jdt.annotation.Nullable;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
 
-public final class ExecuteRpc implements Serializable {
+public final class ExecuteRpc extends AbstractExecute<QName, @Nullable ContainerNode> {
     private static final long serialVersionUID = 1128904894827335676L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
-    private final NormalizedNode<?, ?> inputNormalizedNode;
-    private final QName rpc;
-
-    private ExecuteRpc(final @Nullable NormalizedNode<?, ?> inputNormalizedNode, final @NonNull QName rpc) {
-        this.rpc = requireNonNull(rpc, "rpc Qname should not be null");
-        this.inputNormalizedNode = inputNormalizedNode;
-    }
-
-    public static ExecuteRpc from(final @NonNull DOMRpcIdentifier rpc, final @Nullable NormalizedNode<?, ?> input) {
-        return new ExecuteRpc(input, rpc.getType().getLastComponent());
-    }
-
-    public @Nullable NormalizedNode<?, ?> getInputNormalizedNode() {
-        return inputNormalizedNode;
+    private ExecuteRpc(final @NonNull QName type, final @Nullable ContainerNode input) {
+        super(type, input);
     }
 
-    public @NonNull QName getRpc() {
-        return rpc;
-    }
-
-    private Object writeReplace() {
-        return new Proxy(this);
+    public static @NonNull ExecuteRpc from(final @NonNull DOMRpcIdentifier rpc,
+            final @Nullable ContainerNode input) {
+        return new ExecuteRpc(rpc.getType(), input);
     }
 
     @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this)
-                .add("rpc", rpc)
-                .add("normalizedNode", inputNormalizedNode)
-                .toString();
+    Object writeReplace() {
+        return new Proxy(this);
     }
 
-    private static class Proxy implements Externalizable {
+    private static final class Proxy implements Externalizable {
         private static final long serialVersionUID = 1L;
 
         private ExecuteRpc executeRpc;
@@ -70,22 +48,27 @@ public final class ExecuteRpc implements Serializable {
         // redundant. It is explicitly needed for Java serialization to be able to create instances via reflection.
         @SuppressWarnings("checkstyle:RedundantModifier")
         public Proxy() {
+            // For Externalizable
         }
 
-        Proxy(ExecuteRpc executeRpc) {
-            this.executeRpc = executeRpc;
+        Proxy(final ExecuteRpc executeRpc) {
+            this.executeRpc = requireNonNull(executeRpc);
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) throws IOException {
-            out.writeObject(executeRpc.getRpc());
-            SerializationUtils.serializeNormalizedNode(executeRpc.getInputNormalizedNode(), out);
+        public void writeExternal(final ObjectOutput out) throws IOException {
+            try (NormalizedNodeDataOutput stream = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
+                stream.writeQName(executeRpc.getType());
+                stream.writeOptionalNormalizedNode(executeRpc.getInput());
+            }
         }
 
         @Override
-        public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
-            QName qname = (QName) in.readObject();
-            executeRpc = new ExecuteRpc(SerializationUtils.deserializeNormalizedNode(in), qname);
+        public void readExternal(final ObjectInput in) throws IOException {
+            final NormalizedNodeDataInput stream = NormalizedNodeDataInput.newDataInput(in);
+            final QName type = stream.readQName();
+            final ContainerNode input = RpcResponse.unmaskContainer(stream.readOptionalNormalizedNode());
+            executeRpc = new ExecuteRpc(type, input);
         }
 
         private Object readResolve() {
index 02d0f1f185116ce30ce0d46754ff26aefe4dd86f..bb308203ddd789fcf783c7ab6e8e0abadd2ca63a 100644 (file)
@@ -7,33 +7,39 @@
  */
 package org.opendaylight.controller.remote.rpc.messages;
 
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
+import java.io.IOException;
+import java.io.InvalidObjectException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.io.Serializable;
+import java.util.Optional;
 import org.eclipse.jdt.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.node.utils.stream.SerializationUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
-public class RpcResponse implements Serializable {
+public class RpcResponse extends AbstractResponse<ContainerNode> {
     private static final long serialVersionUID = -4211279498688989245L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
-            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
-            + "aren't serialized. FindBugs does not recognize this.")
-    private final NormalizedNode<?, ?> resultNormalizedNode;
-
-    public RpcResponse(final @Nullable NormalizedNode<?, ?> inputNormalizedNode) {
-        resultNormalizedNode = inputNormalizedNode;
+    public RpcResponse(final @Nullable ContainerNode output) {
+        super(output);
     }
 
-    public @Nullable NormalizedNode<?, ?> getResultNormalizedNode() {
-        return resultNormalizedNode;
+    @Override
+    Object writeReplace() {
+        return new Proxy(this);
     }
 
-    private Object writeReplace() {
-        return new Proxy(this);
+    static @Nullable ContainerNode unmaskContainer(final Optional<NormalizedNode> optNode)
+            throws InvalidObjectException {
+        if (optNode.isEmpty()) {
+            return null;
+        }
+        final var node = optNode.orElseThrow();
+        if (node instanceof ContainerNode container) {
+            return container;
+        }
+        throw new InvalidObjectException("Unexpected data " + node.contract().getSimpleName());
     }
 
     private static class Proxy implements Externalizable {
@@ -47,18 +53,18 @@ public class RpcResponse implements Serializable {
         public Proxy() {
         }
 
-        Proxy(RpcResponse rpcResponse) {
+        Proxy(final RpcResponse rpcResponse) {
             this.rpcResponse = rpcResponse;
         }
 
         @Override
-        public void writeExternal(ObjectOutput out) {
-            SerializationUtils.serializeNormalizedNode(rpcResponse.getResultNormalizedNode(), out);
+        public void writeExternal(final ObjectOutput out) throws IOException {
+            SerializationUtils.writeNormalizedNode(out, rpcResponse.getOutput());
         }
 
         @Override
-        public void readExternal(ObjectInput in) {
-            rpcResponse = new RpcResponse(SerializationUtils.deserializeNormalizedNode(in));
+        public void readExternal(final ObjectInput in) throws IOException {
+            rpcResponse = new RpcResponse(unmaskContainer(SerializationUtils.readNormalizedNode(in)));
         }
 
         private Object readResolve() {
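
unmaskContainer() above narrows an Optional<NormalizedNode> to a ContainerNode, treating an empty Optional as null and rejecting any other node kind. A minimal JDK-only sketch of that narrowing idiom, with CharSequence standing in for ContainerNode (the types here are placeholders, not the project's API):

    import java.io.InvalidObjectException;
    import java.util.Optional;

    final class Narrowing {
        private Narrowing() {
        }

        static CharSequence unmask(final Optional<Object> optValue) throws InvalidObjectException {
            if (optValue.isEmpty()) {
                // Absent data is reported as null to the caller.
                return null;
            }
            final var value = optValue.orElseThrow();
            if (value instanceof CharSequence sequence) {
                return sequence;
            }
            throw new InvalidObjectException("Unexpected data " + value.getClass().getSimpleName());
        }
    }
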
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/AbstractRoutingTable.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/AbstractRoutingTable.java
new file mode 100644 (file)
index 0000000..c25ee44
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableSet;
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.Optional;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketData;
+
+/**
+ * Common class for routing tables.
+ *
+ * @param <T> Table type
+ * @param <I> Item type
+ */
+public abstract class AbstractRoutingTable<T extends AbstractRoutingTable<T, I>, I> implements BucketData<T>,
+        Serializable {
+    private static final long serialVersionUID = 1L;
+
+    private final @NonNull ActorRef invoker;
+    private final @NonNull ImmutableSet<I> items;
+
+    AbstractRoutingTable(final ActorRef invoker, final Collection<I> items) {
+        this.invoker = requireNonNull(invoker);
+        this.items = ImmutableSet.copyOf(items);
+    }
+
+    @Override
+    public final Optional<ActorRef> getWatchActor() {
+        return Optional.of(invoker);
+    }
+
+    public final @NonNull ImmutableSet<I> getItems() {
+        return items;
+    }
+
+    final @NonNull ActorRef getInvoker() {
+        return invoker;
+    }
+
+    @VisibleForTesting
+    public final boolean contains(final I routeId) {
+        return items.contains(routeId);
+    }
+
+    @VisibleForTesting
+    public final int size() {
+        return items.size();
+    }
+
+    abstract Object writeReplace();
+
+    @Override
+    public final String toString() {
+        return MoreObjects.toStringHelper(this).add("invoker", invoker).add("items", items).toString();
+    }
+}
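
AbstractRoutingTable copies the supplied items into an ImmutableSet at construction, so an "update" always means building a new table rather than mutating a shared one. A stripped-down, JDK-only sketch of that shape (illustrative names only; the real class also carries the invoker ActorRef and the BucketData contract):

    import java.util.Collection;
    import java.util.Set;

    abstract class ImmutableTable<T extends ImmutableTable<T, I>, I> {
        private final Set<I> items;

        ImmutableTable(final Collection<I> items) {
            // Defensive, immutable copy: callers cannot change the table afterwards.
            this.items = Set.copyOf(items);
        }

        final Set<I> items() {
            return items;
        }

        final boolean contains(final I item) {
            return items.contains(item);
        }

        final int size() {
            return items.size();
        }
    }

Concrete tables later in this change (ActionRoutingTable, RoutingTable) then derive changed instances by copying items() into a temporary set and constructing a fresh table.
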
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistry.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistry.java
new file mode 100644 (file)
index 0000000..2a91b5a
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorRef;
+import akka.actor.Address;
+import akka.actor.Props;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor;
+import org.opendaylight.controller.remote.rpc.registry.mbeans.RemoteActionRegistryMXBeanImpl;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
+
+/**
+ * Registry to look up cluster nodes that have registered for a given Action.
+ *
+ * <p>
+ * It uses {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor} to maintain this
+ * cluster-wide information.
+ */
+public class ActionRegistry extends BucketStoreActor<ActionRoutingTable> {
+    private final ActorRef rpcRegistrar;
+
+    private RemoteActionRegistryMXBeanImpl mxBean;
+
+    public ActionRegistry(final RemoteOpsProviderConfig config, final ActorRef rpcInvoker,
+                          final ActorRef rpcRegistrar) {
+        super(config, config.getRpcRegistryPersistenceId(), new ActionRoutingTable(rpcInvoker, ImmutableSet.of()));
+        this.rpcRegistrar = requireNonNull(rpcRegistrar);
+    }
+
+    /**
+     * Create a new props instance for instantiating an ActionRegistry actor.
+     *
+     * @param config Provider configuration
+     * @param opsInvoker Actor handling action invocation requests from remote nodes
+     * @param opsRegistrar Actor to which remote action endpoints are reported
+     * @return A new {@link Props} instance
+     */
+    public static Props props(final RemoteOpsProviderConfig config, final ActorRef opsInvoker,
+                              final ActorRef opsRegistrar) {
+        return Props.create(ActionRegistry.class, config, opsInvoker, opsRegistrar);
+    }
+
+    @Override
+    public void preStart() {
+        super.preStart();
+        mxBean = new RemoteActionRegistryMXBeanImpl(new BucketStoreAccess(self(), getContext().dispatcher(),
+            getConfig().getAskDuration()), getConfig().getAskDuration());
+    }
+
+    @Override
+    public void postStop() throws Exception {
+        if (mxBean != null) {
+            mxBean.unregister();
+            mxBean = null;
+        }
+        super.postStop();
+    }
+
+    @Override
+    protected void handleCommand(final Object message) throws Exception {
+        if (message instanceof ActionRegistry.Messages.UpdateActions updateActions) {
+            LOG.debug("handling UpdateActions message");
+            updatesActionRoutes(updateActions);
+        } else {
+            super.handleCommand(message);
+        }
+    }
+
+    private void updatesActionRoutes(final Messages.UpdateActions msg) {
+        LOG.debug("addedActions: {}", msg.getAddedActions());
+        LOG.debug("removedActions: {}", msg.getRemovedActions());
+        updateLocalBucket(getLocalData().updateActions(msg.getAddedActions(), msg.getRemovedActions()));
+    }
+
+    @Override
+    protected void onBucketRemoved(final Address address, final Bucket<ActionRoutingTable> bucket) {
+        rpcRegistrar.tell(new Messages.UpdateRemoteActionEndpoints(ImmutableMap.of(address, Optional.empty())),
+            ActorRef.noSender());
+    }
+
+    @Override
+    protected void onBucketsUpdated(final Map<Address, Bucket<ActionRoutingTable>> buckets) {
+        LOG.debug("Updating buckets for action registry");
+        final Map<Address, Optional<RemoteActionEndpoint>> endpoints = new HashMap<>(buckets.size());
+
+        for (Map.Entry<Address, Bucket<ActionRoutingTable>> e : buckets.entrySet()) {
+            final ActionRoutingTable table = e.getValue().getData();
+
+            final Collection<DOMActionInstance> actions = table.getItems();
+            endpoints.put(e.getKey(), actions.isEmpty() ? Optional.empty()
+                : Optional.of(new RemoteActionEndpoint(table.getInvoker(), actions)));
+        }
+
+        if (!endpoints.isEmpty()) {
+            rpcRegistrar.tell(new Messages.UpdateRemoteActionEndpoints(endpoints), ActorRef.noSender());
+        }
+    }
+
+    public static final class RemoteActionEndpoint {
+        private final Set<DOMActionInstance> actions;
+        private final ActorRef router;
+
+        @VisibleForTesting
+        public RemoteActionEndpoint(final ActorRef router, final Collection<DOMActionInstance> actions) {
+            this.router = requireNonNull(router);
+            this.actions = ImmutableSet.copyOf(actions);
+        }
+
+        public ActorRef getRouter() {
+            return router;
+        }
+
+        public Set<DOMActionInstance> getActions() {
+            return actions;
+        }
+    }
+
+    /**
+     * All messages used by the ActionRegistry.
+     */
+    public static class Messages {
+        abstract static class AbstractActionRouteMessage {
+            final Collection<DOMActionInstance> addedActions;
+            final Collection<DOMActionInstance> removedActions;
+
+            AbstractActionRouteMessage(final Collection<DOMActionInstance> addedActions,
+                                       final Collection<DOMActionInstance> removedActions) {
+                this.addedActions = ImmutableList.copyOf(addedActions);
+                this.removedActions = ImmutableList.copyOf(removedActions);
+            }
+
+            Collection<DOMActionInstance> getAddedActions() {
+                return addedActions;
+            }
+
+            Collection<DOMActionInstance> getRemovedActions() {
+                return removedActions;
+            }
+
+            @Override
+            public String toString() {
+                return "ContainsRoute{" + "addedActions=" + addedActions + " removedActions=" + removedActions + '}';
+            }
+        }
+
+        public static final class UpdateActions extends AbstractActionRouteMessage {
+            public UpdateActions(final Collection<DOMActionInstance> addedActions,
+                                 final Collection<DOMActionInstance> removedActions) {
+                super(addedActions, removedActions);
+            }
+        }
+
+        public static final class UpdateRemoteActionEndpoints {
+            private final Map<Address, Optional<RemoteActionEndpoint>> actionEndpoints;
+
+            @VisibleForTesting
+            public UpdateRemoteActionEndpoints(
+                    final Map<Address, Optional<RemoteActionEndpoint>> actionEndpoints) {
+                this.actionEndpoints = ImmutableMap.copyOf(actionEndpoints);
+            }
+
+            public Map<Address, Optional<RemoteActionEndpoint>> getActionEndpoints() {
+                return actionEndpoints;
+            }
+        }
+    }
+}
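
handleCommand() above relies on instanceof pattern variables instead of a check-then-cast pair, which is the recurring shape of this change. A tiny JDK-only sketch of the same dispatch style, using made-up message types:

    final class Dispatcher {
        void handle(final Object message) {
            if (message instanceof String text) {
                // The pattern variable is already of the narrowed type, no cast needed.
                System.out.println("text of length " + text.length());
            } else if (message instanceof Integer number) {
                System.out.println("number " + number);
            } else {
                System.out.println("unhandled: " + message);
            }
        }
    }
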
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRoutingTable.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/ActionRoutingTable.java
new file mode 100644 (file)
index 0000000..5ca5a71
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry;
+
+import akka.actor.ActorRef;
+import akka.serialization.JavaSerializer;
+import akka.serialization.Serialization;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class ActionRoutingTable extends AbstractRoutingTable<ActionRoutingTable, DOMActionInstance> {
+    private static final class Proxy implements Externalizable {
+        private static final long serialVersionUID = 1L;
+        private static final Logger LOG = LoggerFactory.getLogger(ActionRoutingTable.class);
+
+        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "We deal with the field in serialization methods.")
+        private Collection<DOMActionInstance> actions;
+        private ActorRef opsInvoker;
+
+        // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
+        // be able to create instances via reflection.
+        @SuppressWarnings("checkstyle:RedundantModifier")
+        public Proxy() {
+            // For Externalizable
+        }
+
+        Proxy(final ActionRoutingTable table) {
+            actions = table.getItems();
+            opsInvoker = table.getInvoker();
+        }
+
+        @Override
+        public void writeExternal(final ObjectOutput out) throws IOException {
+            LOG.debug("serializing ActionRoutingTable.");
+            out.writeObject(Serialization.serializedActorPath(opsInvoker));
+
+            try (NormalizedNodeDataOutput nnout = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
+                nnout.writeInt(actions.size());
+                for (DOMActionInstance id : actions) {
+                    final Absolute type = id.getType();
+                    nnout.writeSchemaNodeIdentifier(type);
+                    nnout.writeYangInstanceIdentifier(YangInstanceIdentifier.of(type.lastNodeIdentifier()));
+                }
+            }
+        }
+
+        @Override
+        public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+            LOG.debug("deserializing ActionRoutingTable");
+            opsInvoker = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
+
+            final NormalizedNodeDataInput nnin = NormalizedNodeDataInput.newDataInput(in);
+            final int size = nnin.readInt();
+            actions = new ArrayList<>(size);
+            for (int i = 0; i < size; ++i) {
+                final SchemaNodeIdentifier sni = nnin.readSchemaNodeIdentifier();
+                if (!(sni instanceof Absolute absolute)) {
+                    throw new InvalidObjectException("Non-absolute type " + sni);
+                }
+
+                actions.add(DOMActionInstance.of(absolute, LogicalDatastoreType.OPERATIONAL,
+                        nnin.readYangInstanceIdentifier()));
+            }
+        }
+
+        private Object readResolve() {
+            return new ActionRoutingTable(opsInvoker, actions);
+        }
+    }
+
+    private static final long serialVersionUID = 1L;
+    private static final Logger LOG = LoggerFactory.getLogger(ActionRoutingTable.class);
+
+    ActionRoutingTable(final ActorRef invoker, final Collection<DOMActionInstance> actions) {
+        super(invoker, actions);
+    }
+
+    ActionRoutingTable updateActions(final Collection<DOMActionInstance> toAdd,
+                                     final Collection<DOMActionInstance> toRemove) {
+        LOG.debug("Updating actions in ActionRoutingTable");
+        final Set<DOMActionInstance> newActions = new HashSet<>(getItems());
+        newActions.addAll(toAdd);
+        newActions.removeAll(toRemove);
+        return new ActionRoutingTable(getInvoker(), newActions);
+    }
+
+    @Override
+    Object writeReplace() {
+        return new Proxy(this);
+    }
+}
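
The Proxy above writes the action set as a length prefix followed by one entry per action, and the reader pre-sizes its collection from that count. A JDK-only sketch of the same framing over DataOutput/DataInput, with plain strings in place of schema identifiers:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    final class LengthPrefixed {
        private LengthPrefixed() {
        }

        static void write(final DataOutput out, final List<String> items) throws IOException {
            // Count first, then the entries themselves.
            out.writeInt(items.size());
            for (String item : items) {
                out.writeUTF(item);
            }
        }

        static List<String> read(final DataInput in) throws IOException {
            final int size = in.readInt();
            final List<String> items = new ArrayList<>(size);
            for (int i = 0; i < size; ++i) {
                items.add(in.readUTF());
            }
            return items;
        }
    }
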
index 5159b96876d9e78bb4f2b85e08477b3ce842fb6a..f0795a3a42b89fd83fd6e37e6c2141b7c653c977 100644 (file)
@@ -10,33 +10,28 @@ package org.opendaylight.controller.remote.rpc.registry;
 import akka.actor.ActorRef;
 import akka.serialization.JavaSerializer;
 import akka.serialization.Serialization;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSet;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
-import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.Optional;
 import java.util.Set;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataInput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeDataOutput;
-import org.opendaylight.controller.cluster.datastore.node.utils.stream.NormalizedNodeInputOutput;
-import org.opendaylight.controller.remote.rpc.registry.gossip.BucketData;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataInput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeDataOutput;
+import org.opendaylight.yangtools.yang.data.codec.binfmt.NormalizedNodeStreamVersion;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
 
-public final class RoutingTable implements BucketData<RoutingTable>, Serializable {
+public final class RoutingTable extends AbstractRoutingTable<RoutingTable, DOMRpcIdentifier> {
     private static final class Proxy implements Externalizable {
         private static final long serialVersionUID = 1L;
 
         @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "We deal with the field in serialization methods.")
         private Collection<DOMRpcIdentifier> rpcs;
-        private ActorRef rpcInvoker;
+        private ActorRef opsInvoker;
 
         // checkstyle flags the public modifier as redundant however it is explicitly needed for Java serialization to
         // be able to create instances via reflection.
@@ -46,91 +41,63 @@ public final class RoutingTable implements BucketData<RoutingTable>, Serializabl
         }
 
         Proxy(final RoutingTable table) {
-            rpcs = table.getRoutes();
-            rpcInvoker = table.getRpcInvoker();
+            rpcs = table.getItems();
+            opsInvoker = table.getInvoker();
         }
 
         @Override
         public void writeExternal(final ObjectOutput out) throws IOException {
-            out.writeObject(Serialization.serializedActorPath(rpcInvoker));
-
-            final NormalizedNodeDataOutput nnout = NormalizedNodeInputOutput.newDataOutput(out);
-            nnout.writeInt(rpcs.size());
-            for (DOMRpcIdentifier id : rpcs) {
-                nnout.writeSchemaPath(id.getType());
-                nnout.writeYangInstanceIdentifier(id.getContextReference());
+            out.writeObject(Serialization.serializedActorPath(opsInvoker));
+
+            try (NormalizedNodeDataOutput nnout = NormalizedNodeStreamVersion.current().newDataOutput(out)) {
+                nnout.writeInt(rpcs.size());
+                for (DOMRpcIdentifier id : rpcs) {
+                    // TODO: we should be able to get by with just a QName
+                    nnout.writeSchemaNodeIdentifier(Absolute.of(id.getType()));
+                    nnout.writeYangInstanceIdentifier(id.getContextReference());
+                }
             }
         }
 
         @Override
         public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
-            rpcInvoker = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
+            opsInvoker = JavaSerializer.currentSystem().value().provider().resolveActorRef((String) in.readObject());
 
-            final NormalizedNodeDataInput nnin = NormalizedNodeInputOutput.newDataInput(in);
+            final NormalizedNodeDataInput nnin = NormalizedNodeDataInput.newDataInput(in);
             final int size = nnin.readInt();
             rpcs = new ArrayList<>(size);
             for (int i = 0; i < size; ++i) {
-                rpcs.add(DOMRpcIdentifier.create(nnin.readSchemaPath(), nnin.readYangInstanceIdentifier()));
+                // TODO: we should be able to get by with just a QName
+                rpcs.add(DOMRpcIdentifier.create(nnin.readSchemaNodeIdentifier().firstNodeIdentifier(),
+                    nnin.readYangInstanceIdentifier()));
             }
         }
 
         private Object readResolve() {
-            return new RoutingTable(rpcInvoker, rpcs);
+            return new RoutingTable(opsInvoker, rpcs);
         }
     }
 
     private static final long serialVersionUID = 1L;
 
-    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "We deal with the field in serialization methods.")
-    private final Set<DOMRpcIdentifier> rpcs;
-    private final ActorRef rpcInvoker;
-
-    RoutingTable(final ActorRef rpcInvoker, final Collection<DOMRpcIdentifier> table) {
-        this.rpcInvoker = Preconditions.checkNotNull(rpcInvoker);
-        this.rpcs = ImmutableSet.copyOf(table);
-    }
-
-    @Override
-    public Optional<ActorRef> getWatchActor() {
-        return Optional.of(rpcInvoker);
-    }
-
-    public Set<DOMRpcIdentifier> getRoutes() {
-        return rpcs;
-    }
-
-    ActorRef getRpcInvoker() {
-        return rpcInvoker;
+    RoutingTable(final ActorRef invoker, final Collection<DOMRpcIdentifier> table) {
+        super(invoker, table);
     }
 
     RoutingTable addRpcs(final Collection<DOMRpcIdentifier> toAdd) {
-        final Set<DOMRpcIdentifier> newRpcs = new HashSet<>(rpcs);
+        final Set<DOMRpcIdentifier> newRpcs = new HashSet<>(getItems());
         newRpcs.addAll(toAdd);
-        return new RoutingTable(rpcInvoker, newRpcs);
+        return new RoutingTable(getInvoker(), newRpcs);
     }
 
     RoutingTable removeRpcs(final Collection<DOMRpcIdentifier> toRemove) {
-        final Set<DOMRpcIdentifier> newRpcs = new HashSet<>(rpcs);
+        final Set<DOMRpcIdentifier> newRpcs = new HashSet<>(getItems());
         newRpcs.removeAll(toRemove);
-        return new RoutingTable(rpcInvoker, newRpcs);
-    }
-
-    private Object writeReplace() {
-        return new Proxy(this);
-    }
-
-    @VisibleForTesting
-    boolean contains(final DOMRpcIdentifier routeId) {
-        return rpcs.contains(routeId);
-    }
-
-    @VisibleForTesting
-    int size() {
-        return rpcs.size();
+        return new RoutingTable(getInvoker(), newRpcs);
     }
 
     @Override
-    public String toString() {
-        return "RoutingTable{" + "rpcs=" + rpcs + ", rpcInvoker=" + rpcInvoker + '}';
+    Object writeReplace() {
+        return new Proxy(this);
     }
 }
index 5ba97a306f7b68625253eaff0f258660139a4a27..8d66ed8ccb163abc56891009bfe09cfdae29add6 100644 (file)
@@ -7,11 +7,13 @@
  */
 package org.opendaylight.controller.remote.rpc.registry;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.Address;
 import akka.actor.Props;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
@@ -22,10 +24,9 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.Set;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
 import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
 import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
 import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor;
@@ -41,13 +42,12 @@ import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
  */
 public class RpcRegistry extends BucketStoreActor<RoutingTable> {
     private final ActorRef rpcRegistrar;
-    private final RemoteRpcRegistryMXBeanImpl mxBean;
+    private RemoteRpcRegistryMXBeanImpl mxBean;
 
-    public RpcRegistry(final RemoteRpcProviderConfig config, final ActorRef rpcInvoker, final ActorRef rpcRegistrar) {
+    public RpcRegistry(final RemoteOpsProviderConfig config, final ActorRef rpcInvoker, final ActorRef rpcRegistrar) {
         super(config, config.getRpcRegistryPersistenceId(), new RoutingTable(rpcInvoker, ImmutableSet.of()));
-        this.rpcRegistrar = Preconditions.checkNotNull(rpcRegistrar);
-        this.mxBean = new RemoteRpcRegistryMXBeanImpl(new BucketStoreAccess(self(), getContext().dispatcher(),
-                config.getAskDuration()), config.getAskDuration());
+        this.rpcRegistrar = requireNonNull(rpcRegistrar);
     }
 
     /**
@@ -58,23 +58,33 @@ public class RpcRegistry extends BucketStoreActor<RoutingTable> {
      * @param rpcInvoker Actor handling RPC invocation requests from remote nodes
      * @return A new {@link Props} instance
      */
-    public static Props props(final RemoteRpcProviderConfig config, final ActorRef rpcInvoker,
-            final ActorRef rpcRegistrar) {
+    public static Props props(final RemoteOpsProviderConfig config, final ActorRef rpcInvoker,
+                              final ActorRef rpcRegistrar) {
         return Props.create(RpcRegistry.class, config, rpcInvoker, rpcRegistrar);
     }
 
     @Override
-    public void postStop() {
+    public void preStart() {
+        super.preStart();
+        mxBean = new RemoteRpcRegistryMXBeanImpl(new BucketStoreAccess(self(), getContext().dispatcher(),
+            getConfig().getAskDuration()), getConfig().getAskDuration());
+    }
+
+    @Override
+    public void postStop() throws Exception {
+        if (mxBean != null) {
+            mxBean.unregister();
+            mxBean = null;
+        }
         super.postStop();
-        this.mxBean.unregister();
     }
 
     @Override
     protected void handleCommand(final Object message) throws Exception {
-        if (message instanceof AddOrUpdateRoutes) {
-            receiveAddRoutes((AddOrUpdateRoutes) message);
-        } else if (message instanceof RemoveRoutes) {
-            receiveRemoveRoutes((RemoveRoutes) message);
+        if (message instanceof AddOrUpdateRoutes addRoutes) {
+            receiveAddRoutes(addRoutes);
+        } else if (message instanceof RemoveRoutes removeRoutes) {
+            receiveRemoveRoutes(removeRoutes);
         } else {
             super.handleCommand(message);
         }
@@ -97,7 +107,8 @@ public class RpcRegistry extends BucketStoreActor<RoutingTable> {
 
     @Override
     protected void onBucketRemoved(final Address address, final Bucket<RoutingTable> bucket) {
-        rpcRegistrar.tell(new UpdateRemoteEndpoints(ImmutableMap.of(address, Optional.empty())), ActorRef.noSender());
+        rpcRegistrar.tell(new Messages.UpdateRemoteEndpoints(ImmutableMap.of(address, Optional.empty())),
+                ActorRef.noSender());
     }
 
     @Override
@@ -107,13 +118,13 @@ public class RpcRegistry extends BucketStoreActor<RoutingTable> {
         for (Entry<Address, Bucket<RoutingTable>> e : buckets.entrySet()) {
             final RoutingTable table = e.getValue().getData();
 
-            final Collection<DOMRpcIdentifier> rpcs = table.getRoutes();
+            final Collection<DOMRpcIdentifier> rpcs = table.getItems();
             endpoints.put(e.getKey(), rpcs.isEmpty() ? Optional.empty()
-                    : Optional.of(new RemoteRpcEndpoint(table.getRpcInvoker(), rpcs)));
+                    : Optional.of(new RemoteRpcEndpoint(table.getInvoker(), rpcs)));
         }
 
         if (!endpoints.isEmpty()) {
-            rpcRegistrar.tell(new UpdateRemoteEndpoints(endpoints), ActorRef.noSender());
+            rpcRegistrar.tell(new Messages.UpdateRemoteEndpoints(endpoints), ActorRef.noSender());
         }
     }
 
@@ -123,7 +134,7 @@ public class RpcRegistry extends BucketStoreActor<RoutingTable> {
 
         @VisibleForTesting
         public RemoteRpcEndpoint(final ActorRef router, final Collection<DOMRpcIdentifier> rpcs) {
-            this.router = Preconditions.checkNotNull(router);
+            this.router = requireNonNull(router);
             this.rpcs = ImmutableSet.copyOf(rpcs);
         }
 
@@ -141,46 +152,48 @@ public class RpcRegistry extends BucketStoreActor<RoutingTable> {
      */
     public static class Messages {
         abstract static class AbstractRouteMessage {
-            final List<DOMRpcIdentifier> routeIdentifiers;
+            final List<DOMRpcIdentifier> rpcRouteIdentifiers;
 
-            AbstractRouteMessage(final Collection<DOMRpcIdentifier> routeIdentifiers) {
-                Preconditions.checkArgument(routeIdentifiers != null && !routeIdentifiers.isEmpty(),
+            AbstractRouteMessage(final Collection<DOMRpcIdentifier> rpcRouteIdentifiers) {
+                checkArgument(rpcRouteIdentifiers != null && !rpcRouteIdentifiers.isEmpty(),
                         "Route Identifiers must be supplied");
-                this.routeIdentifiers = ImmutableList.copyOf(routeIdentifiers);
+                this.rpcRouteIdentifiers = ImmutableList.copyOf(rpcRouteIdentifiers);
             }
 
             List<DOMRpcIdentifier> getRouteIdentifiers() {
-                return this.routeIdentifiers;
+                return rpcRouteIdentifiers;
             }
 
             @Override
             public String toString() {
-                return "ContainsRoute{" + "routeIdentifiers=" + routeIdentifiers + '}';
+                return "ContainsRoute{" + "routeIdentifiers=" + rpcRouteIdentifiers + '}';
             }
         }
 
-        public static final class AddOrUpdateRoutes extends AbstractRouteMessage {
-            public AddOrUpdateRoutes(final Collection<DOMRpcIdentifier> routeIdentifiers) {
-                super(routeIdentifiers);
+        public static final class AddOrUpdateRoutes extends AbstractRouteMessage {
+            public AddOrUpdateRoutes(final Collection<DOMRpcIdentifier> rpcRouteIdentifiers) {
+                super(rpcRouteIdentifiers);
             }
         }
 
         public static final class RemoveRoutes extends AbstractRouteMessage {
-            public RemoveRoutes(final Collection<DOMRpcIdentifier> routeIdentifiers) {
-                super(routeIdentifiers);
+            public RemoveRoutes(final Collection<DOMRpcIdentifier> rpcRouteIdentifiers) {
+                super(rpcRouteIdentifiers);
             }
         }
 
         public static final class UpdateRemoteEndpoints {
-            private final Map<Address, Optional<RemoteRpcEndpoint>> endpoints;
+            private final Map<Address, Optional<RemoteRpcEndpoint>> rpcEndpoints;
+
             @VisibleForTesting
-            public UpdateRemoteEndpoints(final Map<Address, Optional<RemoteRpcEndpoint>> endpoints) {
-                this.endpoints = ImmutableMap.copyOf(endpoints);
+            public UpdateRemoteEndpoints(final Map<Address, Optional<RemoteRpcEndpoint>> rpcEndpoints) {
+                this.rpcEndpoints = ImmutableMap.copyOf(rpcEndpoints);
             }
 
-            public Map<Address, Optional<RemoteRpcEndpoint>> getEndpoints() {
-                return endpoints;
+            public Map<Address, Optional<RemoteRpcEndpoint>> getRpcEndpoints() {
+                return rpcEndpoints;
             }
         }
     }
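
onBucketsUpdated() above maps every remote bucket to either a present endpoint or Optional.empty(), and the registrar treats the empty case as "drop whatever you held for this address". A JDK-only sketch of that translation, with String keys and route sets standing in for Address and DOMRpcIdentifier:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;
    import java.util.Set;

    final class EndpointMapping {
        private EndpointMapping() {
        }

        static Map<String, Optional<Set<String>>> toEndpoints(final Map<String, Set<String>> buckets) {
            final Map<String, Optional<Set<String>>> endpoints = new HashMap<>(buckets.size());
            for (Map.Entry<String, Set<String>> entry : buckets.entrySet()) {
                final Set<String> routes = entry.getValue();
                // An empty route set becomes an explicit "no endpoint" marker.
                endpoints.put(entry.getKey(), routes.isEmpty() ? Optional.empty() : Optional.of(routes));
            }
            return endpoints;
        }
    }
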
index e06e5fb15bfaf38cb7b522eb9f57efe3d6920c75..efbd63cd211ab4548252715530c313b484ef04d6 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.remote.rpc.registry.gossip;
 
+import static java.util.Objects.requireNonNull;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getBucketsByMembersMessage;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getLocalDataMessage;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreActor.getRemoteBucketsMessage;
@@ -18,21 +19,16 @@ import akka.actor.Address;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
 import akka.util.Timeout;
-import com.google.common.annotations.Beta;
 import com.google.common.annotations.VisibleForTesting;
 import java.util.Collection;
 import java.util.Map;
-import java.util.Objects;
 import java.util.function.Consumer;
 import scala.concurrent.ExecutionContext;
 import scala.concurrent.Future;
 
 /**
  * Convenience access to {@link BucketStoreActor}. Used mostly by {@link Gossiper}.
- *
- * @author Robert Varga
  */
-@Beta
 @VisibleForTesting
 public final class BucketStoreAccess {
     private final ActorRef actorRef;
@@ -40,15 +36,15 @@ public final class BucketStoreAccess {
     private final Timeout timeout;
 
     public BucketStoreAccess(final ActorRef actorRef, final ExecutionContext dispatcher, final Timeout timeout) {
-        this.actorRef = Objects.requireNonNull(actorRef);
-        this.dispatcher = Objects.requireNonNull(dispatcher);
-        this.timeout = Objects.requireNonNull(timeout);
+        this.actorRef = requireNonNull(actorRef);
+        this.dispatcher = requireNonNull(dispatcher);
+        this.timeout = requireNonNull(timeout);
     }
 
     <T extends BucketData<T>> void getBucketsByMembers(final Collection<Address> members,
             final Consumer<Map<Address, Bucket<T>>> callback) {
         Patterns.ask(actorRef, getBucketsByMembersMessage(members), timeout)
-            .onComplete(new OnComplete<Object>() {
+            .onComplete(new OnComplete<>() {
                 @SuppressWarnings("unchecked")
                 @Override
                 public void onComplete(final Throwable failure, final Object success) {
@@ -60,7 +56,7 @@ public final class BucketStoreAccess {
     }
 
     void getBucketVersions(final Consumer<Map<Address, Long>> callback) {
-        Patterns.ask(actorRef, Singletons.GET_BUCKET_VERSIONS, timeout).onComplete(new OnComplete<Object>() {
+        Patterns.ask(actorRef, Singletons.GET_BUCKET_VERSIONS, timeout).onComplete(new OnComplete<>() {
             @SuppressWarnings("unchecked")
             @Override
             public void onComplete(final Throwable failure, final Object success) {
@@ -96,9 +92,13 @@ public final class BucketStoreAccess {
     }
 
     public enum Singletons {
-        // Sent from Gossiper to BucketStore, response is an immutable Map<Address, Bucket<?>>
+        /**
+         * Sent from Gossiper to BucketStore, response is an immutable {@code Map<Address, Bucket<?>>}.
+         */
         GET_ALL_BUCKETS,
-        // Sent from Gossiper to BucketStore, response is an immutable Map<Address, Long>
+        /**
+         * Sent from Gossiper to BucketStore, response is an immutable {@code Map<Address, Long>}.
+         */
         GET_BUCKET_VERSIONS,
     }
 }
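
The new OnComplete<>() spelling above depends on Java 9+ accepting the diamond operator on anonymous classes. The same shape with a JDK type, as a sketch:

    import java.util.Comparator;
    import java.util.List;

    final class DiamondAnonymous {
        private DiamondAnonymous() {
        }

        static void sortByLength(final List<String> words) {
            // Since Java 9 the type argument of an anonymous class can be inferred from
            // the target type, so the explicit <String> is no longer required.
            final Comparator<String> byLength = new Comparator<>() {
                @Override
                public int compare(final String first, final String second) {
                    return Integer.compare(first.length(), second.length());
                }
            };
            words.sort(byLength);
        }
    }
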
index 9a84b91300c7681515f8dd85a94475aab1fb1364..f155880c0185677e20e4187df79684858f8be934 100644 (file)
@@ -5,9 +5,11 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc.registry.gossip;
 
+import static com.google.common.base.Preconditions.checkState;
+import static com.google.common.base.Verify.verify;
+import static java.util.Objects.requireNonNull;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess.Singletons.GET_ALL_BUCKETS;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess.Singletons.GET_BUCKET_VERSIONS;
 
@@ -25,8 +27,6 @@ import akka.persistence.SaveSnapshotSuccess;
 import akka.persistence.SnapshotOffer;
 import akka.persistence.SnapshotSelectionCriteria;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.SetMultimap;
@@ -37,7 +37,7 @@ import java.util.Map.Entry;
 import java.util.Optional;
 import java.util.function.Consumer;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 
 /**
  * A store that syncs its data across nodes in the cluster.
@@ -72,7 +72,7 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
      */
     private final SetMultimap<ActorRef, Address> watchedActors = HashMultimap.create(1, 1);
 
-    private final RemoteRpcProviderConfig config;
+    private final RemoteOpsProviderConfig config;
     private final String persistenceId;
 
     /**
@@ -88,10 +88,10 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
     private Integer incarnation;
     private boolean persisting;
 
-    protected BucketStoreActor(final RemoteRpcProviderConfig config, final String persistenceId, final T initialData) {
-        this.config = Preconditions.checkNotNull(config);
-        this.initialData = Preconditions.checkNotNull(initialData);
-        this.persistenceId = Preconditions.checkNotNull(persistenceId);
+    protected BucketStoreActor(final RemoteOpsProviderConfig config, final String persistenceId, final T initialData) {
+        this.config = requireNonNull(config);
+        this.initialData = requireNonNull(initialData);
+        this.persistenceId = requireNonNull(persistenceId);
     }
 
     static ExecuteInActor getBucketsByMembersMessage(final Collection<Address> members) {
@@ -154,18 +154,17 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
             return;
         }
 
-        if (message instanceof ExecuteInActor) {
-            ((ExecuteInActor) message).accept(this);
+        if (message instanceof ExecuteInActor execute) {
+            execute.accept(this);
         } else if (GET_BUCKET_VERSIONS == message) {
             // FIXME: do we need to send ourselves?
             getSender().tell(ImmutableMap.copyOf(versions), getSelf());
-        } else if (message instanceof Terminated) {
-            actorTerminated((Terminated) message);
-        } else if (message instanceof DeleteSnapshotsSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), message);
-        } else if (message instanceof DeleteSnapshotsFailure) {
-            LOG.warn("{}: failed to delete prior snapshots", persistenceId(),
-                ((DeleteSnapshotsFailure) message).cause());
+        } else if (message instanceof Terminated terminated) {
+            actorTerminated(terminated);
+        } else if (message instanceof DeleteSnapshotsSuccess deleteSuccess) {
+            LOG.debug("{}: got command: {}", persistenceId(), deleteSuccess);
+        } else if (message instanceof DeleteSnapshotsFailure deleteFailure) {
+            LOG.warn("{}: failed to delete prior snapshots", persistenceId(), deleteFailure.cause());
         } else {
             LOG.debug("Unhandled message [{}]", message);
             unhandled(message);
@@ -173,15 +172,14 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
     }
 
     private void handleSnapshotMessage(final Object message) {
-        if (message instanceof SaveSnapshotFailure) {
-            LOG.error("{}: failed to persist state", persistenceId(), ((SaveSnapshotFailure) message).cause());
+        if (message instanceof SaveSnapshotFailure saveFailure) {
+            LOG.error("{}: failed to persist state", persistenceId(), saveFailure.cause());
             persisting = false;
             self().tell(PoisonPill.getInstance(), ActorRef.noSender());
-        } else if (message instanceof SaveSnapshotSuccess) {
-            LOG.debug("{}: got command: {}", persistenceId(), message);
-            SaveSnapshotSuccess saved = (SaveSnapshotSuccess)message;
-            deleteSnapshots(new SnapshotSelectionCriteria(saved.metadata().sequenceNr(),
-                    saved.metadata().timestamp() - 1, 0L, 0L));
+        } else if (message instanceof SaveSnapshotSuccess saveSuccess) {
+            LOG.debug("{}: got command: {}", persistenceId(), saveSuccess);
+            deleteSnapshots(new SnapshotSelectionCriteria(scala.Long.MaxValue(), saveSuccess.metadata().timestamp() - 1,
+                0L, 0L));
             persisting = false;
             unstash();
         } else {
@@ -199,20 +197,20 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
                 incarnation = 0;
             }
 
-            this.localBucket = new LocalBucket<>(incarnation.intValue(), initialData);
+            this.localBucket = new LocalBucket<>(incarnation, initialData);
             initialData = null;
             LOG.debug("{}: persisting new incarnation {}", persistenceId(), incarnation);
             persisting = true;
             saveSnapshot(incarnation);
-        } else if (message instanceof SnapshotOffer) {
-            incarnation = (Integer) ((SnapshotOffer)message).snapshot();
+        } else if (message instanceof SnapshotOffer snapshotOffer) {
+            incarnation = (Integer) snapshotOffer.snapshot();
             LOG.debug("{}: recovered incarnation {}", persistenceId(), incarnation);
         } else {
             LOG.warn("{}: ignoring recovery message {}", persistenceId(), message);
         }
     }
 
-    protected final RemoteRpcProviderConfig getConfig() {
+    protected final RemoteOpsProviderConfig getConfig() {
         return config;
     }
 
@@ -224,7 +222,7 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
         if (bumpIncarnation) {
             LOG.debug("Version wrapped. incrementing incarnation");
 
-            Verify.verify(incarnation < Integer.MAX_VALUE, "Ran out of incarnations, cannot continue");
+            verify(incarnation < Integer.MAX_VALUE, "Ran out of incarnations, cannot continue");
             incarnation = incarnation + 1;
 
             persisting = true;
@@ -389,7 +387,7 @@ public abstract class BucketStoreActor<T extends BucketData<T>> extends
     }
 
     private LocalBucket<T> getLocalBucket() {
-        Preconditions.checkState(localBucket != null, "Attempted to access local bucket before recovery completed");
+        checkState(localBucket != null, "Attempted to access local bucket before recovery completed");
         return localBucket;
     }
 }
index a6a0c2b6f790a93d29ac56d9745484a97dacc65d..950b5f9f080a51acc28689269a211ca4c079b1a2 100644 (file)
@@ -7,8 +7,9 @@
  */
 package org.opendaylight.controller.remote.rpc.registry.gossip;
 
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.Address;
-import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import java.io.Serializable;
 import java.util.Map;
@@ -21,7 +22,7 @@ final class GossipEnvelope implements Serializable {
     private final Address to;
 
     GossipEnvelope(final Address from, final Address to, final Map<Address, ? extends Bucket<?>> buckets) {
-        this.to = Preconditions.checkNotNull(to);
+        this.to = requireNonNull(to);
         this.buckets = ImmutableMap.copyOf(buckets);
         this.from = from;
     }
index 56060dd714ff12b6e29120abe5b766673fa788d0..40be108b2e40a5ae821b24bbd50f11c8f04654ed 100644 (file)
@@ -7,6 +7,9 @@
  */
 package org.opendaylight.controller.remote.rpc.registry.gossip;
 
+import static com.google.common.base.Verify.verifyNotNull;
+import static java.util.Objects.requireNonNull;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorRefProvider;
 import akka.actor.ActorSelection;
@@ -18,8 +21,6 @@ import akka.cluster.ClusterActorRefProvider;
 import akka.cluster.ClusterEvent;
 import akka.cluster.Member;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import com.google.common.collect.Maps;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -31,7 +32,7 @@ import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 import scala.concurrent.duration.FiniteDuration;
 
 /**
@@ -61,7 +62,7 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
     };
 
     private final boolean autoStartGossipTicks;
-    private final RemoteRpcProviderConfig config;
+    private final RemoteOpsProviderConfig config;
 
     /**
      * All known cluster members.
@@ -84,20 +85,20 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
 
     private BucketStoreAccess bucketStore;
 
-    Gossiper(final RemoteRpcProviderConfig config, final Boolean autoStartGossipTicks) {
-        this.config = Preconditions.checkNotNull(config);
-        this.autoStartGossipTicks = autoStartGossipTicks.booleanValue();
+    Gossiper(final RemoteOpsProviderConfig config, final Boolean autoStartGossipTicks) {
+        this.config = requireNonNull(config);
+        this.autoStartGossipTicks = autoStartGossipTicks;
     }
 
-    Gossiper(final RemoteRpcProviderConfig config) {
+    Gossiper(final RemoteOpsProviderConfig config) {
         this(config, Boolean.TRUE);
     }
 
-    public static Props props(final RemoteRpcProviderConfig config) {
+    public static Props props(final RemoteOpsProviderConfig config) {
         return Props.create(Gossiper.class, config);
     }
 
-    static Props testProps(final RemoteRpcProviderConfig config) {
+    static Props testProps(final RemoteOpsProviderConfig config) {
         return Props.create(Gossiper.class, config, Boolean.FALSE);
     }
 
@@ -118,14 +119,19 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         }
 
         if (autoStartGossipTicks) {
-            gossipTask = getContext().system().scheduler().schedule(
-                    new FiniteDuration(1, TimeUnit.SECONDS),        //initial delay
-                    config.getGossipTickInterval(),                 //interval
-                    getSelf(),                                      //target
-                    GOSSIP_TICK,                                    //message
-                    getContext().dispatcher(),                      //execution context
-                    getSelf()                                       //sender
-            );
+            gossipTask = getContext().system().scheduler().scheduleAtFixedRate(
+                // initial delay
+                new FiniteDuration(1, TimeUnit.SECONDS),
+                // interval
+                config.getGossipTickInterval(),
+                // target
+                getSelf(),
+                // message
+                GOSSIP_TICK,
+                // execution context
+                getContext().dispatcher(),
+                // sender
+                getSelf());
         }
     }
 
@@ -145,25 +151,25 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         //These ticks can be sent by another actor as well which is esp. useful while testing
         if (GOSSIP_TICK.equals(message)) {
             receiveGossipTick();
-        } else if (message instanceof GossipStatus) {
+        } else if (message instanceof GossipStatus status) {
             // Message from remote gossiper with its bucket versions
-            receiveGossipStatus((GossipStatus) message);
-        } else if (message instanceof GossipEnvelope) {
+            receiveGossipStatus(status);
+        } else if (message instanceof GossipEnvelope envelope) {
             // Message from remote gossiper with buckets. This is usually in response to GossipStatus
             // message. The contained buckets are newer as determined by the remote gossiper by
             // comparing the GossipStatus message with its local versions.
-            receiveGossip((GossipEnvelope) message);
-        } else if (message instanceof ClusterEvent.MemberUp) {
-            receiveMemberUpOrReachable(((ClusterEvent.MemberUp) message).member());
+            receiveGossip(envelope);
+        } else if (message instanceof ClusterEvent.MemberUp memberUp) {
+            receiveMemberUpOrReachable(memberUp.member());
 
-        } else if (message instanceof ClusterEvent.ReachableMember) {
-            receiveMemberUpOrReachable(((ClusterEvent.ReachableMember) message).member());
+        } else if (message instanceof ClusterEvent.ReachableMember reachableMember) {
+            receiveMemberUpOrReachable(reachableMember.member());
 
-        } else if (message instanceof ClusterEvent.MemberRemoved) {
-            receiveMemberRemoveOrUnreachable(((ClusterEvent.MemberRemoved) message).member());
+        } else if (message instanceof ClusterEvent.MemberRemoved memberRemoved) {
+            receiveMemberRemoveOrUnreachable(memberRemoved.member());
 
-        } else if (message instanceof ClusterEvent.UnreachableMember) {
-            receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
+        } else if (message instanceof ClusterEvent.UnreachableMember unreachableMember) {
+            receiveMemberRemoveOrUnreachable(unreachableMember.member());
 
         } else {
             unhandled(message);
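
The conversion above swaps explicit casts for Java's pattern matching for `instanceof`, binding a typed variable directly in the condition. A small stand-alone sketch of the idiom, with made-up types rather than the Gossiper messages:

    // Requires Java 16+; the types here are illustrative only.
    static String describe(final Object message) {
        if (message instanceof String text) {
            return "text of length " + text.length();
        } else if (message instanceof Integer number) {
            return "integer with successor " + (number + 1);
        } else {
            return "unhandled: " + message;
        }
    }
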
@@ -243,7 +249,7 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         }
 
         LOG.trace("Gossiping to [{}]", address);
-        getLocalStatusAndSendTo(Verify.verifyNotNull(peers.get(address)));
+        getLocalStatusAndSendTo(verifyNotNull(peers.get(address)));
     }
 
     /**
index 5e9d907aadd18227869cf8bb8dc8c0bc0a6e25cd..313dac8089116564ea36059a3f2a86a6e8185f88 100644 (file)
@@ -7,7 +7,8 @@
  */
 package org.opendaylight.controller.remote.rpc.registry.gossip;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
 
 /**
  * Local bucket implementation. Unlike a full-blown {@link Bucket}, this class is mutable and tracks when it has been
@@ -30,9 +31,9 @@ final class LocalBucket<T extends BucketData<T>> {
     private boolean bumpVersion;
 
     LocalBucket(final int incarnation, final T data) {
-        Preconditions.checkArgument(incarnation >= 0);
-        this.version = ((long)incarnation) << Integer.SIZE;
-        this.data = Preconditions.checkNotNull(data);
+        checkArgument(incarnation >= 0);
+        this.version = (long)incarnation << Integer.SIZE;
+        this.data = requireNonNull(data);
     }
 
     T getData() {
@@ -49,7 +50,7 @@ final class LocalBucket<T extends BucketData<T>> {
     }
 
     boolean setData(final T newData) {
-        this.data = Preconditions.checkNotNull(newData);
+        this.data = requireNonNull(newData);
         if (!bumpVersion) {
             return false;
         }
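
Reading the constructor above, the non-negative incarnation appears to be packed into the upper 32 bits of `version`, leaving the lower 32 bits for a per-incarnation counter; that interpretation is inferred from the shift by `Integer.SIZE` and is not spelled out in the change. A small sketch under that assumption:

    // Assumption: version = (incarnation << 32) | counter, both sub-fields 32 bits wide.
    static long pack(final int incarnation, final int counter) {
        return (long) incarnation << Integer.SIZE | counter & 0xFFFFFFFFL;
    }

    static int incarnationOf(final long version) {
        return (int) (version >>> Integer.SIZE);
    }

    static int counterOf(final long version) {
        return (int) version;
    }
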
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/AbstractRegistryMXBean.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/AbstractRegistryMXBean.java
new file mode 100644 (file)
index 0000000..a38b8c4
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.mbeans;
+
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.Address;
+import akka.util.Timeout;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import java.util.Map;
+import java.util.concurrent.TimeoutException;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+import org.opendaylight.controller.remote.rpc.registry.AbstractRoutingTable;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+abstract class AbstractRegistryMXBean<T extends AbstractRoutingTable<T, I>, I> extends AbstractMXBean {
+    static final String LOCAL_CONSTANT = "local";
+    static final String ROUTE_CONSTANT = "route:";
+    static final String NAME_CONSTANT = " | name:";
+
+    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
+    protected final Logger log = LoggerFactory.getLogger(getClass());
+
+    private final BucketStoreAccess bucketAccess;
+    private final FiniteDuration timeout;
+
+    @SuppressFBWarnings(value = "MC_OVERRIDABLE_METHOD_CALL_IN_CONSTRUCTOR",
+        justification = "registerMBean() is expected to be stateless")
+    AbstractRegistryMXBean(final @NonNull String beanName, final @NonNull String beanType,
+            final @NonNull BucketStoreAccess bucketAccess, final @NonNull Timeout timeout) {
+        super(beanName, beanType, null);
+        this.bucketAccess = requireNonNull(bucketAccess);
+        this.timeout = timeout.duration();
+        registerMBean();
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    final T localData() {
+        try {
+            return (T) Await.result((Future) bucketAccess.getLocalData(), timeout);
+        } catch (InterruptedException | TimeoutException e) {
+            throw new IllegalStateException("getLocalData failed", e);
+        }
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    final Map<Address, Bucket<T>> remoteBuckets() {
+        try {
+            return (Map<Address, Bucket<T>>) Await.result((Future)bucketAccess.getRemoteBuckets(), timeout);
+        } catch (InterruptedException | TimeoutException e) {
+            throw new IllegalStateException("getRemoteBuckets failed", e);
+        }
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    final String bucketVersions() {
+        try {
+            return Await.result((Future)bucketAccess.getBucketVersions(), timeout).toString();
+        } catch (InterruptedException | TimeoutException e) {
+            throw new IllegalStateException("getVersions failed", e);
+        }
+    }
+}
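
The base class above concentrates the blocking bridge from `BucketStoreAccess`, which hands back Scala `Future`s, to the synchronous world of JMX getters: each accessor awaits the future for the configured timeout and rethrows failures as `IllegalStateException`. A minimal sketch of that await-with-timeout pattern on its own, with illustrative names:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import scala.concurrent.Await;
    import scala.concurrent.Future;
    import scala.concurrent.duration.FiniteDuration;

    final class BlockingAwait {
        private BlockingAwait() {
            // Utility class
        }

        // Block the calling thread until the future completes or the timeout elapses.
        static <T> T await(final Future<T> future, final long seconds) {
            try {
                return Await.result(future, new FiniteDuration(seconds, TimeUnit.SECONDS));
            } catch (InterruptedException | TimeoutException e) {
                throw new IllegalStateException("Future did not complete in time", e);
            }
        }
    }
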
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBean.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBean.java
new file mode 100644 (file)
index 0000000..5826384
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.mbeans;
+
+import java.util.Map;
+import java.util.Set;
+
+public interface RemoteActionRegistryMXBean {
+
+    String getBucketVersions();
+
+    Set<String> getLocalRegisteredAction();
+
+    Map<String, String> findActionByName(String name);
+
+    Map<String, String> findActionByRoute(String route);
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImpl.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImpl.java
new file mode 100644 (file)
index 0000000..8314b13
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.mbeans;
+
+import akka.actor.Address;
+import akka.util.Timeout;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.opendaylight.controller.remote.rpc.registry.ActionRoutingTable;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+public class RemoteActionRegistryMXBeanImpl extends AbstractRegistryMXBean<ActionRoutingTable, DOMActionInstance>
+        implements RemoteActionRegistryMXBean {
+    public RemoteActionRegistryMXBeanImpl(final BucketStoreAccess actionRegistryAccess, final Timeout timeout) {
+        super("RemoteActionRegistry", "RemoteActionBroker", actionRegistryAccess, timeout);
+    }
+
+    @Override
+    public Set<String> getLocalRegisteredAction() {
+        ActionRoutingTable table = localData();
+        Set<String> routedAction = new HashSet<>(table.getItems().size());
+        for (DOMActionInstance route : table.getItems()) {
+            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
+            if (!actionPath.isEmpty()) {
+                routedAction.add(ROUTE_CONSTANT + actionPath + NAME_CONSTANT + route.getType());
+            }
+        }
+
+        log.debug("Locally registered routed RPCs {}", routedAction);
+        return routedAction;
+    }
+
+    @Override
+    public Map<String, String> findActionByName(final String name) {
+        ActionRoutingTable localTable = localData();
+        // Get all Actions from local bucket
+        Map<String, String> rpcMap = new HashMap<>(getActionMemberMapByName(localTable, name, LOCAL_CONSTANT));
+
+        // Get all Actions from remote bucket
+        Map<Address, Bucket<ActionRoutingTable>> buckets = remoteBuckets();
+        for (Map.Entry<Address, Bucket<ActionRoutingTable>> entry : buckets.entrySet()) {
+            ActionRoutingTable table = entry.getValue().getData();
+            rpcMap.putAll(getActionMemberMapByName(table, name, entry.getKey().toString()));
+        }
+
+        log.debug("list of Actions {} searched by name {}", rpcMap, name);
+        return rpcMap;
+    }
+
+    @Override
+    public Map<String, String> findActionByRoute(final String routeId) {
+        ActionRoutingTable localTable = localData();
+        Map<String, String> rpcMap = new HashMap<>(getActionMemberMapByAction(localTable, routeId, LOCAL_CONSTANT));
+
+        Map<Address, Bucket<ActionRoutingTable>> buckets = remoteBuckets();
+        for (Map.Entry<Address, Bucket<ActionRoutingTable>> entry : buckets.entrySet()) {
+            ActionRoutingTable table = entry.getValue().getData();
+            rpcMap.putAll(getActionMemberMapByAction(table, routeId, entry.getKey().toString()));
+        }
+
+        log.debug("list of Actions {} searched by route {}", rpcMap, routeId);
+        return rpcMap;
+    }
+
+    /**
+     * Search if the routing table route String contains routeName.
+     */
+    private static Map<String, String> getActionMemberMapByAction(final ActionRoutingTable table,
+                                                                  final String routeName, final String address) {
+        Collection<DOMActionInstance> routes = table.getItems();
+        Map<String, String> actionMap = new HashMap<>(routes.size());
+        for (DOMActionInstance route : routes) {
+            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
+            if (!actionPath.isEmpty()) {
+                String routeString = actionPath.toString();
+                if (routeString.contains(routeName)) {
+                    actionMap.put(ROUTE_CONSTANT + routeString + NAME_CONSTANT + route.getType(), address);
+                }
+            }
+        }
+        return actionMap;
+    }
+
+    /**
+     * Search if the routing table route type contains name.
+     */
+    private static Map<String, String> getActionMemberMapByName(final ActionRoutingTable table, final String name,
+                                                                final String address) {
+        Collection<DOMActionInstance> routes = table.getItems();
+        Map<String, String> actionMap = new HashMap<>(routes.size());
+        for (DOMActionInstance route : routes) {
+            final YangInstanceIdentifier actionPath = YangInstanceIdentifier.of(route.getType().lastNodeIdentifier());
+            if (!actionPath.isEmpty()) {
+                String type = route.getType().toString();
+                if (type.contains(name)) {
+                    actionMap.put(ROUTE_CONSTANT + actionPath + NAME_CONSTANT + type, address);
+                }
+            }
+        }
+        return actionMap;
+    }
+
+    @Override
+    public String getBucketVersions() {
+        return bucketVersions();
+    }
+}
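
Once `registerMBean()` has run, these beans are reachable through the platform MBean server like any other MXBean. A hedged sketch of querying one through a JMX proxy; the `ObjectName` below is a placeholder assumption, since the actual naming pattern comes from `AbstractMXBean` and is not shown in this change:

    import java.lang.management.ManagementFactory;
    import java.util.Map;
    import javax.management.JMX;
    import javax.management.MBeanServer;
    import javax.management.MalformedObjectNameException;
    import javax.management.ObjectName;
    import org.opendaylight.controller.remote.rpc.registry.mbeans.RemoteActionRegistryMXBean;

    final class ActionRegistryQuery {
        static Map<String, String> findAction(final String name) throws MalformedObjectNameException {
            final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Placeholder ObjectName: the real one is whatever AbstractMXBean registers under.
            final ObjectName beanName = new ObjectName(
                "org.opendaylight.controller:type=RemoteActionBroker,name=RemoteActionRegistry");
            final RemoteActionRegistryMXBean proxy =
                JMX.newMXBeanProxy(server, beanName, RemoteActionRegistryMXBean.class);
            return proxy.findActionByName(name);
        }
    }
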
index 47903bb114221df67c2cbb899ea46c54780910d3..dc4ee8fd8635818c4d407a1d47a405b31145f5f8 100644 (file)
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc.registry.mbeans;
 
 import java.util.Map;
@@ -22,7 +21,7 @@ public interface RemoteRpcRegistryMXBean {
 
     Set<String> getLocalRegisteredRoutedRpc();
 
-    Map<String,String> findRpcByName(String name);
+    Map<String, String> findRpcByName(String name);
 
-    Map<String,String> findRpcByRoute(String route);
+    Map<String, String> findRpcByRoute(String route);
 }
index 87adef3d9cedf30f62ce9de22653ea2a643cde3b..d5c72fcea395f0009602ea56c9923e9f053e7d8e 100644 (file)
@@ -9,67 +9,27 @@ package org.opendaylight.controller.remote.rpc.registry.mbeans;
 
 import akka.actor.Address;
 import akka.util.Timeout;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.controller.remote.rpc.registry.RoutingTable;
 import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
 import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.concurrent.Await;
-import scala.concurrent.Future;
-
-public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements RemoteRpcRegistryMXBean {
-
-    @SuppressFBWarnings("SLF4J_LOGGER_SHOULD_BE_PRIVATE")
-    protected final Logger log = LoggerFactory.getLogger(getClass());
-
-    private static final String LOCAL_CONSTANT = "local";
-
-    private static final String ROUTE_CONSTANT = "route:";
-
-    private static final String NAME_CONSTANT = " | name:";
-
-    private final BucketStoreAccess rpcRegistryAccess;
-    private final Timeout timeout;
 
+public class RemoteRpcRegistryMXBeanImpl extends AbstractRegistryMXBean<RoutingTable, DOMRpcIdentifier>
+        implements RemoteRpcRegistryMXBean {
     public RemoteRpcRegistryMXBeanImpl(final BucketStoreAccess rpcRegistryAccess, final Timeout timeout) {
-        super("RemoteRpcRegistry", "RemoteRpcBroker", null);
-        this.rpcRegistryAccess = rpcRegistryAccess;
-        this.timeout = timeout;
-        registerMBean();
-    }
-
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
-    private RoutingTable getLocalData() {
-        try {
-            return (RoutingTable) Await.result((Future) rpcRegistryAccess.getLocalData(), timeout.duration());
-        } catch (Exception e) {
-            throw new RuntimeException("getLocalData failed", e);
-        }
-    }
-
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
-    private Map<Address, Bucket<RoutingTable>> getRemoteBuckets() {
-        try {
-            return (Map<Address, Bucket<RoutingTable>>) Await.result((Future)rpcRegistryAccess.getRemoteBuckets(),
-                    timeout.duration());
-        } catch (Exception e) {
-            throw new RuntimeException("getRemoteBuckets failed", e);
-        }
+        super("RemoteRpcRegistry", "RemoteRpcBroker", rpcRegistryAccess, timeout);
     }
 
     @Override
     public Set<String> getGlobalRpc() {
-        RoutingTable table = getLocalData();
-        Set<String> globalRpc = new HashSet<>(table.getRoutes().size());
-        for (DOMRpcIdentifier route : table.getRoutes()) {
+        RoutingTable table = localData();
+        Set<String> globalRpc = new HashSet<>(table.getItems().size());
+        for (DOMRpcIdentifier route : table.getItems()) {
             if (route.getContextReference().isEmpty()) {
                 globalRpc.add(route.getType().toString());
             }
@@ -81,9 +41,9 @@ public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements Remot
 
     @Override
     public Set<String> getLocalRegisteredRoutedRpc() {
-        RoutingTable table = getLocalData();
-        Set<String> routedRpc = new HashSet<>(table.getRoutes().size());
-        for (DOMRpcIdentifier route : table.getRoutes()) {
+        RoutingTable table = localData();
+        Set<String> routedRpc = new HashSet<>(table.getItems().size());
+        for (DOMRpcIdentifier route : table.getItems()) {
             if (!route.getContextReference().isEmpty()) {
                 routedRpc.add(ROUTE_CONSTANT + route.getContextReference() + NAME_CONSTANT + route.getType());
             }
@@ -95,12 +55,12 @@ public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements Remot
 
     @Override
     public Map<String, String> findRpcByName(final String name) {
-        RoutingTable localTable = getLocalData();
+        RoutingTable localTable = localData();
         // Get all RPCs from local bucket
         Map<String, String> rpcMap = new HashMap<>(getRpcMemberMapByName(localTable, name, LOCAL_CONSTANT));
 
         // Get all RPCs from remote bucket
-        Map<Address, Bucket<RoutingTable>> buckets = getRemoteBuckets();
+        Map<Address, Bucket<RoutingTable>> buckets = remoteBuckets();
         for (Entry<Address, Bucket<RoutingTable>> entry : buckets.entrySet()) {
             RoutingTable table = entry.getValue().getData();
             rpcMap.putAll(getRpcMemberMapByName(table, name, entry.getKey().toString()));
@@ -112,10 +72,10 @@ public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements Remot
 
     @Override
     public Map<String, String> findRpcByRoute(final String routeId) {
-        RoutingTable localTable = getLocalData();
+        RoutingTable localTable = localData();
         Map<String, String> rpcMap = new HashMap<>(getRpcMemberMapByRoute(localTable, routeId, LOCAL_CONSTANT));
 
-        Map<Address, Bucket<RoutingTable>> buckets = getRemoteBuckets();
+        Map<Address, Bucket<RoutingTable>> buckets = remoteBuckets();
         for (Entry<Address, Bucket<RoutingTable>> entry : buckets.entrySet()) {
             RoutingTable table = entry.getValue().getData();
             rpcMap.putAll(getRpcMemberMapByRoute(table, routeId, entry.getKey().toString()));
@@ -129,8 +89,8 @@ public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements Remot
      * Search if the routing table route String contains routeName.
      */
     private static Map<String,String> getRpcMemberMapByRoute(final RoutingTable table, final String routeName,
-                                                      final String address) {
-        Set<DOMRpcIdentifier> routes = table.getRoutes();
+                                                             final String address) {
+        Set<DOMRpcIdentifier> routes = table.getItems();
         Map<String, String> rpcMap = new HashMap<>(routes.size());
         for (DOMRpcIdentifier route : routes) {
             if (!route.getContextReference().isEmpty()) {
@@ -146,9 +106,9 @@ public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements Remot
     /**
      * Search if the routing table route type contains name.
      */
-    private static Map<String, String>  getRpcMemberMapByName(final RoutingTable table, final String name,
-                                                       final String address) {
-        Set<DOMRpcIdentifier> routes = table.getRoutes();
+    private static Map<String, String> getRpcMemberMapByName(final RoutingTable table, final String name,
+                                                             final String address) {
+        Set<DOMRpcIdentifier> routes = table.getItems();
         Map<String, String> rpcMap = new HashMap<>(routes.size());
         for (DOMRpcIdentifier route : routes) {
             if (!route.getContextReference().isEmpty()) {
@@ -162,12 +122,7 @@ public class RemoteRpcRegistryMXBeanImpl extends AbstractMXBean implements Remot
     }
 
     @Override
-    @SuppressWarnings({"unchecked", "checkstyle:IllegalCatch", "rawtypes"})
     public String getBucketVersions() {
-        try {
-            return Await.result((Future)rpcRegistryAccess.getBucketVersions(), timeout.duration()).toString();
-        } catch (Exception e) {
-            throw new RuntimeException("getVersions failed", e);
-        }
+        return bucketVersions();
     }
 }
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/OSGI-INF/blueprint/remote-rpc.xml b/opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/OSGI-INF/blueprint/remote-rpc.xml
deleted file mode 100644 (file)
index e6d19a1..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:restart-dependents-on-updates="false" odl:use-default-for-reference-types="true">
-
-  <cm:property-placeholder persistent-id="org.opendaylight.controller.remoterpc" update-strategy="none">
-    <cm:default-properties>
-      <cm:property name="enable-metric-capture" value="false"/>
-      <cm:property name="bounded-mailbox-capacity" value="1000"/>
-    </cm:default-properties>
-  </cm:property-placeholder>
-
-  <reference id="actorSystemProvider" interface="org.opendaylight.controller.cluster.ActorSystemProvider" />
-  <reference id="domRpcService" interface="org.opendaylight.mdsal.dom.api.DOMRpcService"/>
-  <reference id="domRpcRegistry" interface="org.opendaylight.mdsal.dom.api.DOMRpcProviderService"/>
-
-  <bean id="actorSystem" factory-ref="actorSystemProvider" factory-method="getActorSystem"/>
-
-  <bean id="remoteRpcProviderConfig" class="org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig"
-          factory-method="newInstance">
-    <argument>
-      <bean factory-ref="actorSystem" factory-method="name"/>
-    </argument>
-    <argument value="${enable-metric-capture}"/>
-    <argument value="${bounded-mailbox-capacity}"/>
-  </bean>
-
-  <bean id="remoteRpcProvider" class="org.opendaylight.controller.remote.rpc.RemoteRpcProviderFactory"
-          factory-method="createInstance" init-method="start" destroy-method="close">
-    <argument ref="domRpcRegistry"/>
-    <argument ref="domRpcService"/>
-    <argument ref="actorSystem"/>
-    <argument ref="remoteRpcProviderConfig"/>
-  </bean>
-
-</blueprint>
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc;
 
 import static org.junit.Assert.assertEquals;
@@ -17,27 +16,28 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.testkit.javadsl.TestKit;
 import java.net.URI;
-import java.util.Collection;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.mdsal.dom.api.DOMRpcResult;
 import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yangtools.yang.common.ErrorSeverity;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
 import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
 
 /**
@@ -45,26 +45,26 @@ import org.opendaylight.yangtools.yang.test.util.YangParserTestUtils;
  *
  * @author Thomas Pantelis
  */
-public class AbstractRpcTest {
+public class AbstractOpsTest {
     static final String TEST_REV = "2014-08-28";
     static final String TEST_NS = "urn:test";
     static final URI TEST_URI = URI.create(TEST_NS);
-    static final QName TEST_RPC = QName.create(TEST_NS, TEST_REV, "test-rpc");
+    static final QName TEST_RPC = QName.create(TEST_NS, TEST_REV, "test-something");
     static final QName TEST_RPC_INPUT = QName.create(TEST_NS, TEST_REV, "input");
     static final QName TEST_RPC_INPUT_DATA = QName.create(TEST_NS, TEST_REV, "input-data");
     static final QName TEST_RPC_OUTPUT = QName.create(TEST_NS, TEST_REV, "output");
-    static final QName TEST_RPC_OUTPUT_DATA = QName.create(TEST_URI, "output-data");
 
 
-    static final SchemaPath TEST_RPC_TYPE = SchemaPath.create(true, TEST_RPC);
-    static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.create(
-            new YangInstanceIdentifier.NodeIdentifier(TEST_RPC));
-    public static final DOMRpcIdentifier TEST_RPC_ID = DOMRpcIdentifier.create(TEST_RPC_TYPE, TEST_PATH);
+    static final Absolute TEST_RPC_TYPE = Absolute.of(TEST_RPC);
+    static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_RPC);
+    public static final DOMRpcIdentifier TEST_RPC_ID = DOMRpcIdentifier.create(TEST_RPC, TEST_PATH);
+    public static final DOMDataTreeIdentifier TEST_DATA_TREE_ID =
+        DOMDataTreeIdentifier.of(LogicalDatastoreType.OPERATIONAL, TEST_PATH);
 
     static ActorSystem node1;
     static ActorSystem node2;
-    static RemoteRpcProviderConfig config1;
-    static RemoteRpcProviderConfig config2;
+    static RemoteOpsProviderConfig config1;
+    static RemoteOpsProviderConfig config2;
 
     protected ActorRef rpcInvoker1;
     protected TestKit rpcRegistry1Probe;
@@ -73,16 +73,22 @@ public class AbstractRpcTest {
     protected SchemaContext schemaContext;
     protected RemoteRpcImplementation remoteRpcImpl1;
     protected RemoteRpcImplementation remoteRpcImpl2;
+    protected RemoteActionImplementation remoteActionImpl1;
+    protected RemoteActionImplementation remoteActionImpl2;
 
     @Mock
     protected DOMRpcService domRpcService1;
     @Mock
+    protected DOMActionService domActionService1;
+    @Mock
     protected DOMRpcService domRpcService2;
+    @Mock
+    protected DOMActionService domActionService2;
 
     @BeforeClass
     public static void setup() {
-        config1 = new RemoteRpcProviderConfig.Builder("memberA").build();
-        config2 = new RemoteRpcProviderConfig.Builder("memberB").build();
+        config1 = new RemoteOpsProviderConfig.Builder("memberA").build();
+        config2 = new RemoteOpsProviderConfig.Builder("memberB").build();
         node1 = ActorSystem.create("opendaylight-rpc", config1.get());
         node2 = ActorSystem.create("opendaylight-rpc", config2.get());
     }
@@ -97,21 +103,23 @@ public class AbstractRpcTest {
 
     @Before
     public void setUp() {
-        schemaContext = YangParserTestUtils.parseYangResources(AbstractRpcTest.class, "/test-rpc.yang");
+        schemaContext = YangParserTestUtils.parseYangResources(AbstractOpsTest.class, "/test-rpc.yang");
 
         MockitoAnnotations.initMocks(this);
 
         rpcRegistry1Probe = new TestKit(node1);
-        rpcInvoker1 = node1.actorOf(RpcInvoker.props(domRpcService1));
+        rpcInvoker1 = node1.actorOf(OpsInvoker.props(domRpcService1, domActionService1));
         rpcRegistry2Probe = new TestKit(node2);
-        rpcInvoker2 = node2.actorOf(RpcInvoker.props(domRpcService2));
+        rpcInvoker2 = node2.actorOf(OpsInvoker.props(domRpcService2, domActionService2));
         remoteRpcImpl1 = new RemoteRpcImplementation(rpcInvoker2, config1);
         remoteRpcImpl2 = new RemoteRpcImplementation(rpcInvoker1, config2);
+        remoteActionImpl1 = new RemoteActionImplementation(rpcInvoker2, config1);
+        remoteActionImpl2 = new RemoteActionImplementation(rpcInvoker1, config2);
     }
 
     static void assertRpcErrorEquals(final RpcError rpcError, final ErrorSeverity severity,
-            final ErrorType errorType, final String tag, final String message, final String applicationTag,
-            final String info, final String causeMsg) {
+                                     final ErrorType errorType, final String tag, final String message,
+                                     final String applicationTag, final String info, final String causeMsg) {
         assertEquals("getSeverity", severity, rpcError.getSeverity());
         assertEquals("getErrorType", errorType, rpcError.getErrorType());
         assertEquals("getTag", tag, rpcError.getTag());
@@ -126,35 +134,38 @@ public class AbstractRpcTest {
         }
     }
 
-    static void assertCompositeNodeEquals(final NormalizedNode<? , ?> exp, final NormalizedNode<? , ?> actual) {
+    static void assertCompositeNodeEquals(final NormalizedNode exp, final NormalizedNode actual) {
         assertEquals(exp, actual);
     }
 
     public static ContainerNode makeRPCInput(final String data) {
-        return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(TEST_RPC_INPUT))
-            .withChild(ImmutableNodes.leafNode(TEST_RPC_INPUT_DATA, data)).build();
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_RPC_INPUT))
+            .withChild(ImmutableNodes.leafNode(TEST_RPC_INPUT_DATA, data))
+            .build();
 
     }
 
     public static ContainerNode makeRPCOutput(final String data) {
-        return Builders.containerBuilder().withNodeIdentifier(new NodeIdentifier(TEST_RPC_OUTPUT))
-                .withChild(ImmutableNodes.leafNode(TEST_RPC_OUTPUT, data)).build();
+        return ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(TEST_RPC_OUTPUT))
+            .withChild(ImmutableNodes.leafNode(TEST_RPC_OUTPUT, data))
+            .build();
     }
 
     static void assertFailedRpcResult(final DOMRpcResult rpcResult, final ErrorSeverity severity,
-            final ErrorType errorType, final String tag, final String message, final String applicationTag,
-            final String info, final String causeMsg) {
+                                      final ErrorType errorType, final String tag, final String message,
+                                      final String applicationTag, final String info, final String causeMsg) {
         assertNotNull("RpcResult was null", rpcResult);
-        final Collection<? extends RpcError> rpcErrors = rpcResult.getErrors();
+        final var rpcErrors = rpcResult.errors();
         assertEquals("RpcErrors count", 1, rpcErrors.size());
         assertRpcErrorEquals(rpcErrors.iterator().next(), severity, errorType, tag, message,
                 applicationTag, info, causeMsg);
     }
 
-    static void assertSuccessfulRpcResult(final DOMRpcResult rpcResult,
-            final NormalizedNode<? , ?> expOutput) {
+    static void assertSuccessfulRpcResult(final DOMRpcResult rpcResult, final NormalizedNode expOutput) {
         assertNotNull("RpcResult was null", rpcResult);
-        assertCompositeNodeEquals(expOutput, rpcResult.getResult());
+        assertCompositeNodeEquals(expOutput, rpcResult.value());
     }
 
     static class TestException extends Exception {
@@ -11,6 +11,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.when;
 
 import akka.actor.Status.Failure;
@@ -25,27 +26,26 @@ import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 
-public class RpcBrokerTest extends AbstractRpcTest {
+public class OpsBrokerTest extends AbstractOpsTest {
 
     @Test
     public void testExecuteRpc() {
         final ContainerNode invokeRpcResult = makeRPCOutput("bar");
         final DOMRpcResult rpcResult = new DefaultDOMRpcResult(invokeRpcResult);
-        when(domRpcService1.invokeRpc(eq(TEST_RPC_TYPE), any())).thenReturn(
-            FluentFutures.immediateFluentFuture(rpcResult));
+        doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService1)
+            .invokeRpc(eq(TEST_RPC), any());
+        final ExecuteRpc executeRpc = ExecuteRpc.from(TEST_RPC_ID, null);
 
-        final ExecuteRpc executeMsg = ExecuteRpc.from(TEST_RPC_ID, null);
-
-        rpcInvoker1.tell(executeMsg, rpcRegistry1Probe.getRef());
+        rpcInvoker1.tell(executeRpc, rpcRegistry1Probe.getRef());
 
         final RpcResponse rpcResponse = rpcRegistry1Probe.expectMsgClass(Duration.ofSeconds(5), RpcResponse.class);
 
-        assertEquals(rpcResult.getResult(), rpcResponse.getResultNormalizedNode());
+        assertEquals(rpcResult.value(), rpcResponse.getOutput());
     }
 
     @Test
     public void testExecuteRpcFailureWithException() {
-        when(domRpcService1.invokeRpc(eq(TEST_RPC_TYPE), any())).thenReturn(FluentFutures.immediateFailedFluentFuture(
+        when(domRpcService1.invokeRpc(eq(TEST_RPC), any())).thenReturn(FluentFutures.immediateFailedFluentFuture(
             new DOMRpcImplementationNotAvailableException("NOT FOUND")));
 
         final ExecuteRpc executeMsg = ExecuteRpc.from(TEST_RPC_ID, null);
@@ -5,7 +5,6 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc;
 
 import akka.actor.ActorRef;
@@ -16,19 +15,23 @@ import java.util.Collections;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
 
-public class RpcListenerTest {
+public class OpsListenerTest {
 
     private static final QName TEST_QNAME = QName.create("test", "2015-06-12", "test");
-    private static final SchemaPath RPC_TYPE = SchemaPath.create(true, TEST_QNAME);
-    private static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier
-            .create(new YangInstanceIdentifier.NodeIdentifier(TEST_QNAME));
-    private static final DOMRpcIdentifier RPC_ID = DOMRpcIdentifier.create(RPC_TYPE, TEST_PATH);
+    private static final Absolute RPC_TYPE = Absolute.of(TEST_QNAME);
+    private static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+    private static final DOMRpcIdentifier RPC_ID = DOMRpcIdentifier.create(TEST_QNAME, TEST_PATH);
+    private static final DOMActionInstance ACTION_INSTANCE = DOMActionInstance.of(RPC_TYPE,
+            LogicalDatastoreType.OPERATIONAL, TEST_PATH);
 
     private static ActorSystem SYSTEM;
 
@@ -49,19 +52,40 @@ public class RpcListenerTest {
         final TestKit probeReg = new TestKit(SYSTEM);
         final ActorRef rpcRegistry = probeReg.getRef();
 
-        final RpcListener rpcListener = new RpcListener(rpcRegistry);
-        rpcListener.onRpcAvailable(Collections.singleton(RPC_ID));
+        final OpsListener opsListener = new OpsListener(rpcRegistry, rpcRegistry);
+        opsListener.onRpcAvailable(Collections.singleton(RPC_ID));
         probeReg.expectMsgClass(RpcRegistry.Messages.AddOrUpdateRoutes.class);
     }
 
+    @Test
+    public void testActionRouteAdd() {
+        // Test announcements
+        final TestKit probeReg = new TestKit(SYSTEM);
+        final ActorRef actionRegistry = probeReg.getRef();
+
+        final OpsListener opsListener = new OpsListener(actionRegistry, actionRegistry);
+        opsListener.onActionsChanged(Collections.emptySet(), Collections.singleton(ACTION_INSTANCE));
+        probeReg.expectMsgClass(ActionRegistry.Messages.UpdateActions.class);
+    }
+
     @Test
     public void testRouteRemove() {
         // Test announcements
         final TestKit probeReg = new TestKit(SYSTEM);
         final ActorRef rpcRegistry = probeReg.getRef();
 
-        final RpcListener rpcListener = new RpcListener(rpcRegistry);
-        rpcListener.onRpcUnavailable(Collections.singleton(RPC_ID));
+        final OpsListener opsListener = new OpsListener(rpcRegistry, rpcRegistry);
+        opsListener.onRpcUnavailable(Collections.singleton(RPC_ID));
         probeReg.expectMsgClass(RpcRegistry.Messages.RemoveRoutes.class);
     }
+
+//    @Test
+//    public void testAcceptsImplementation() {
+//
+//        final TestKit probeReg = new TestKit(SYSTEM);
+//        final ActorRef opsRegistry = probeReg.getRef();
+//
+//        final OpsListener opsListener = new OpsListener(opsRegistry, opsRegistry);
+//        opsListener.acceptsImplementation()
+//    }
 }
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsRegistrarTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/OpsRegistrarTest.java
new file mode 100644 (file)
index 0000000..2dd529f
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2017 Pantheon Technologies s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Address;
+import akka.actor.Props;
+import akka.testkit.TestActorRef;
+import akka.testkit.javadsl.TestKit;
+import com.google.common.collect.ImmutableMap;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.Messages.UpdateRemoteActionEndpoints;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.RemoteActionEndpoint;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.RemoteRpcEndpoint;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+
+public class OpsRegistrarTest {
+    @Mock
+    private DOMRpcProviderService rpcService;
+    @Mock
+    private DOMActionProviderService actionService;
+    @Mock
+    private Registration oldReg;
+    @Mock
+    private Registration newReg;
+    @Mock
+    private ObjectRegistration<RemoteActionImplementation> oldActionReg;
+    @Mock
+    private ObjectRegistration<RemoteActionImplementation> newActionReg;
+
+    private ActorSystem system;
+    private TestActorRef<OpsRegistrar> testActorRef;
+    private Address endpointAddress;
+    private RemoteRpcEndpoint firstEndpoint;
+    private RemoteRpcEndpoint secondEndpoint;
+    private RemoteActionEndpoint firstActionEndpoint;
+    private RemoteActionEndpoint secondActionEndpoint;
+    private OpsRegistrar opsRegistrar;
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
+        system = ActorSystem.create("test");
+
+        final TestKit testKit = new TestKit(system);
+        final RemoteOpsProviderConfig config = new RemoteOpsProviderConfig.Builder("system").build();
+        final Props props = OpsRegistrar.props(config, rpcService, actionService);
+        testActorRef = new TestActorRef<>(system, props, testKit.getRef(), "actorRef");
+        endpointAddress = new Address("http", "local");
+
+        final DOMRpcIdentifier firstEndpointId = DOMRpcIdentifier.create(QName.create("first:identifier", "foo"));
+        final DOMRpcIdentifier secondEndpointId = DOMRpcIdentifier.create(QName.create("second:identifier", "bar"));
+        final QName firstActionQName = QName.create("first:actionIdentifier", "fooAction");
+
+        final DOMActionInstance firstActionInstance = DOMActionInstance.of(Absolute.of(firstActionQName),
+                LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(firstActionQName));
+
+        final DOMActionInstance secondActionInstance = DOMActionInstance.of(Absolute.of(firstActionQName),
+                LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(firstActionQName));
+
+        final TestKit senderKit = new TestKit(system);
+        firstEndpoint = new RemoteRpcEndpoint(senderKit.getRef(), Collections.singletonList(firstEndpointId));
+        secondEndpoint = new RemoteRpcEndpoint(senderKit.getRef(), Collections.singletonList(secondEndpointId));
+        firstActionEndpoint = new RemoteActionEndpoint(senderKit.getRef(),
+                Collections.singletonList(firstActionInstance));
+        secondActionEndpoint = new RemoteActionEndpoint(senderKit.getRef(),
+                Collections.singletonList(secondActionInstance));
+
+        doReturn(oldReg).when(rpcService).registerRpcImplementation(any(RemoteRpcImplementation.class),
+            eq(firstEndpoint.getRpcs()));
+        doReturn(newReg).when(rpcService).registerRpcImplementation(any(RemoteRpcImplementation.class),
+            eq(secondEndpoint.getRpcs()));
+
+        doReturn(oldActionReg).when(actionService).registerActionImplementation(any(RemoteActionImplementation.class),
+            eq(secondActionEndpoint.getActions()));
+        doReturn(oldActionReg).when(actionService).registerActionImplementation(any(RemoteActionImplementation.class),
+                eq(secondActionEndpoint.getActions()));
+
+        opsRegistrar = testActorRef.underlyingActor();
+    }
+
+    @After
+    public void tearDown() {
+        TestKit.shutdownActorSystem(system, true);
+    }
+
+    @Test
+    public void testHandleReceiveAddEndpoint() {
+        final Map<Address, Optional<RemoteRpcEndpoint>> endpoints = ImmutableMap.of(
+                endpointAddress, Optional.of(firstEndpoint));
+        testActorRef.tell(new UpdateRemoteEndpoints(endpoints), ActorRef.noSender());
+
+        verify(rpcService).registerRpcImplementation(any(RemoteRpcImplementation.class),
+            eq(firstEndpoint.getRpcs()));
+        verifyNoMoreInteractions(rpcService, oldReg, newReg);
+    }
+
+    @Test
+    public void testHandleReceiveRemoveEndpoint() {
+        final Map<Address, Optional<RemoteRpcEndpoint>> endpoints = ImmutableMap.of(
+                endpointAddress, Optional.empty());
+        testActorRef.tell(new UpdateRemoteEndpoints(endpoints), ActorRef.noSender());
+        verifyNoMoreInteractions(rpcService, oldReg, newReg);
+    }
+
+    @Test
+    public void testHandleReceiveUpdateRpcEndpoint() {
+        final InOrder inOrder = inOrder(rpcService, oldReg, newReg);
+
+        testActorRef.tell(new UpdateRemoteEndpoints(ImmutableMap.of(endpointAddress, Optional.of(firstEndpoint))),
+                ActorRef.noSender());
+
+        inOrder.verify(rpcService).registerRpcImplementation(any(RemoteRpcImplementation.class),
+                eq(firstEndpoint.getRpcs()));
+
+        testActorRef.tell(new UpdateRemoteEndpoints(ImmutableMap.of(endpointAddress, Optional.of(secondEndpoint))),
+                ActorRef.noSender());
+
+        inOrder.verify(rpcService).registerRpcImplementation(any(RemoteRpcImplementation.class),
+                eq(secondEndpoint.getRpcs()));
+
+        // verify first registration is closed
+        inOrder.verify(oldReg).close();
+
+        verifyNoMoreInteractions(rpcService, oldReg, newReg);
+    }
+
+    @Test
+    public void testHandleReceiveUpdateActionEndpoint() {
+        final InOrder inOrder = inOrder(actionService, oldActionReg, newActionReg);
+
+        testActorRef.tell(new UpdateRemoteActionEndpoints(ImmutableMap.of(endpointAddress,
+                Optional.of(firstActionEndpoint))), ActorRef.noSender());
+
+        inOrder.verify(actionService).registerActionImplementation(any(RemoteActionImplementation.class),
+                eq(firstActionEndpoint.getActions()));
+
+        testActorRef.tell(new UpdateRemoteActionEndpoints(ImmutableMap.of(endpointAddress,
+                Optional.of(secondActionEndpoint))), ActorRef.noSender());
+
+        inOrder.verify(actionService).registerActionImplementation(any(RemoteActionImplementation.class),
+                eq(secondActionEndpoint.getActions()));
+
+        // verify first registration is closed
+        inOrder.verify(oldActionReg).close();
+
+        verifyNoMoreInteractions(actionService, oldActionReg, newActionReg);
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsImplementationTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsImplementationTest.java
new file mode 100644 (file)
index 0000000..2e94f83
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collections;
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.opendaylight.mdsal.dom.api.DOMActionException;
+import org.opendaylight.mdsal.dom.api.DOMActionResult;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMRpcException;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.mdsal.dom.spi.SimpleDOMActionResult;
+import org.opendaylight.yangtools.util.concurrent.FluentFutures;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+
+/**
+ * Unit tests for RemoteRpcImplementation and RemoteActionImplementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class RemoteOpsImplementationTest extends AbstractOpsTest {
+
+    /**
+     * This test method invokes and executes the remote rpc.
+     */
+    @Test
+    public void testInvokeRpc() throws Exception {
+        final ContainerNode rpcOutput = makeRPCOutput("bar");
+        final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
+
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
+
+        doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
+            .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
+
+        final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
+
+        final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
+        assertEquals(rpcOutput, result.value());
+    }
+
+    /**
+     * This test method invokes and executes the remote action.
+     */
+    @Test
+    public void testInvokeAction() throws Exception {
+        final ContainerNode actionOutput = makeRPCOutput("bar");
+        final DOMActionResult actionResult = new SimpleDOMActionResult(actionOutput, Collections.emptyList());
+        final NormalizedNode invokeActionInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor =
+                ArgumentCaptor.forClass(ContainerNode.class);
+        doReturn(FluentFutures.immediateFluentFuture(actionResult)).when(domActionService2).invokeAction(
+                eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
+        final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
+                TEST_DATA_TREE_ID, (ContainerNode) invokeActionInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
+        final DOMActionResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
+        assertEquals(Optional.of(actionOutput), result.getOutput());
+    }
+
+    /**
+     * This test method invokes and executes the remote rpc with a null input.
+     */
+    @Test
+    public void testInvokeRpcWithNullInput() throws Exception {
+        final ContainerNode rpcOutput = makeRPCOutput("bar");
+        final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
+
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
+
+        doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
+            .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
+
+        ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, null);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
+
+        final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
+        assertEquals(rpcOutput, result.value());
+    }
+
+    /**
+     * This test method invokes and executes the remote action for the null-input case.
+     */
+    @Test
+    public void testInvokeActionWithNullInput() throws Exception {
+        final ContainerNode actionOutput = makeRPCOutput("bar");
+        final DOMActionResult actionResult = new SimpleDOMActionResult(actionOutput);
+
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
+        doReturn(FluentFutures.immediateFluentFuture(actionResult)).when(domActionService2).invokeAction(
+                eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
+
+        ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
+                TEST_DATA_TREE_ID, actionOutput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
+
+        final DOMActionResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
+        assertEquals(Optional.of(actionOutput), result.getOutput());
+    }
+
+    /**
+     * This test method invokes and executes the remote rpc when the result carries no output.
+     */
+    @Test
+    public void testInvokeRpcWithNoOutput() throws Exception {
+        final ContainerNode rpcOutput = null;
+        final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
+
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
+
+        doReturn(FluentFutures.immediateFluentFuture(rpcResult)).when(domRpcService2)
+            .invokeRpc(eq(TEST_RPC), inputCaptor.capture());
+
+        final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
+
+        final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
+        assertNull(result.value());
+    }
+
+    /**
+     * This test method invokes the remote rpc and expects the remote future to fail.
+     */
+    @Test
+    public void testInvokeRpcWithRemoteFailedFuture() {
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
+
+        doReturn(FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)))
+            .when(domRpcService2).invokeRpc(eq(TEST_RPC), inputCaptor.capture());
+
+        final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
+
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMRpcException.class));
+    }
+
+    /**
+     * This test method invokes the remote action and expects the remote future to fail.
+     */
+    @Test
+    public void testInvokeActionWithRemoteFailedFuture() {
+        final ContainerNode invokeActionInput = makeRPCInput("foo");
+        final ArgumentCaptor<ContainerNode> inputCaptor = ArgumentCaptor.forClass(ContainerNode.class);
+
+        doReturn(FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)))
+            .when(domActionService2).invokeAction(eq(TEST_RPC_TYPE), eq(TEST_DATA_TREE_ID), inputCaptor.capture());
+
+        final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
+                TEST_DATA_TREE_ID, invokeActionInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
+
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMActionException.class));
+    }
+
+    /**
+     * This test method invokes the remote rpc and tests the exception raised when an Akka timeout occurs.
+     * Currently ignored since this test with the current config takes around 15 seconds to complete.
+     */
+    @Ignore
+    @Test
+    public void testInvokeRpcWithAkkaTimeoutException() {
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
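+        // No reply is stubbed on the remote side, so the underlying Akka ask is expected to time out;
+        // the exact wait depends on the configured ask duration.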
+        final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
+
+        assertThrows(RemoteDOMRpcException.class, () -> frontEndFuture.get(20, TimeUnit.SECONDS));
+    }
+
+    /**
+     * This test method invokes the remote rpc where the lookup fails with a runtime exception.
+     */
+    @Test
+    public void testInvokeRpcWithLookupException() {
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+
+        doThrow(new RuntimeException("test")).when(domRpcService2).invokeRpc(any(QName.class),
+            any(ContainerNode.class));
+
+        final ListenableFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMRpcFuture.class));
+
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMRpcException.class));
+    }
+
+    /**
+     * This test method invokes the remote action where the lookup fails with a runtime exception.
+     */
+    @Test
+    public void testInvokeActionWithLookupException() {
+        final ContainerNode invokeRpcInput = makeRPCInput("foo");
+
+        doThrow(new RuntimeException("test")).when(domActionService2).invokeAction(any(Absolute.class),
+                any(DOMDataTreeIdentifier.class), any(ContainerNode.class));
+
+        final ListenableFuture<DOMActionResult> frontEndFuture = remoteActionImpl1.invokeAction(TEST_RPC_TYPE,
+                TEST_DATA_TREE_ID, invokeRpcInput);
+        assertThat(frontEndFuture, instanceOf(RemoteDOMActionFuture.class));
+
+        final var ex = assertThrows(ExecutionException.class, () -> frontEndFuture.get(5, TimeUnit.SECONDS)).getCause();
+        assertThat(ex, instanceOf(DOMActionException.class));
+    }
+}
@@ -7,6 +7,11 @@
  */
 package org.opendaylight.controller.remote.rpc;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.actor.UntypedAbstractActor;
@@ -14,33 +19,34 @@ import akka.testkit.TestActorRef;
 import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
 import java.util.concurrent.TimeUnit;
-import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
 import scala.concurrent.duration.FiniteDuration;
 
-public class RemoteRpcProviderConfigTest {
+public class RemoteOpsProviderConfigTest {
 
     @Test
     public void testConfigDefaults() {
-        RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("unit-test").build();
+        RemoteOpsProviderConfig config = new RemoteOpsProviderConfig.Builder("unit-test").build();
 
         //Assert on configurations from common config
-        Assert.assertFalse(config.isMetricCaptureEnabled()); //should be disabled by default
-        Assert.assertNotNull(config.getMailBoxCapacity());
-        Assert.assertNotNull(config.getMailBoxName());
-        Assert.assertNotNull(config.getMailBoxPushTimeout());
+        assertFalse(config.isMetricCaptureEnabled()); //should be disabled by default
+        assertNotNull(config.getMailBoxCapacity());
+        assertNotNull(config.getMailBoxName());
+        assertNotNull(config.getMailBoxPushTimeout());
 
         //rest of the configurations should be set
-        Assert.assertNotNull(config.getActorSystemName());
-        Assert.assertNotNull(config.getRpcBrokerName());
-        Assert.assertNotNull(config.getRpcBrokerPath());
-        Assert.assertNotNull(config.getRpcManagerName());
-        Assert.assertNotNull(config.getRpcManagerPath());
-        Assert.assertNotNull(config.getRpcRegistryName());
-        Assert.assertNotNull(config.getRpcRegistryPath());
-        Assert.assertNotNull(config.getAskDuration());
-        Assert.assertNotNull(config.getGossipTickInterval());
+        assertNotNull(config.getActorSystemName());
+        assertNotNull(config.getRpcBrokerName());
+        assertNotNull(config.getRpcBrokerPath());
+        assertNotNull(config.getRpcManagerName());
+        assertNotNull(config.getRpcManagerPath());
+        assertNotNull(config.getRpcRegistryName());
+        assertNotNull(config.getActionRegistryName());
+        assertNotNull(config.getRpcRegistryPath());
+        assertNotNull(config.getActionRegistryPath());
+        assertNotNull(config.getAskDuration());
+        assertNotNull(config.getGossipTickInterval());
     }
 
     @Test
@@ -52,16 +58,16 @@ public class RemoteRpcProviderConfigTest {
         String timeOutVal = "10ms";
         FiniteDuration expectedTimeout = FiniteDuration.create(10, TimeUnit.MILLISECONDS);
 
-        RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("unit-test")
+        RemoteOpsProviderConfig config = new RemoteOpsProviderConfig.Builder("unit-test")
                 .metricCaptureEnabled(true)//enable metric capture
                 .mailboxCapacity(expectedCapacity)
                 .mailboxPushTimeout(timeOutVal)
                 .withConfigReader(reader)
                 .build();
 
-        Assert.assertTrue(config.isMetricCaptureEnabled());
-        Assert.assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
-        Assert.assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+        assertTrue(config.isMetricCaptureEnabled());
+        assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+        assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
 
         //Now check this config inside an actor
         ActorSystem system = ActorSystem.create("unit-test", config.get());
@@ -71,11 +77,11 @@ public class RemoteRpcProviderConfigTest {
         ConfigTestActor actor = configTestActorTestActorRef.underlyingActor();
         Config actorConfig = actor.getConfig();
 
-        config = new RemoteRpcProviderConfig(actorConfig);
+        config = new RemoteOpsProviderConfig(actorConfig);
 
-        Assert.assertTrue(config.isMetricCaptureEnabled());
-        Assert.assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
-        Assert.assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+        assertTrue(config.isMetricCaptureEnabled());
+        assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+        assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
     }
 
     public static class ConfigTestActor extends UntypedAbstractActor {
@@ -87,7 +93,7 @@ public class RemoteRpcProviderConfigTest {
         }
 
         @Override
-        public void onReceive(Object message) {
+        public void onReceive(final Object message) {
         }
 
         /**
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderFactoryTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteOpsProviderFactoryTest.java
new file mode 100644 (file)
index 0000000..851e37e
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017 Pantheon Technologies s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import akka.actor.ActorSystem;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+
+public class RemoteOpsProviderFactoryTest {
+
+    @Mock
+    private DOMRpcProviderService providerService;
+    @Mock
+    private DOMRpcService rpcService;
+    @Mock
+    private ActorSystem actorSystem;
+    @Mock
+    private RemoteOpsProviderConfig providerConfig;
+    @Mock
+    private DOMActionProviderService actionProviderService;
+    @Mock
+    private DOMActionService actionService;
+
+    @Before
+    public void setUp() {
+        initMocks(this);
+    }
+
+    @Test
+    public void testCreateInstance() {
+        Assert.assertNotNull(RemoteOpsProviderFactory
+                .createInstance(providerService, rpcService, actorSystem, providerConfig,
+                        actionProviderService, actionService));
+    }
+
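+    // Each test below passes null for one argument and expects the factory to reject it immediately,
+    // presumably via a null check on that parameter.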
+    @Test(expected = NullPointerException.class)
+    public void testCreateInstanceMissingProvideService() {
+        RemoteOpsProviderFactory.createInstance(null, rpcService, actorSystem, providerConfig,
+                actionProviderService, actionService);
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testCreateInstanceMissingRpcService() {
+        RemoteOpsProviderFactory.createInstance(providerService, null, actorSystem, providerConfig,
+                actionProviderService, actionService);
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testCreateInstanceMissingActorSystem() {
+        RemoteOpsProviderFactory.createInstance(providerService, rpcService, null, providerConfig,
+                actionProviderService, actionService);
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testCreateInstanceMissingProviderConfig() {
+        RemoteOpsProviderFactory.createInstance(providerService, rpcService, actorSystem, null,
+                actionProviderService, actionService);
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testCreateInstanceMissingActionProvider() {
+        RemoteOpsProviderFactory.createInstance(providerService, rpcService, actorSystem, providerConfig,
+                null, actionService);
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testCreateInstanceMissingActionService() {
+        RemoteOpsProviderFactory.createInstance(providerService, rpcService, actorSystem, providerConfig,
+                actionProviderService, null);
+    }
+}
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.remote.rpc;
 
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 
 import akka.actor.ActorRef;
@@ -16,21 +17,22 @@ import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
 import java.util.concurrent.TimeUnit;
 import org.junit.AfterClass;
-import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.opendaylight.mdsal.dom.api.DOMActionProviderService;
+import org.opendaylight.mdsal.dom.api.DOMActionService;
 import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.mdsal.dom.api.DOMRpcService;
 import scala.concurrent.Await;
 import scala.concurrent.duration.FiniteDuration;
 
-public class RemoteRpcProviderTest {
+public class RemoteOpsProviderTest {
     static ActorSystem system;
-    static RemoteRpcProviderConfig moduleConfig;
+    static RemoteOpsProviderConfig moduleConfig;
 
     @BeforeClass
     public static void setup() {
-        moduleConfig = new RemoteRpcProviderConfig.Builder("odl-cluster-rpc")
+        moduleConfig = new RemoteOpsProviderConfig.Builder("odl-cluster-rpc")
                 .withConfigReader(ConfigFactory::load).build();
         final Config config = moduleConfig.get();
         system = ActorSystem.create("odl-cluster-rpc", config);
@@ -45,16 +47,16 @@ public class RemoteRpcProviderTest {
 
     @Test
     public void testRemoteRpcProvider() throws Exception {
-        try (RemoteRpcProvider rpcProvider = new RemoteRpcProvider(system, mock(DOMRpcProviderService.class),
-            mock(DOMRpcService.class), new RemoteRpcProviderConfig(system.settings().config()))) {
+        try (RemoteOpsProvider rpcProvider = new RemoteOpsProvider(system, mock(DOMRpcProviderService.class),
+                mock(DOMRpcService.class), new RemoteOpsProviderConfig(system.settings().config()),
+                mock(DOMActionProviderService.class), mock(DOMActionService.class))) {
 
             rpcProvider.start();
-
             final ActorRef actorRef = Await.result(
                     system.actorSelection(moduleConfig.getRpcManagerPath()).resolveOne(
                             FiniteDuration.create(1, TimeUnit.SECONDS)), FiniteDuration.create(2, TimeUnit.SECONDS));
 
-            Assert.assertTrue(actorRef.path().toString().contains(moduleConfig.getRpcManagerPath()));
+            assertTrue(actorRef.path().toString().contains(moduleConfig.getRpcManagerPath()));
         }
     }
 }
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcImplementationTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcImplementationTest.java
deleted file mode 100644 (file)
index 6e755f1..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.when;
-
-import com.google.common.util.concurrent.FluentFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.opendaylight.mdsal.dom.api.DOMRpcException;
-import org.opendaylight.mdsal.dom.api.DOMRpcResult;
-import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.yangtools.util.concurrent.FluentFutures;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-/**
- * Unit tests for RemoteRpcImplementation.
- *
- * @author Thomas Pantelis
- */
-public class RemoteRpcImplementationTest extends AbstractRpcTest {
-
-    /**
-     * This test method invokes and executes the remote rpc.
-     */
-    @Test
-    public void testInvokeRpc() throws Exception {
-        final ContainerNode rpcOutput = makeRPCOutput("bar");
-        final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
-
-        final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
-                (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
-
-        when(domRpcService2.invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture())).thenReturn(
-                FluentFutures.immediateFluentFuture(rpcResult));
-
-        final FluentFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
-
-        final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertEquals(rpcOutput, result.getResult());
-    }
-
-    /**
-     * This test method invokes and executes the remote rpc.
-     */
-    @Test
-    public void testInvokeRpcWithNullInput() throws Exception {
-        final ContainerNode rpcOutput = makeRPCOutput("bar");
-        final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
-
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
-                (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
-
-        when(domRpcService2.invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture())).thenReturn(
-                FluentFutures.immediateFluentFuture(rpcResult));
-
-        FluentFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, null);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
-
-        final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertEquals(rpcOutput, result.getResult());
-    }
-
-    /**
-     * This test method invokes and executes the remote rpc.
-     */
-    @Test
-    public void testInvokeRpcWithNoOutput() throws Exception {
-        final ContainerNode rpcOutput = null;
-        final DOMRpcResult rpcResult = new DefaultDOMRpcResult(rpcOutput);
-
-        final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
-                (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
-
-        when(domRpcService2.invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture())).thenReturn(
-                FluentFutures.immediateFluentFuture(rpcResult));
-
-        final FluentFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
-
-        final DOMRpcResult result = frontEndFuture.get(5, TimeUnit.SECONDS);
-        assertNull(result.getResult());
-    }
-
-    /**
-     * This test method invokes and executes the remote rpc.
-     */
-    @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
-    @Test(expected = DOMRpcException.class)
-    public void testInvokeRpcWithRemoteFailedFuture() throws Throwable {
-        final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
-        @SuppressWarnings({"unchecked", "rawtypes"})
-        final ArgumentCaptor<NormalizedNode<?, ?>> inputCaptor =
-                (ArgumentCaptor) ArgumentCaptor.forClass(NormalizedNode.class);
-
-        when(domRpcService2.invokeRpc(eq(TEST_RPC_TYPE), inputCaptor.capture())).thenReturn(
-                FluentFutures.immediateFailedFluentFuture(new RemoteDOMRpcException("Test Exception", null)));
-
-        final FluentFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
-
-        try {
-            frontEndFuture.get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
-    }
-
-    /**
-     * This test method invokes and tests exceptions when akka timeout occured
-     * Currently ignored since this test with current config takes around 15 seconds to complete.
-     */
-    @Ignore
-    @Test(expected = RemoteDOMRpcException.class)
-    public void testInvokeRpcWithAkkaTimeoutException() throws Exception {
-        final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
-        final FluentFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
-
-        frontEndFuture.get(20, TimeUnit.SECONDS);
-    }
-
-    /**
-     * This test method invokes remote rpc and lookup failed
-     * with runtime exception.
-     */
-    @Test(expected = DOMRpcException.class)
-    @SuppressWarnings({"checkstyle:AvoidHidingCauseException", "checkstyle:IllegalThrows"})
-    public void testInvokeRpcWithLookupException() throws Throwable {
-        final NormalizedNode<?, ?> invokeRpcInput = makeRPCInput("foo");
-
-        doThrow(new RuntimeException("test")).when(domRpcService2).invokeRpc(any(SchemaPath.class),
-            any(NormalizedNode.class));
-
-        final FluentFuture<DOMRpcResult> frontEndFuture = remoteRpcImpl1.invokeRpc(TEST_RPC_ID, invokeRpcInput);
-        assertTrue(frontEndFuture instanceof RemoteDOMRpcFuture);
-
-        try {
-            frontEndFuture.get(5, TimeUnit.SECONDS);
-        } catch (ExecutionException e) {
-            throw e.getCause();
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactoryTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactoryTest.java
deleted file mode 100644 (file)
index 3481d5b..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.remote.rpc;
-
-import static org.mockito.MockitoAnnotations.initMocks;
-
-import akka.actor.ActorSystem;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mock;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.mdsal.dom.api.DOMRpcService;
-
-public class RemoteRpcProviderFactoryTest {
-
-    @Mock
-    private DOMRpcProviderService providerService;
-    @Mock
-    private DOMRpcService rpcService;
-    @Mock
-    private ActorSystem actorSystem;
-    @Mock
-    private RemoteRpcProviderConfig providerConfig;
-
-    @Before
-    public void setUp() {
-        initMocks(this);
-    }
-
-    @Test
-    public void testCreateInstance() {
-        Assert.assertNotNull(RemoteRpcProviderFactory
-                .createInstance(providerService, rpcService, actorSystem, providerConfig));
-    }
-
-    @Test(expected = NullPointerException.class)
-    public void testCreateInstanceMissingProvideService() {
-        RemoteRpcProviderFactory.createInstance(null, rpcService, actorSystem, providerConfig);
-    }
-
-    @Test(expected = NullPointerException.class)
-    public void testCreateInstanceMissingRpcService() {
-        RemoteRpcProviderFactory.createInstance(providerService, null, actorSystem, providerConfig);
-    }
-
-    @Test(expected = NullPointerException.class)
-    public void testCreateInstanceMissingActorSystem() {
-        RemoteRpcProviderFactory.createInstance(providerService, rpcService, null, providerConfig);
-    }
-
-    @Test(expected = NullPointerException.class)
-    public void testCreateInstanceMissingProviderConfig() {
-        RemoteRpcProviderFactory.createInstance(providerService, rpcService, actorSystem, null);
-    }
-}
index 941ae2b330090b1cd49fffe45dd31c6495408058..bba4305a9174982b0e631f490b2118684a456fa3 100644 (file)
@@ -7,11 +7,14 @@
  */
 package org.opendaylight.controller.remote.rpc;
 
+import static org.junit.Assert.assertEquals;
+
 import java.util.ArrayList;
 import java.util.List;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 
@@ -23,10 +26,9 @@ public class RpcErrorsExceptionTest {
 
     @Before
     public void setUp() {
-        final RpcError rpcError = RpcResultBuilder.newError(
-                RpcError.ErrorType.RPC, "error", "error message");
-        final RpcError rpcWarning = RpcResultBuilder.newWarning(
-                RpcError.ErrorType.RPC, "warning", "warning message");
+        final RpcError rpcError = RpcResultBuilder.newError(ErrorType.RPC, new ErrorTag("error"), "error message");
+        final RpcError rpcWarning = RpcResultBuilder.newWarning(ErrorType.RPC, new ErrorTag("warning"),
+            "warning message");
 
         rpcErrors = new ArrayList<>();
         rpcErrors.add(rpcError);
@@ -37,22 +39,25 @@ public class RpcErrorsExceptionTest {
 
     @Test
     public void testGetMessage() {
-        Assert.assertEquals(ERROR_MESSAGE, exception.getMessage());
+        assertEquals(ERROR_MESSAGE, exception.getMessage());
     }
 
     @Test
     public void testGetRpcErrors() {
         final List<RpcError> actualErrors = (List<RpcError>) exception.getRpcErrors();
-        Assert.assertEquals(rpcErrors.size(), actualErrors.size());
+        assertEquals(rpcErrors.size(), actualErrors.size());
 
         for (int i = 0; i < actualErrors.size(); i++) {
-            Assert.assertEquals(rpcErrors.get(i).getApplicationTag(), actualErrors.get(i).getApplicationTag());
-            Assert.assertEquals(rpcErrors.get(i).getSeverity(), actualErrors.get(i).getSeverity());
-            Assert.assertEquals(rpcErrors.get(i).getMessage(), actualErrors.get(i).getMessage());
-            Assert.assertEquals(rpcErrors.get(i).getErrorType(), actualErrors.get(i).getErrorType());
-            Assert.assertEquals(rpcErrors.get(i).getCause(), actualErrors.get(i).getCause());
-            Assert.assertEquals(rpcErrors.get(i).getInfo(), actualErrors.get(i).getInfo());
-            Assert.assertEquals(rpcErrors.get(i).getTag(), actualErrors.get(i).getTag());
+            final RpcError expected = rpcErrors.get(i);
+            final RpcError actual = actualErrors.get(i);
+
+            assertEquals(expected.getApplicationTag(), actual.getApplicationTag());
+            assertEquals(expected.getSeverity(), actual.getSeverity());
+            assertEquals(expected.getMessage(), actual.getMessage());
+            assertEquals(expected.getErrorType(), actual.getErrorType());
+            assertEquals(expected.getCause(), actual.getCause());
+            assertEquals(expected.getInfo(), actual.getInfo());
+            assertEquals(expected.getTag(), actual.getTag());
         }
     }
-}
\ No newline at end of file
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcRegistrarTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcRegistrarTest.java
deleted file mode 100644 (file)
index 48b825a..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.Address;
-import akka.actor.Props;
-import akka.testkit.TestActorRef;
-import akka.testkit.javadsl.TestKit;
-import com.google.common.collect.ImmutableMap;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Optional;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.InOrder;
-import org.mockito.Mock;
-import org.mockito.Mockito;
-import org.mockito.MockitoAnnotations;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.RemoteRpcEndpoint;
-import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
-
-public class RpcRegistrarTest {
-    @Mock
-    private DOMRpcProviderService service;
-    @Mock
-    private DOMRpcImplementationRegistration<RemoteRpcImplementation> oldReg;
-    @Mock
-    private DOMRpcImplementationRegistration<RemoteRpcImplementation> newReg;
-
-    private ActorSystem system;
-    private TestActorRef<RpcRegistrar> testActorRef;
-    private Address endpointAddress;
-    private RemoteRpcEndpoint firstEndpoint;
-    private RemoteRpcEndpoint secondEndpoint;
-    private RpcRegistrar rpcRegistrar;
-
-    @Before
-    public void setUp() {
-        MockitoAnnotations.initMocks(this);
-        system = ActorSystem.create("test");
-
-        final TestKit testKit = new TestKit(system);
-        final RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("system").build();
-        final Props props = RpcRegistrar.props(config, service);
-        testActorRef = new TestActorRef<>(system, props, testKit.getRef(), "actorRef");
-        endpointAddress = new Address("http", "local");
-
-        final DOMRpcIdentifier firstEndpointId = DOMRpcIdentifier.create(
-                SchemaPath.create(true, QName.create("first:identifier", "foo")));
-        final DOMRpcIdentifier secondEndpointId = DOMRpcIdentifier.create(
-                SchemaPath.create(true, QName.create("second:identifier", "bar")));
-
-        final TestKit senderKit = new TestKit(system);
-        firstEndpoint = new RemoteRpcEndpoint(senderKit.getRef(), Collections.singletonList(firstEndpointId));
-        secondEndpoint = new RemoteRpcEndpoint(senderKit.getRef(), Collections.singletonList(secondEndpointId));
-
-        Mockito.doReturn(oldReg).when(service).registerRpcImplementation(
-                Mockito.any(RemoteRpcImplementation.class), Mockito.eq(firstEndpoint.getRpcs()));
-
-        Mockito.doReturn(newReg).when(service).registerRpcImplementation(
-                Mockito.any(RemoteRpcImplementation.class), Mockito.eq(secondEndpoint.getRpcs()));
-
-        rpcRegistrar = testActorRef.underlyingActor();
-    }
-
-    @After
-    public void tearDown() {
-        TestKit.shutdownActorSystem(system, true);
-    }
-
-    @Test
-    public void testPostStop() throws Exception {
-        testActorRef.tell(new UpdateRemoteEndpoints(ImmutableMap.of(endpointAddress, Optional.of(firstEndpoint))),
-                ActorRef.noSender());
-        testActorRef.tell(new UpdateRemoteEndpoints(ImmutableMap.of(endpointAddress, Optional.of(secondEndpoint))),
-                ActorRef.noSender());
-
-        rpcRegistrar.postStop();
-
-        Mockito.verify(oldReg).close();
-        Mockito.verify(newReg).close();
-    }
-
-    @Test
-    public void testHandleReceiveAddEndpoint() {
-        final Map<Address, Optional<RemoteRpcEndpoint>> endpoints = ImmutableMap.of(
-                endpointAddress, Optional.of(firstEndpoint));
-        testActorRef.tell(new UpdateRemoteEndpoints(endpoints), ActorRef.noSender());
-
-        Mockito.verify(service).registerRpcImplementation(
-                Mockito.any(RemoteRpcImplementation.class), Mockito.eq(firstEndpoint.getRpcs()));
-        Mockito.verifyNoMoreInteractions(service, oldReg, newReg);
-    }
-
-    @Test
-    public void testHandleReceiveRemoveEndpoint() {
-        final Map<Address, Optional<RemoteRpcEndpoint>> endpoints = ImmutableMap.of(
-                endpointAddress, Optional.empty());
-        testActorRef.tell(new UpdateRemoteEndpoints(endpoints), ActorRef.noSender());
-        Mockito.verifyNoMoreInteractions(service, oldReg, newReg);
-    }
-
-    @Test
-    public void testHandleReceiveUpdateEndpoint() {
-        final InOrder inOrder = Mockito.inOrder(service, oldReg, newReg);
-
-        testActorRef.tell(new UpdateRemoteEndpoints(ImmutableMap.of(endpointAddress, Optional.of(firstEndpoint))),
-                ActorRef.noSender());
-
-        // first registration
-        inOrder.verify(service).registerRpcImplementation(
-                Mockito.any(RemoteRpcImplementation.class), Mockito.eq(firstEndpoint.getRpcs()));
-
-        testActorRef.tell(new UpdateRemoteEndpoints(ImmutableMap.of(endpointAddress, Optional.of(secondEndpoint))),
-                ActorRef.noSender());
-
-        // second registration
-        inOrder.verify(service).registerRpcImplementation(
-                Mockito.any(RemoteRpcImplementation.class), Mockito.eq(secondEndpoint.getRpcs()));
-
-        // verify first registration is closed
-        inOrder.verify(oldReg).close();
-
-        Mockito.verifyNoMoreInteractions(service, oldReg, newReg);
-    }
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/ExecuteOpsTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/ExecuteOpsTest.java
new file mode 100644 (file)
index 0000000..189ca17
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.messages;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.commons.lang3.SerializationUtils;
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.AbstractOpsTest;
+
+public class ExecuteOpsTest {
+
+    @Test
+    public void testOpsSerialization() {
+        ExecuteRpc expected = ExecuteRpc.from(AbstractOpsTest.TEST_RPC_ID,
+                AbstractOpsTest.makeRPCInput("serialization-test"));
+
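+        // Round-trip the message through Java serialization and compare the reconstituted fields.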
+        ExecuteRpc actual = SerializationUtils.clone(expected);
+
+        assertEquals("getName", expected.getType(), actual.getType());
+        assertEquals("getInputNormalizedNode", expected.getInput(), actual.getInput());
+        assertEquals("getPath", expected.getType(), actual.getType());
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/ExecuteRpcTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/ExecuteRpcTest.java
deleted file mode 100644 (file)
index 2aeed83..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2016 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.remote.rpc.messages;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Test;
-import org.opendaylight.controller.remote.rpc.AbstractRpcTest;
-
-/**
- * Unit tests for ExecuteRpc.
- *
- * @author Thomas Pantelis
- */
-public class ExecuteRpcTest {
-
-    @Test
-    public void testSerialization() {
-        ExecuteRpc expected = ExecuteRpc.from(AbstractRpcTest.TEST_RPC_ID,
-                AbstractRpcTest.makeRPCInput("serialization-test"));
-
-        ExecuteRpc actual = (ExecuteRpc) SerializationUtils.clone(expected);
-
-        assertEquals("getRpc", expected.getRpc(), actual.getRpc());
-        assertEquals("getInputNormalizedNode", expected.getInputNormalizedNode(), actual.getInputNormalizedNode());
-    }
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/OpsResponseTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/OpsResponseTest.java
new file mode 100644 (file)
index 0000000..4957075
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.messages;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+import java.util.Optional;
+import org.apache.commons.lang3.SerializationUtils;
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.AbstractOpsTest;
+
+/**
+ * Unit tests for RpcResponse.
+ *
+ * @author Thomas Pantelis
+ */
+public class OpsResponseTest {
+
+    @Test
+    public void testSerialization() {
+        RpcResponse expectedRpc = new RpcResponse(AbstractOpsTest.makeRPCOutput("serialization-test"));
+
+        ActionResponse expectedAction = new ActionResponse(
+            Optional.of(AbstractOpsTest.makeRPCOutput("serialization-test")), Collections.emptyList());
+
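+        // Clone both responses through Java serialization and check that the outputs survive the round trip.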
+        RpcResponse actualRpc = SerializationUtils.clone(expectedRpc);
+
+        ActionResponse actualAction = SerializationUtils.clone(expectedAction);
+
+        assertEquals("getResultNormalizedNode", expectedRpc.getOutput(),
+                actualRpc.getOutput());
+
+        assertEquals("getResultNormalizedNode", expectedAction.getOutput(),
+                actualAction.getOutput());
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/RpcResponseTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/messages/RpcResponseTest.java
deleted file mode 100644 (file)
index f4ec377..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (c) 2016 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.remote.rpc.messages;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.commons.lang.SerializationUtils;
-import org.junit.Test;
-import org.opendaylight.controller.remote.rpc.AbstractRpcTest;
-
-/**
- * Unit tests for RpcResponse.
- *
- * @author Thomas Pantelis
- */
-public class RpcResponseTest {
-
-    @Test
-    public void testSerialization() {
-        RpcResponse expected = new RpcResponse(AbstractRpcTest.makeRPCOutput("serialization-test"));
-
-        RpcResponse actual = (RpcResponse) SerializationUtils.clone(expected);
-
-        assertEquals("getResultNormalizedNode", expected.getResultNormalizedNode(), actual.getResultNormalizedNode());
-    }
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistryTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/ActionRegistryTest.java
new file mode 100644 (file)
index 0000000..7f2f096
--- /dev/null
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2014, 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess.Singletons.GET_ALL_BUCKETS;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess.Singletons.GET_BUCKET_VERSIONS;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Address;
+import akka.cluster.Cluster;
+import akka.cluster.ClusterEvent.CurrentClusterState;
+import akka.cluster.Member;
+import akka.cluster.MemberStatus;
+import akka.cluster.UniqueAddress;
+import akka.testkit.javadsl.TestKit;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.typesafe.config.ConfigFactory;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.Messages.UpdateActions;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.Messages.UpdateRemoteActionEndpoints;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry.RemoteActionEndpoint;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ActionRegistryTest {
+    private static final Logger LOG = LoggerFactory.getLogger(ActionRegistryTest.class);
+
+    private static ActorSystem node1;
+    private static ActorSystem node2;
+    private static ActorSystem node3;
+
+    private TestKit invoker1;
+    private TestKit invoker2;
+    private TestKit invoker3;
+    private TestKit registrar1;
+    private TestKit registrar2;
+    private TestKit registrar3;
+    private ActorRef registry1;
+    private ActorRef registry2;
+    private ActorRef registry3;
+
+    private int routeIdCounter = 1;
+
+    @BeforeClass
+    public static void staticSetup() {
+        AkkaConfigurationReader reader = ConfigFactory::load;
+
+        RemoteOpsProviderConfig config1 = new RemoteOpsProviderConfig.Builder("memberA").gossipTickInterval("200ms")
+                .withConfigReader(reader).build();
+        RemoteOpsProviderConfig config2 = new RemoteOpsProviderConfig.Builder("memberB").gossipTickInterval("200ms")
+                .withConfigReader(reader).build();
+        RemoteOpsProviderConfig config3 = new RemoteOpsProviderConfig.Builder("memberC").gossipTickInterval("200ms")
+                .withConfigReader(reader).build();
+        node1 = ActorSystem.create("opendaylight-rpc", config1.get());
+        node2 = ActorSystem.create("opendaylight-rpc", config2.get());
+        node3 = ActorSystem.create("opendaylight-rpc", config3.get());
+
+        waitForMembersUp(node1, Cluster.get(node2).selfUniqueAddress(), Cluster.get(node3).selfUniqueAddress());
+        waitForMembersUp(node2, Cluster.get(node1).selfUniqueAddress(), Cluster.get(node3).selfUniqueAddress());
+    }
+
+    static void waitForMembersUp(final ActorSystem node, final UniqueAddress... addresses) {
+        Set<UniqueAddress> otherMembersSet = Sets.newHashSet(addresses);
+        Stopwatch sw = Stopwatch.createStarted();
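+        // Poll the cluster state for up to 10 seconds until every expected member is reported as Up.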
+        while (sw.elapsed(TimeUnit.SECONDS) <= 10) {
+            CurrentClusterState state = Cluster.get(node).state();
+            for (Member m : state.getMembers()) {
+                if (m.status() == MemberStatus.up() && otherMembersSet.remove(m.uniqueAddress())
+                        && otherMembersSet.isEmpty()) {
+                    return;
+                }
+            }
+
+            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
+        }
+
+        fail("Member(s) " + otherMembersSet + " are not Up");
+    }
+
+    @AfterClass
+    public static void staticTeardown() {
+        TestKit.shutdownActorSystem(node1);
+        TestKit.shutdownActorSystem(node2);
+        TestKit.shutdownActorSystem(node3);
+    }
+
+    @Before
+    public void setup() {
+        invoker1 = new TestKit(node1);
+        registrar1 = new TestKit(node1);
+        registry1 = node1.actorOf(ActionRegistry.props(config(node1), invoker1.getRef(), registrar1.getRef()));
+        invoker2 = new TestKit(node2);
+        registrar2 = new TestKit(node2);
+        registry2 = node2.actorOf(ActionRegistry.props(config(node2), invoker2.getRef(), registrar2.getRef()));
+        invoker3 = new TestKit(node3);
+        registrar3 = new TestKit(node3);
+        registry3 = node3.actorOf(ActionRegistry.props(config(node3), invoker3.getRef(), registrar3.getRef()));
+    }
+
+    private static RemoteOpsProviderConfig config(final ActorSystem node) {
+        return new RemoteOpsProviderConfig(node.settings().config());
+    }
+
+    @After
+    public void teardown() {
+        if (registry1 != null) {
+            node1.stop(registry1);
+        }
+        if (registry2 != null) {
+            node2.stop(registry2);
+        }
+        if (registry3 != null) {
+            node3.stop(registry3);
+        }
+
+        if (invoker1 != null) {
+            node1.stop(invoker1.getRef());
+        }
+        if (invoker2 != null) {
+            node2.stop(invoker2.getRef());
+        }
+        if (invoker3 != null) {
+            node3.stop(invoker3.getRef());
+        }
+
+        if (registrar1 != null) {
+            node1.stop(registrar1.getRef());
+        }
+        if (registrar2 != null) {
+            node2.stop(registrar2.getRef());
+        }
+        if (registrar3 != null) {
+            node3.stop(registrar3.getRef());
+        }
+    }
+
+    /**
+     * One node cluster. 1. Register an action and ensure the router can be found. 2. Then remove the action and
+     * ensure it is deleted.
+     */
+    @Test
+    public void testAddRemoveActionOnSameNode() {
+        LOG.info("testAddRemoveActionOnSameNode starting");
+
+        Address nodeAddress = node1.provider().getDefaultAddress();
+
+        // Add action on node 1
+
+        List<DOMActionInstance> addedRouteIds = createRouteIds();
+
+        registry1.tell(new ActionRegistry.Messages.UpdateActions(addedRouteIds,
+                Collections.emptyList()), ActorRef.noSender());
+
+        // Bucket store should get an update bucket message. Updated bucket contains added action.
+        final TestKit testKit = new TestKit(node1);
+
+        Map<Address, Bucket<ActionRoutingTable>> buckets = retrieveBuckets(registry1, testKit, nodeAddress);
+        verifyBucket(buckets.get(nodeAddress), addedRouteIds);
+
+        Map<Address, Long> versions = retrieveVersions(registry1, testKit);
+        assertEquals("Version for bucket " + nodeAddress, (Long) buckets.get(nodeAddress).getVersion(),
+                versions.get(nodeAddress));
+
+        // Now remove action
+        registry1.tell(new UpdateActions(Collections.emptyList(), addedRouteIds), ActorRef.noSender());
+
+        // Bucket store should get an update bucket message. Action is removed in the updated bucket
+
+        verifyEmptyBucket(testKit, registry1, nodeAddress);
+
+        LOG.info("testAddRemoveActionOnSameNode ending");
+
+    }
+
+    /**
+     * Three node cluster. 1. Register an action on one node and ensure the second node gets updated. 2. Remove the
+     * action on that node and ensure the second node gets updated.
+     */
+    @Test
+    public void testActionAddRemoveInCluster() {
+
+        LOG.info("testActionAddRemoveInCluster starting");
+
+        List<DOMActionInstance> addedRouteIds = createRouteIds();
+
+        Address node1Address = node1.provider().getDefaultAddress();
+
+        // Add action on node 1
+        registry1.tell(new UpdateActions(addedRouteIds, Collections.emptyList()), ActorRef.noSender());
+
+        // Bucket store on node2 should get a message to update its local copy of remote buckets
+        final TestKit testKit = new TestKit(node2);
+
+        Map<Address, Bucket<ActionRoutingTable>> buckets = retrieveBuckets(registry2, testKit, node1Address);
+        verifyBucket(buckets.get(node1Address), addedRouteIds);
+
+        // Now remove
+        registry1.tell(new UpdateActions(Collections.emptyList(), addedRouteIds), ActorRef.noSender());
+
+        // Bucket store on node2 should get a message to update its local copy of remote buckets.
+        // Wait for the bucket for node1 to be empty.
+
+        verifyEmptyBucket(testKit, registry2, node1Address);
+
+        LOG.info("testActionAddRemoveInCluster ending");
+    }
+
+    private void verifyEmptyBucket(final TestKit testKit, final ActorRef registry, final Address address)
+            throws AssertionError {
+        Map<Address, Bucket<ActionRoutingTable>> buckets;
+        int numTries = 0;
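+        // Removal propagates asynchronously, so keep polling until the bucket for this address reports empty.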
+        while (true) {
+            buckets = retrieveBuckets(registry, testKit, address);
+
+            try {
+                verifyBucket(buckets.get(address), Collections.emptyList());
+                break;
+            } catch (AssertionError e) {
+                if (++numTries >= 50) {
+                    throw e;
+                }
+            }
+
+            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+        }
+    }
+
+    /**
+     * Three node cluster. Register action on 2 nodes. Ensure 3rd gets updated.
+     */
+    @Test
+    public void testActionAddedOnMultiNodes() {
+        final TestKit testKit = new TestKit(node3);
+
+        // Add action on node 1
+        List<DOMActionInstance> addedRouteIds1 = createRouteIds();
+        registry1.tell(new UpdateActions(addedRouteIds1, Collections.emptyList()), ActorRef.noSender());
+
+        final UpdateRemoteActionEndpoints req1 = registrar3.expectMsgClass(Duration.ofSeconds(3),
+                UpdateRemoteActionEndpoints.class);
+
+        // Add action on node 2
+        List<DOMActionInstance> addedRouteIds2 = createRouteIds();
+        registry2.tell(new UpdateActions(addedRouteIds2, Collections.emptyList()), ActorRef.noSender());
+
+        final UpdateRemoteActionEndpoints req2 = registrar3.expectMsgClass(Duration.ofSeconds(3),
+                UpdateRemoteActionEndpoints.class);
+        Address node2Address = node2.provider().getDefaultAddress();
+        Address node1Address = node1.provider().getDefaultAddress();
+
+        Map<Address, Bucket<ActionRoutingTable>> buckets = retrieveBuckets(registry3, testKit, node1Address,
+                node2Address);
+
+        verifyBucket(buckets.get(node1Address), addedRouteIds1);
+        verifyBucket(buckets.get(node2Address), addedRouteIds2);
+
+        Map<Address, Long> versions = retrieveVersions(registry3, testKit);
+        assertEquals("Version for bucket " + node1Address, (Long) buckets.get(node1Address).getVersion(),
+                versions.get(node1Address));
+        assertEquals("Version for bucket " + node2Address, (Long) buckets.get(node2Address).getVersion(),
+                versions.get(node2Address));
+
+        assertEndpoints(req1, node1Address, invoker1);
+        assertEndpoints(req2, node2Address, invoker2);
+    }
+
+    private static void assertEndpoints(final ActionRegistry.Messages.UpdateRemoteActionEndpoints msg,
+                                        final Address address, final TestKit invoker) {
+        final Map<Address, Optional<RemoteActionEndpoint>> endpoints = msg.getActionEndpoints();
+        assertEquals(1, endpoints.size());
+
+        final Optional<RemoteActionEndpoint> maybeEndpoint = endpoints.get(address);
+        assertNotNull(maybeEndpoint);
+        assertTrue(maybeEndpoint.isPresent());
+
+        final RemoteActionEndpoint endpoint = maybeEndpoint.orElseThrow();
+        final ActorRef router = endpoint.getRouter();
+        assertNotNull(router);
+
+        router.tell("hello", ActorRef.noSender());
+        final String s = invoker.expectMsgClass(Duration.ofSeconds(3), String.class);
+        assertEquals("hello", s);
+    }
+
+    private static Map<Address, Long> retrieveVersions(final ActorRef bucketStore, final TestKit testKit) {
+        bucketStore.tell(GET_BUCKET_VERSIONS, testKit.getRef());
+        @SuppressWarnings("unchecked")
+        final Map<Address, Long> reply = testKit.expectMsgClass(Duration.ofSeconds(3), Map.class);
+        return reply;
+    }
+
+    private static void verifyBucket(final Bucket<ActionRoutingTable> bucket,
+                                     final List<DOMActionInstance> expRouteIds) {
+        ActionRoutingTable table = bucket.getData();
+        assertNotNull("Bucket ActionRoutingTable is null", table);
+        for (DOMActionInstance r : expRouteIds) {
+            if (!table.contains(r)) {
+                fail("ActionRoutingTable does not contain " + r + ". Actual: " + table);
+            }
+        }
+
+        assertEquals("ActionRoutingTable size", expRouteIds.size(), table.size());
+    }
+
+    private static Map<Address, Bucket<ActionRoutingTable>> retrieveBuckets(
+            final ActorRef bucketStore, final TestKit testKit, final Address... addresses) {
+        int numTries = 0;
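+        // Gossip-based replication is asynchronous, so poll (up to 50 tries, 200 ms apart) until a bucket
+        // exists for every requested address.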
+        while (true) {
+            bucketStore.tell(GET_ALL_BUCKETS, testKit.getRef());
+            @SuppressWarnings("unchecked")
+            Map<Address, Bucket<ActionRoutingTable>> buckets = testKit.expectMsgClass(Duration.ofSeconds(3),
+                    Map.class);
+
+            boolean foundAll = true;
+            for (Address addr : addresses) {
+                Bucket<ActionRoutingTable> bucket = buckets.get(addr);
+                if (bucket == null) {
+                    foundAll = false;
+                    break;
+                }
+            }
+
+            if (foundAll) {
+                return buckets;
+            }
+
+            if (++numTries >= 50) {
+                fail("Missing expected buckets for addresses: " + Arrays.toString(addresses)
+                        + ", Actual: " + buckets);
+            }
+
+            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+        }
+    }
+
+    @Test
+    public void testAddRoutesConcurrency() {
+        final TestKit testKit = new TestKit(node1);
+
+        final int nRoutes = 500;
+        final Collection<DOMActionInstance> added = new ArrayList<>(nRoutes);
+        for (int i = 0; i < nRoutes; i++) {
+            QName type = QName.create("/mockaction", "mockaction" + routeIdCounter++);
+            final DOMActionInstance routeId = DOMActionInstance.of(Absolute.of(type), LogicalDatastoreType.OPERATIONAL,
+                    YangInstanceIdentifier.of(type));
+            added.add(routeId);
+
+            //Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+            registry1.tell(new UpdateActions(Arrays.asList(routeId), Collections.emptyList()),
+                    ActorRef.noSender());
+        }
+
+        int numTries = 0;
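+        // The 500 updates race with gossip propagation; poll until the local bucket's routing table holds
+        // all of them.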
+        while (true) {
+            registry1.tell(GET_ALL_BUCKETS, testKit.getRef());
+            @SuppressWarnings("unchecked")
+            Map<Address, Bucket<ActionRoutingTable>> buckets = testKit.expectMsgClass(Duration.ofSeconds(3),
+                    Map.class);
+
+            Bucket<ActionRoutingTable> localBucket = buckets.values().iterator().next();
+            ActionRoutingTable table = localBucket.getData();
+            if (table != null && table.size() == nRoutes) {
+                for (DOMActionInstance r : added) {
+                    assertTrue("ActionRoutingTable contains " + r, table.contains(r));
+                }
+
+                break;
+            }
+
+            if (++numTries >= 50) {
+                fail("Expected # routes: " + nRoutes + ", Actual: " + table.size());
+            }
+
+            Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
+        }
+    }
+
+    private List<DOMActionInstance> createRouteIds() {
+        QName type = QName.create("/mockaction", "mockaction" + routeIdCounter++);
+        var routeIds = new ArrayList<DOMActionInstance>(1);
+        routeIds.add(DOMActionInstance.of(Absolute.of(type), LogicalDatastoreType.OPERATIONAL,
+            YangInstanceIdentifier.of(type)));
+        return routeIds;
+    }
+}
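Note on the test pattern above: bucket contents propagate via gossip, so the ActionRegistry tests poll GetAllBuckets up to 50 times with a 200 ms back-off instead of asserting immediately. A minimal sketch of that retry idiom, shown here only for illustration (the class and method names are hypothetical, not part of this patch):

    import com.google.common.util.concurrent.Uninterruptibles;
    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    final class EventualAssert {
        private static final int MAX_TRIES = 50;

        private EventualAssert() {
            // utility class
        }

        // Poll an eventually-consistent source until the condition holds or the attempts run out.
        static void awaitCondition(final BooleanSupplier condition, final String failureMessage) {
            for (int attempt = 0; attempt < MAX_TRIES; attempt++) {
                if (condition.getAsBoolean()) {
                    return;
                }
                // Same back-off the registry tests use between GetAllBuckets queries
                Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
            }
            throw new AssertionError(failureMessage);
        }
    }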
index d32e8d8a6ea8522ca2faec9a4a5d4fac7cdb56f7..18b2f9f6e0769383b4c32a8d46c4b364129585f5 100644 (file)
@@ -27,7 +27,6 @@ import com.google.common.base.Stopwatch;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Uninterruptibles;
 import com.typesafe.config.ConfigFactory;
-import java.net.URI;
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -44,7 +43,7 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.UpdateRemoteEndpoints;
@@ -52,7 +51,6 @@ import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.RemoteRpcEndp
 import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,11 +77,11 @@ public class RpcRegistryTest {
     public static void staticSetup() {
         AkkaConfigurationReader reader = ConfigFactory::load;
 
-        RemoteRpcProviderConfig config1 = new RemoteRpcProviderConfig.Builder("memberA").gossipTickInterval("200ms")
+        RemoteOpsProviderConfig config1 = new RemoteOpsProviderConfig.Builder("memberA").gossipTickInterval("200ms")
                 .withConfigReader(reader).build();
-        RemoteRpcProviderConfig config2 = new RemoteRpcProviderConfig.Builder("memberB").gossipTickInterval("200ms")
+        RemoteOpsProviderConfig config2 = new RemoteOpsProviderConfig.Builder("memberB").gossipTickInterval("200ms")
                 .withConfigReader(reader).build();
-        RemoteRpcProviderConfig config3 = new RemoteRpcProviderConfig.Builder("memberC").gossipTickInterval("200ms")
+        RemoteOpsProviderConfig config3 = new RemoteOpsProviderConfig.Builder("memberC").gossipTickInterval("200ms")
                 .withConfigReader(reader).build();
         node1 = ActorSystem.create("opendaylight-rpc", config1.get());
         node2 = ActorSystem.create("opendaylight-rpc", config2.get());
@@ -131,8 +129,8 @@ public class RpcRegistryTest {
         registry3 = node3.actorOf(RpcRegistry.props(config(node3), invoker3.getRef(), registrar3.getRef()));
     }
 
-    private static RemoteRpcProviderConfig config(final ActorSystem node) {
-        return new RemoteRpcProviderConfig(node.settings().config());
+    private static RemoteOpsProviderConfig config(final ActorSystem node) {
+        return new RemoteOpsProviderConfig(node.settings().config());
     }
 
     @After
@@ -299,14 +297,14 @@ public class RpcRegistryTest {
     }
 
     private static void assertEndpoints(final UpdateRemoteEndpoints msg, final Address address, final TestKit invoker) {
-        final Map<Address, Optional<RemoteRpcEndpoint>> endpoints = msg.getEndpoints();
+        final Map<Address, Optional<RemoteRpcEndpoint>> endpoints = msg.getRpcEndpoints();
         assertEquals(1, endpoints.size());
 
         final Optional<RemoteRpcEndpoint> maybeEndpoint = endpoints.get(address);
         assertNotNull(maybeEndpoint);
         assertTrue(maybeEndpoint.isPresent());
 
-        final RemoteRpcEndpoint endpoint = maybeEndpoint.get();
+        final RemoteRpcEndpoint endpoint = maybeEndpoint.orElseThrow();
         final ActorRef router = endpoint.getRouter();
         assertNotNull(router);
 
@@ -370,8 +368,7 @@ public class RpcRegistryTest {
         final int nRoutes = 500;
         final Collection<DOMRpcIdentifier> added = new ArrayList<>(nRoutes);
         for (int i = 0; i < nRoutes; i++) {
-            final DOMRpcIdentifier routeId = DOMRpcIdentifier.create(SchemaPath.create(true,
-                    QName.create(URI.create("/mockrpc"), "type" + i)));
+            final DOMRpcIdentifier routeId = DOMRpcIdentifier.create(QName.create("/mockrpc", "type" + i));
             added.add(routeId);
 
             //Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
@@ -404,9 +401,9 @@ public class RpcRegistryTest {
     }
 
     private List<DOMRpcIdentifier> createRouteIds() {
-        QName type = QName.create(URI.create("/mockrpc"), "mockrpc" + routeIdCounter++);
+        QName type = QName.create("/mockrpc", "mockrpc" + routeIdCounter++);
         List<DOMRpcIdentifier> routeIds = new ArrayList<>(1);
-        routeIds.add(DOMRpcIdentifier.create(SchemaPath.create(true, type)));
+        routeIds.add(DOMRpcIdentifier.create(type));
         return routeIds;
     }
 }
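The hunks above complete the move away from SchemaPath: DOMRpcIdentifier is now built directly from a QName. A before/after sketch of the migration (illustrative only, mirroring the changed test lines):

    // Before: identifier built through SchemaPath, whose import is removed by this patch
    // DOMRpcIdentifier.create(SchemaPath.create(true, QName.create("/mockrpc", "type")));

    // After: the QName is used directly
    DOMRpcIdentifier routeId = DOMRpcIdentifier.create(QName.create("/mockrpc", "type"));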
index b9784ab390a1a85ecc089b90b9373f5361fa45d1..fc364015986085d7351a8ebe9a58d186af1da3d3 100644 (file)
@@ -22,18 +22,16 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 import org.opendaylight.controller.remote.rpc.TerminationMonitor;
 
 public class BucketStoreTest {
-
     /**
      * Dummy class to eliminate rawtype warnings.
      *
      * @author gwu
-     *
      */
-    private static class T implements BucketData<T> {
+    private static final class T implements BucketData<T> {
         @Override
         public Optional<ActorRef> getWatchActor() {
             return Optional.empty();
@@ -140,13 +138,13 @@ public class BucketStoreTest {
      */
     private static BucketStoreActor<T> createStore() {
         final Props props = Props.create(TestingBucketStoreActor.class,
-                new RemoteRpcProviderConfig(system.settings().config()), "testing-store",new T());
+                new RemoteOpsProviderConfig(system.settings().config()), "testing-store",new T());
         return TestActorRef.<BucketStoreActor<T>>create(system, props, "testStore").underlyingActor();
     }
 
     private static final class TestingBucketStoreActor extends BucketStoreActor<T> {
 
-        protected TestingBucketStoreActor(final RemoteRpcProviderConfig config,
+        protected TestingBucketStoreActor(final RemoteOpsProviderConfig config,
                                           final String persistenceId,
                                           final T initialData) {
             super(config, persistenceId, initialData);
index 6c826ac7bf036a592f7724b1f5b8ab0719d58d26..7ed3112ddb079bf9a7a55690806c99d0871d3d3c 100644 (file)
@@ -7,8 +7,8 @@
  */
 package org.opendaylight.controller.remote.rpc.registry.gossip;
 
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyMap;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyMap;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
@@ -29,10 +29,9 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 import org.opendaylight.controller.remote.rpc.TerminationMonitor;
 
-
 public class GossiperTest {
 
     private static ActorSystem system;
@@ -106,8 +105,8 @@ public class GossiperTest {
      * @return instance of Gossiper class
      */
     private static Gossiper createGossiper() {
-        final RemoteRpcProviderConfig config =
-                new RemoteRpcProviderConfig.Builder("unit-test")
+        final RemoteOpsProviderConfig config =
+                new RemoteOpsProviderConfig.Builder("unit-test")
                         .withConfigReader(ConfigFactory::load).build();
         final Props props = Gossiper.testProps(config);
         final TestActorRef<Gossiper> testRef = TestActorRef.create(system, props, "testGossiper");
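The GossiperTest imports switch from org.mockito.Matchers to org.mockito.ArgumentMatchers; the former has long been deprecated and is absent from newer Mockito releases, while call sites stay untouched. A small hypothetical sketch (the collaborator interface is made up for illustration):

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.ArgumentMatchers.anyMap;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;

    import java.util.Map;

    interface Collaborator {
        void update(Object key, Map<String, String> values);
    }

    // Collaborator mocked = mock(Collaborator.class);
    // mocked.update("key", Map.of());
    // verify(mocked).update(any(), anyMap());   // only the import location changed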
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImplTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/mbeans/RemoteActionRegistryMXBeanImplTest.java
new file mode 100644 (file)
index 0000000..e53fce4
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2019 Nordix Foundation.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.mbeans;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.dispatch.Dispatchers;
+import akka.testkit.TestActorRef;
+import akka.testkit.javadsl.TestKit;
+import akka.util.Timeout;
+import com.google.common.collect.Lists;
+import com.typesafe.config.ConfigFactory;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
+import org.opendaylight.controller.remote.rpc.registry.ActionRegistry;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMActionInstance;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.stmt.SchemaNodeIdentifier.Absolute;
+
+public class RemoteActionRegistryMXBeanImplTest {
+
+    private static final QName LOCAL_QNAME = QName.create("base", "local");
+    private static final QName REMOTE_QNAME = QName.create("base", "remote");
+    private static final Absolute LOCAL_SCHEMA_PATH = Absolute.of(LOCAL_QNAME);
+    private static final Absolute REMOTE_SCHEMA_PATH = Absolute.of(REMOTE_QNAME);
+
+    private ActorSystem system;
+    private TestActorRef<ActionRegistry> testActor;
+    private List<DOMActionInstance> buckets;
+    private RemoteActionRegistryMXBeanImpl mxBean;
+
+    @Before
+    public void setUp() {
+        system = ActorSystem.create("test", ConfigFactory.load().getConfig("unit-test"));
+
+        final DOMActionInstance emptyActionIdentifier = DOMActionInstance.of(
+                REMOTE_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of());
+        final DOMActionInstance localActionIdentifier = DOMActionInstance.of(
+                LOCAL_SCHEMA_PATH, LogicalDatastoreType.OPERATIONAL, YangInstanceIdentifier.of(LOCAL_QNAME));
+
+        buckets = Lists.newArrayList(emptyActionIdentifier, localActionIdentifier);
+
+        final RemoteOpsProviderConfig config = new RemoteOpsProviderConfig.Builder("system").build();
+        final TestKit invoker = new TestKit(system);
+        final TestKit registrar = new TestKit(system);
+        final TestKit supervisor = new TestKit(system);
+        final Props props = ActionRegistry.props(config, invoker.getRef(), registrar.getRef())
+                .withDispatcher(Dispatchers.DefaultDispatcherId());
+        testActor = new TestActorRef<>(system, props, supervisor.getRef(), "testActor");
+
+        final Timeout timeout = Timeout.apply(10, TimeUnit.SECONDS);
+        mxBean = new RemoteActionRegistryMXBeanImpl(new BucketStoreAccess(testActor, system.dispatcher(), timeout),
+                timeout);
+    }
+
+    @After
+    public void tearDown() {
+        TestKit.shutdownActorSystem(system, Boolean.TRUE);
+    }
+
+    @Test
+    public void testGetLocalRegisteredRoutedActionEmptyBuckets() {
+        final Set<String> localRegisteredRoutedAction = mxBean.getLocalRegisteredAction();
+
+        Assert.assertNotNull(localRegisteredRoutedAction);
+        Assert.assertTrue(localRegisteredRoutedAction.isEmpty());
+    }
+
+    @Test
+    public void testGetLocalRegisteredRoutedAction() {
+        testActor.tell(new ActionRegistry.Messages.UpdateActions(Lists.newArrayList(buckets),
+                Collections.emptyList()), ActorRef.noSender());
+        final Set<String> localRegisteredRoutedAction = mxBean.getLocalRegisteredAction();
+
+        Assert.assertNotNull(localRegisteredRoutedAction);
+        Assert.assertEquals(1, localRegisteredRoutedAction.size());
+
+        final String localAction = localRegisteredRoutedAction.iterator().next();
+        Assert.assertTrue(localAction.contains(LOCAL_QNAME.toString()));
+        Assert.assertTrue(localAction.contains(LOCAL_SCHEMA_PATH.toString()));
+    }
+
+    @Test
+    public void testFindActionByNameEmptyBuckets() {
+        final Map<String, String> rpcByName = mxBean.findActionByName("");
+
+        Assert.assertNotNull(rpcByName);
+        Assert.assertTrue(rpcByName.isEmpty());
+    }
+
+    @Test
+    public void testFindActionByName() {
+        testActor.tell(new ActionRegistry.Messages.UpdateActions(Lists.newArrayList(buckets),
+                Collections.emptyList()), ActorRef.noSender());
+        final Map<String, String> rpcByName = mxBean.findActionByName("");
+
+        Assert.assertNotNull(rpcByName);
+        Assert.assertEquals(1, rpcByName.size());
+        Assert.assertTrue(rpcByName.containsValue(LOCAL_QNAME.getLocalName()));
+    }
+
+    @Test
+    public void testFindActionByRouteEmptyBuckets() {
+        final Map<String, String> rpcByRoute = mxBean.findActionByRoute("");
+
+        Assert.assertNotNull(rpcByRoute);
+        Assert.assertTrue(rpcByRoute.isEmpty());
+    }
+
+    @Test
+    public void testFindActionByRoute() {
+        testActor.tell(new ActionRegistry.Messages.UpdateActions(Lists.newArrayList(buckets),
+                Collections.emptyList()), ActorRef.noSender());
+        final Map<String, String> rpcByRoute = mxBean.findActionByRoute("");
+
+        Assert.assertNotNull(rpcByRoute);
+        Assert.assertEquals(1, rpcByRoute.size());
+        Assert.assertTrue(rpcByRoute.containsValue(LOCAL_QNAME.getLocalName()));
+    }
+
+    @Test
+    public void testGetBucketVersionsEmptyBuckets() {
+        final String bucketVersions = mxBean.getBucketVersions();
+        Assert.assertEquals(Collections.emptyMap().toString(), bucketVersions);
+    }
+
+    @Test
+    public void testGetBucketVersions() {
+        testActor.tell(new ActionRegistry.Messages.UpdateActions(Lists.newArrayList(buckets),
+                Collections.emptyList()), ActorRef.noSender());
+        final String bucketVersions = mxBean.getBucketVersions();
+
+        Assert.assertTrue(bucketVersions.contains(testActor.provider().getDefaultAddress().toString()));
+    }
+}
index d445ae1e6e31e0567e829e0afe8a1f8e9ef86ccd..26f4a1fa32deaf136ec760103293c27ea9dc9a38 100644 (file)
@@ -7,6 +7,12 @@
  */
 package org.opendaylight.controller.remote.rpc.registry.mbeans;
 
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
@@ -15,28 +21,25 @@ import akka.testkit.TestActorRef;
 import akka.testkit.javadsl.TestKit;
 import akka.util.Timeout;
 import com.google.common.collect.Lists;
+import com.typesafe.config.ConfigFactory;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
+import org.opendaylight.controller.remote.rpc.RemoteOpsProviderConfig;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
 import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStoreAccess;
 import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
 
 public class RemoteRpcRegistryMXBeanImplTest {
-
     private static final QName LOCAL_QNAME = QName.create("base", "local");
-    private static final SchemaPath EMPTY_SCHEMA_PATH = SchemaPath.ROOT;
-    private static final SchemaPath LOCAL_SCHEMA_PATH = SchemaPath.create(true, LOCAL_QNAME);
+    private static final QName REMOTE_QNAME = QName.create("base", "remote");
 
     private ActorSystem system;
     private TestActorRef<RpcRegistry> testActor;
@@ -45,16 +48,16 @@ public class RemoteRpcRegistryMXBeanImplTest {
 
     @Before
     public void setUp() {
-        system = ActorSystem.create("test");
+        system = ActorSystem.create("test", ConfigFactory.load().getConfig("unit-test"));
 
         final DOMRpcIdentifier emptyRpcIdentifier = DOMRpcIdentifier.create(
-                EMPTY_SCHEMA_PATH, YangInstanceIdentifier.EMPTY);
+                REMOTE_QNAME, YangInstanceIdentifier.of());
         final DOMRpcIdentifier localRpcIdentifier = DOMRpcIdentifier.create(
-                LOCAL_SCHEMA_PATH, YangInstanceIdentifier.of(LOCAL_QNAME));
+                LOCAL_QNAME, YangInstanceIdentifier.of(LOCAL_QNAME));
 
         buckets = Lists.newArrayList(emptyRpcIdentifier, localRpcIdentifier);
 
-        final RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("system").build();
+        final RemoteOpsProviderConfig config = new RemoteOpsProviderConfig.Builder("system").build();
         final TestKit invoker = new TestKit(system);
         final TestKit registrar = new TestKit(system);
         final TestKit supervisor = new TestKit(system);
@@ -76,8 +79,8 @@ public class RemoteRpcRegistryMXBeanImplTest {
     public void testGetGlobalRpcEmptyBuckets() {
         final Set<String> globalRpc = mxBean.getGlobalRpc();
 
-        Assert.assertNotNull(globalRpc);
-        Assert.assertTrue(globalRpc.isEmpty());
+        assertNotNull(globalRpc);
+        assertTrue(globalRpc.isEmpty());
     }
 
     @Test
@@ -85,19 +88,19 @@ public class RemoteRpcRegistryMXBeanImplTest {
         testActor.tell(new RpcRegistry.Messages.AddOrUpdateRoutes(Lists.newArrayList(buckets)), ActorRef.noSender());
         final Set<String> globalRpc = mxBean.getGlobalRpc();
 
-        Assert.assertNotNull(globalRpc);
-        Assert.assertEquals(1, globalRpc.size());
+        assertNotNull(globalRpc);
+        assertEquals(1, globalRpc.size());
 
         final String rpc = globalRpc.iterator().next();
-        Assert.assertEquals(EMPTY_SCHEMA_PATH.toString(), rpc);
+        assertEquals(REMOTE_QNAME.toString(), rpc);
     }
 
     @Test
     public void testGetLocalRegisteredRoutedRpcEmptyBuckets() {
         final Set<String> localRegisteredRoutedRpc = mxBean.getLocalRegisteredRoutedRpc();
 
-        Assert.assertNotNull(localRegisteredRoutedRpc);
-        Assert.assertTrue(localRegisteredRoutedRpc.isEmpty());
+        assertNotNull(localRegisteredRoutedRpc);
+        assertTrue(localRegisteredRoutedRpc.isEmpty());
     }
 
     @Test
@@ -105,20 +108,19 @@ public class RemoteRpcRegistryMXBeanImplTest {
         testActor.tell(new RpcRegistry.Messages.AddOrUpdateRoutes(Lists.newArrayList(buckets)), ActorRef.noSender());
         final Set<String> localRegisteredRoutedRpc = mxBean.getLocalRegisteredRoutedRpc();
 
-        Assert.assertNotNull(localRegisteredRoutedRpc);
-        Assert.assertEquals(1, localRegisteredRoutedRpc.size());
+        assertNotNull(localRegisteredRoutedRpc);
+        assertEquals(1, localRegisteredRoutedRpc.size());
 
         final String localRpc = localRegisteredRoutedRpc.iterator().next();
-        Assert.assertTrue(localRpc.contains(LOCAL_QNAME.toString()));
-        Assert.assertTrue(localRpc.contains(LOCAL_SCHEMA_PATH.toString()));
+        assertThat(localRpc, containsString(LOCAL_QNAME.toString()));
     }
 
     @Test
     public void testFindRpcByNameEmptyBuckets() {
         final Map<String, String> rpcByName = mxBean.findRpcByName("");
 
-        Assert.assertNotNull(rpcByName);
-        Assert.assertTrue(rpcByName.isEmpty());
+        assertNotNull(rpcByName);
+        assertTrue(rpcByName.isEmpty());
     }
 
     @Test
@@ -126,17 +128,17 @@ public class RemoteRpcRegistryMXBeanImplTest {
         testActor.tell(new RpcRegistry.Messages.AddOrUpdateRoutes(Lists.newArrayList(buckets)), ActorRef.noSender());
         final Map<String, String> rpcByName = mxBean.findRpcByName("");
 
-        Assert.assertNotNull(rpcByName);
-        Assert.assertEquals(1, rpcByName.size());
-        Assert.assertTrue(rpcByName.containsValue(LOCAL_QNAME.getLocalName()));
+        assertNotNull(rpcByName);
+        assertEquals(1, rpcByName.size());
+        assertTrue(rpcByName.containsValue(LOCAL_QNAME.getLocalName()));
     }
 
     @Test
     public void testFindRpcByRouteEmptyBuckets() {
         final Map<String, String> rpcByRoute = mxBean.findRpcByRoute("");
 
-        Assert.assertNotNull(rpcByRoute);
-        Assert.assertTrue(rpcByRoute.isEmpty());
+        assertNotNull(rpcByRoute);
+        assertTrue(rpcByRoute.isEmpty());
     }
 
     @Test
@@ -144,15 +146,15 @@ public class RemoteRpcRegistryMXBeanImplTest {
         testActor.tell(new RpcRegistry.Messages.AddOrUpdateRoutes(Lists.newArrayList(buckets)), ActorRef.noSender());
         final Map<String, String> rpcByRoute = mxBean.findRpcByRoute("");
 
-        Assert.assertNotNull(rpcByRoute);
-        Assert.assertEquals(1, rpcByRoute.size());
-        Assert.assertTrue(rpcByRoute.containsValue(LOCAL_QNAME.getLocalName()));
+        assertNotNull(rpcByRoute);
+        assertEquals(1, rpcByRoute.size());
+        assertTrue(rpcByRoute.containsValue(LOCAL_QNAME.getLocalName()));
     }
 
     @Test
     public void testGetBucketVersionsEmptyBuckets() {
         final String bucketVersions = mxBean.getBucketVersions();
-        Assert.assertEquals(Collections.EMPTY_MAP.toString(), bucketVersions);
+        assertEquals(Collections.emptyMap().toString(), bucketVersions);
     }
 
     @Test
@@ -160,6 +162,6 @@ public class RemoteRpcRegistryMXBeanImplTest {
         testActor.tell(new RpcRegistry.Messages.AddOrUpdateRoutes(Lists.newArrayList(buckets)), ActorRef.noSender());
         final String bucketVersions = mxBean.getBucketVersions();
 
-        Assert.assertTrue(bucketVersions.contains(testActor.provider().getDefaultAddress().toString()));
+        assertTrue(bucketVersions.contains(testActor.provider().getDefaultAddress().toString()));
     }
 }
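The RPC MXBean test now uses Hamcrest's assertThat with containsString instead of assertTrue over String.contains; the Hamcrest form reports both the expected substring and the actual value when it fails. Illustrative comparison (not part of the patch):

    // assertTrue(localRpc.contains(LOCAL_QNAME.toString()));          // failure message carries no detail
    assertThat(localRpc, containsString(LOCAL_QNAME.toString()));      // failure prints expected and actual strings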
index 11ad5eceb72638703ade0672dbca6ac15519a4a2..3324665c59147e071133788165f5660549c9e76f 100644 (file)
@@ -31,8 +31,6 @@ odl-cluster-rpc {
 
     cluster {
       seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2550"]
-
-      auto-down-unreachable-after = 10s
     }
   }
 }
@@ -94,8 +92,6 @@ memberA {
 
     cluster {
       seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
-      auto-down-unreachable-after = 10s
     }
   }
   in-memory-journal {
@@ -142,8 +138,6 @@ memberB {
 
     cluster {
       seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
-      auto-down-unreachable-after = 10s
     }
   }
   in-memory-journal {
@@ -190,8 +184,6 @@ memberC {
 
     cluster {
       seed-nodes = ["akka://opendaylight-rpc@127.0.0.1:2551"]
-
-      auto-down-unreachable-after = 10s
     }
   }
   in-memory-journal {
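The configuration hunks above drop auto-down-unreachable-after, which newer Akka releases no longer honour as an automatic downing mechanism. If automatic downing were still wanted for such test clusters, one possible replacement (an assumption, not part of this patch) is Akka's split-brain resolver:

    akka.cluster {
      # Akka 2.6+ style downing provider; illustrative only
      downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
    }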
index 7438d3795dca5e49b8b05f0a9415f72fea133ee9..ce09fbb5fba3a16e76911dfe57aa07e5382496d0 100644 (file)
@@ -6,13 +6,11 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../parent</relativePath>
     </parent>
 
-    <groupId>org.opendaylight.controller</groupId>
     <artifactId>sal-test-model</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
     <packaging>bundle</packaging>
 
     <dependencies>
index a1ab580067028baf8ff97ab6549a2cd5baf519c3..726b2a6713b950d5774a7e0290c82256a5847747 100644 (file)
@@ -7,7 +7,9 @@
  */
 package org.opendaylight.controller.md.sal.test.model.util;
 
-import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableMap.Builder;
+import com.google.common.collect.Maps;
 import java.util.Arrays;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugmentBuilder;
@@ -28,18 +30,16 @@ import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 
 public final class ListsBindingUtils {
-
     private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
 
-    private ListsBindingUtils() {
-        throw new UnsupportedOperationException();
-    }
-
     public static final TopLevelListKey TOP_FOO_KEY = new TopLevelListKey("foo");
     public static final TopLevelListKey TOP_BAR_KEY = new TopLevelListKey("bar");
     public static final ListViaUsesKey USES_ONE_KEY = new ListViaUsesKey("one");
     public static final ListViaUsesKey USES_TWO_KEY = new ListViaUsesKey("two");
 
+    private ListsBindingUtils() {
+        // Hidden on purpose
+    }
 
     public static InstanceIdentifier<TopLevelList> path(final TopLevelListKey key) {
         return TOP_PATH.child(TopLevelList.class, key);
@@ -59,7 +59,7 @@ public final class ListsBindingUtils {
     }
 
     public static Top top(final TopLevelList... listItems) {
-        return new TopBuilder().setTopLevelList(Arrays.asList(listItems)).build();
+        return new TopBuilder().setTopLevelList(Maps.uniqueIndex(Arrays.asList(listItems), TopLevelList::key)).build();
     }
 
     public static TopLevelList topLevelList(final TopLevelListKey key) {
@@ -67,15 +67,17 @@ public final class ListsBindingUtils {
     }
 
     public static TopLevelList topLevelList(final TopLevelListKey key, final TreeComplexUsesAugment augment) {
-        TopLevelListBuilder builder = new TopLevelListBuilder().withKey(key);
-        builder.addAugmentation(TreeComplexUsesAugment.class, augment);
-        return builder.build();
+        final TopLevelListBuilder builder = new TopLevelListBuilder();
+        if (augment != null) {
+            builder.addAugmentation(augment);
+        }
+        return builder.withKey(key).build();
     }
 
     public static TreeComplexUsesAugment complexUsesAugment(final ListViaUsesKey... keys) {
-        ImmutableList.Builder<ListViaUses> listViaUses = ImmutableList.<ListViaUses>builder();
+        Builder<ListViaUsesKey, ListViaUses> listViaUses = ImmutableMap.builderWithExpectedSize(keys.length);
         for (ListViaUsesKey key : keys) {
-            listViaUses.add(new ListViaUsesBuilder().withKey(key).build());
+            listViaUses.put(key, new ListViaUsesBuilder().withKey(key).build());
         }
         return new TreeComplexUsesAugmentBuilder().setListViaUses(listViaUses.build()).build();
     }
@@ -84,5 +86,4 @@ public final class ListsBindingUtils {
 
         return new TreeLeafOnlyUsesAugmentBuilder().setLeafFromGrouping(leafFromGroupingValue).build();
     }
-
 }
index 68f2c27f26155b8bd28f6e202e7d9f9644600a92..0fc796cac834888b71c1ff78aa0d70ff69af340d 100644 (file)
@@ -7,44 +7,48 @@
  and is available at http://www.eclipse.org/legal/epl-v10.html
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
+    <modelVersion>4.0.0</modelVersion>
     <parent>
-        <artifactId>clustering-it</artifactId>
-        <groupId>org.opendaylight.controller.samples</groupId>
-        <version>1.10.0-SNAPSHOT</version>
+        <groupId>org.opendaylight.odlparent</groupId>
+        <artifactId>odlparent-lite</artifactId>
+        <version>13.0.11</version>
+        <relativePath/>
     </parent>
+
+    <groupId>org.opendaylight.controller.samples</groupId>
     <artifactId>clustering-it-config</artifactId>
-  <packaging>jar</packaging>
-  <build>
-    <plugins>
-        <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>attach-artifacts</id>
-            <goals>
-              <goal>attach-artifact</goal>
-            </goals>
-            <phase>package</phase>
-            <configuration>
-              <artifacts>
-                  <artifact>
-                      <file>${project.build.directory}/classes/initial/module-shards.conf</file>
-                      <type>xml</type>
-                      <classifier>testmoduleshardconf</classifier>
-                  </artifact>
-                  <artifact>
-                      <file>${project.build.directory}/classes/initial/modules.conf</file>
-                      <type>xml</type>
-                      <classifier>testmoduleconf</classifier>
-                  </artifact>
-              </artifacts>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
+    <version>9.0.3-SNAPSHOT</version>
+    <packaging>jar</packaging>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>attach-artifacts</id>
+                        <goals>
+                            <goal>attach-artifact</goal>
+                        </goals>
+                        <phase>package</phase>
+                        <configuration>
+                            <artifacts>
+                                <artifact>
+                                    <file>${project.build.directory}/classes/initial/module-shards.conf</file>
+                                    <type>xml</type>
+                                    <classifier>testmoduleshardconf</classifier>
+                                </artifact>
+                                <artifact>
+                                    <file>${project.build.directory}/classes/initial/modules.conf</file>
+                                    <type>xml</type>
+                                    <classifier>testmoduleconf</classifier>
+                                </artifact>
+                            </artifacts>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
 </project>
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/pom.xml
new file mode 100644 (file)
index 0000000..450c703
--- /dev/null
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright © 2021 PANTHEON.tech, s.r.o. and others.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+ -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <artifactId>mdsal-parent</artifactId>
+        <groupId>org.opendaylight.controller</groupId>
+        <version>9.0.3-SNAPSHOT</version>
+        <relativePath>../../../parent/pom.xml</relativePath>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.opendaylight.controller.samples</groupId>
+    <artifactId>clustering-it-karaf-cli</artifactId>
+    <packaging>bundle</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.karaf.shell</groupId>
+            <artifactId>org.apache.karaf.shell.core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-dom-codec-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller.samples</groupId>
+            <artifactId>clustering-it-model</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-data-codec-gson</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.karaf.tooling</groupId>
+                <artifactId>karaf-services-maven-plugin</artifactId>
+                <version>${karaf.version}</version>
+                <executions>
+                    <execution>
+                        <id>service-metadata-generate</id>
+                        <phase>process-classes</phase>
+                        <goals>
+                            <goal>service-metadata-generate</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractDOMRpcAction.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractDOMRpcAction.java
new file mode 100644 (file)
index 0000000..b8fcf74
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import org.apache.karaf.shell.api.action.Action;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+
+public abstract class AbstractDOMRpcAction implements Action {
+    @Override
+    @SuppressWarnings("checkstyle:RegexpSinglelineJava")
+    public final Object execute() throws InterruptedException, ExecutionException {
+        final DOMRpcResult result = invokeRpc().get();
+        if (!result.errors().isEmpty()) {
+            // FIXME: is there a better way to report errors?
+            System.out.println("Invocation failed: " + result.errors());
+            return null;
+        } else {
+            return result.value().prettyTree().get();
+        }
+    }
+
+    protected abstract ListenableFuture<? extends DOMRpcResult> invokeRpc();
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractRpcAction.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/AbstractRpcAction.java
new file mode 100644 (file)
index 0000000..eb483bf
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.concurrent.ExecutionException;
+import org.apache.karaf.shell.api.action.Action;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+/**
+ * Common base class for all commands which end up invoking an RPC.
+ */
+public abstract class AbstractRpcAction implements Action {
+    @Override
+    @SuppressWarnings("checkstyle:RegexpSinglelineJava")
+    public final Object execute() throws InterruptedException, ExecutionException {
+        final RpcResult<?> result = invokeRpc().get();
+        if (!result.isSuccessful()) {
+            // FIXME: is there a better way to report errors?
+            System.out.println("Invocation failed: " + result.getErrors());
+            return null;
+        } else {
+            return result.getResult();
+        }
+    }
+
+    protected abstract ListenableFuture<? extends RpcResult<?>> invokeRpc();
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/DefaultInstanceIdentifierSupport.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/DefaultInstanceIdentifierSupport.java
new file mode 100644 (file)
index 0000000..f975977
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Verify.verifyNotNull;
+
+import java.util.Optional;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingCodecTree;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingInstanceIdentifierCodec;
+import org.opendaylight.mdsal.binding.runtime.api.BindingRuntimeContext;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.codec.gson.JSONCodecFactorySupplier;
+import org.opendaylight.yangtools.yang.data.util.codec.TypeAwareCodec;
+import org.opendaylight.yangtools.yang.model.api.Status;
+import org.opendaylight.yangtools.yang.model.api.TypeAware;
+import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
+import org.opendaylight.yangtools.yang.model.api.type.InstanceIdentifierTypeDefinition;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.component.annotations.RequireServiceComponentRuntime;
+
+@Component
+@RequireServiceComponentRuntime
+public final class DefaultInstanceIdentifierSupport implements InstanceIdentifierSupport {
+    private final BindingInstanceIdentifierCodec bindingCodec;
+    private final TypeAwareCodec<?, ?, ?> jsonCodec;
+
+    @Activate
+    public DefaultInstanceIdentifierSupport(@Reference final BindingCodecTree bindingCodecTree,
+            @Reference final BindingRuntimeContext runtimeContext) {
+        bindingCodec = bindingCodecTree.getInstanceIdentifierCodec();
+        jsonCodec = JSONCodecFactorySupplier.RFC7951.createLazy(runtimeContext.modelContext())
+            .codecFor(new FakeLeafDefinition(), null);
+    }
+
+    @Override
+    public InstanceIdentifier<?> parseArgument(final String argument) {
+        final YangInstanceIdentifier path = verifyNotNull((YangInstanceIdentifier)jsonCodec.parseValue(null, argument));
+        final InstanceIdentifier<?> ret = bindingCodec.toBinding(path);
+        checkArgument(ret != null, "%s does not have a binding representation", path);
+        return ret;
+    }
+
+    // Mock wiring for JSON codec. Perhaps we should really bind to context-ref, or receive the class, or something.
+    private static final class FakeLeafDefinition implements InstanceIdentifierTypeDefinition, TypeAware {
+        @Override
+        public Optional<String> getReference() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public Optional<String> getDescription() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public Status getStatus() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public QName getQName() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public Optional<String> getUnits() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public Optional<? extends Object> getDefaultValue() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public InstanceIdentifierTypeDefinition getBaseType() {
+            return null;
+        }
+
+        @Override
+        public boolean requireInstance() {
+            return false;
+        }
+
+        @Override
+        public TypeDefinition<? extends TypeDefinition<?>> getType() {
+            return this;
+        }
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/InstanceIdentifierSupport.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/InstanceIdentifierSupport.java
new file mode 100644 (file)
index 0000000..e7724dc
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli;
+
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+
+/**
+ * Codec providing translation between CLI representation and {@link InstanceIdentifier}. This is mostly useful for
+ * injecting invocation contexts for {@code routed RPC}s and actions.
+ */
+public interface InstanceIdentifierSupport {
+    /**
+     * Parse a CLI argument into its {@link InstanceIdentifier} representation.
+     *
+     * @param argument Argument to parse
+     * @return Parsed InstanceIdentifier
+     * @throws NullPointerException if {@code argument} is null
+     */
+    @NonNull InstanceIdentifier<?> parseArgument(String argument);
+}
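As the Javadoc above describes, InstanceIdentifierSupport turns a CLI argument into a binding InstanceIdentifier so routed RPC and action commands can supply an invocation context. A hypothetical usage sketch from a Karaf command (field and argument names are illustrative):

    // @Reference
    // private InstanceIdentifierSupport iidSupport;
    //
    // @Argument(index = 0, name = "context-path", required = true)
    // private String contextPath;
    //
    // InstanceIdentifier<?> context = iidSupport.parseArgument(contextPath);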
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterCommitCohortCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterCommitCohortCommand.java
new file mode 100644 (file)
index 0000000..13e44e3
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-commit-cohort", description = "Run a register-commit-cohort test")
+public class RegisterCommitCohortCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterCommitCohort.class).invoke(new RegisterCommitCohortInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterLoggingDtclCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterLoggingDtclCommand.java
new file mode 100644 (file)
index 0000000..2be5328
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtcl;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-logging-dtcl", description = "Run a register-logging-dtcl test")
+public class RegisterLoggingDtclCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterLoggingDtcl.class).invoke(new RegisterLoggingDtclInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterOwnershipCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/RegisterOwnershipCommand.java
new file mode 100644 (file)
index 0000000..6298a87
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnership;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-ownership", description = "Run a register-ownership test")
+public class RegisterOwnershipCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "car-id", required = true)
+    private String carId;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterOwnership.class)
+            .invoke(new RegisterOwnershipInputBuilder().setCarId(carId).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StopStressTestCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StopStressTestCommand.java
new file mode 100644 (file)
index 0000000..7626711
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTest;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app" , name = "stop-stress-test", description = "Run a stop-stress-test")
+public class StopStressTestCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(StopStressTest.class).invoke(new StopStressTestInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StressTestCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/StressTestCommand.java
new file mode 100644 (file)
index 0000000..8af63e1
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTest;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint16;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app" , name = "stress-test", description = "Run a stress-test")
+public class StressTestCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "rate", required = true)
+    private int rate;
+    @Argument(index = 1, name = "count", required = true)
+    private long count;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(StressTest.class).invoke(new StressTestInputBuilder()
+            .setRate(Uint16.valueOf(rate))
+            .setCount(Uint32.valueOf(count))
+            .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterCommitCohortCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterCommitCohortCommand.java
new file mode 100644 (file)
index 0000000..95182bc
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohort;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-commit-cohort", description = "Run a unregister-commit-cohort test")
+public class UnregisterCommitCohortCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterCommitCohort.class).invoke(new UnregisterCommitCohortInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterLoggingDtclsCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterLoggingDtclsCommand.java
new file mode 100644 (file)
index 0000000..763b8e0
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtcls;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-logging-dtcls", description = "Run and unregister-logging-dtcls test")
+public class UnregisterLoggingDtclsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterLoggingDtcls.class).invoke(new UnregisterLoggingDtclsInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterOwnershipCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/UnregisterOwnershipCommand.java
new file mode 100644 (file)
index 0000000..2b9730c
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnership;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-ownership", description = "Run an unregister-ownership test")
+public class UnregisterOwnershipCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "car-id", required = true)
+    private String carId;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterOwnership.class)
+            .invoke(new UnregisterOwnershipInputBuilder().setCarId(carId).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/purchase/BuyCarCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/car/purchase/BuyCarCommand.java
new file mode 100644 (file)
index 0000000..c5b5e67
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.car.purchase;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCar;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonId;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonRef;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "buy-car", description = "Run a buy-car test")
+public class BuyCarCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Reference
+    private InstanceIdentifierSupport iidSupport;
+    @Argument(index = 0, name = "person-ref", required = true)
+    private String personRef;
+    @Argument(index = 1, name = "car-id", required = true)
+    private CarId carId;
+    @Argument(index = 2, name = "person-id", required = true)
+    private PersonId personId;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(BuyCar.class).invoke(new BuyCarInputBuilder()
+            .setPerson(new PersonRef(iidSupport.parseArgument(personRef)))
+            .setCarId(carId)
+            .setPersonId(personId)
+            .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/AddShardReplicaCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/AddShardReplicaCommand.java
new file mode 100644 (file)
index 0000000..4f55f4c
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplica;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "add-shard-replica", description = "Run an add-shard-replica test")
+public class AddShardReplicaCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(AddShardReplica.class)
+            .invoke(new AddShardReplicaInputBuilder().setShardName(shardName).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/CheckPublishNotificationsCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/CheckPublishNotificationsCommand.java
new file mode 100644 (file)
index 0000000..d26a635
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotifications;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "check-publish-notifications",
+         description = "Run a check-publish-notifications test")
+public class CheckPublishNotificationsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "id", required = true)
+    private String id;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(CheckPublishNotifications.class)
+                .invoke(new CheckPublishNotificationsInputBuilder().setId(id).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/IsClientAbortedCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/IsClientAbortedCommand.java
new file mode 100644 (file)
index 0000000..7886dc3
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAborted;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "is-client-aborted", description = "Run an is-client-aborted test")
+public class IsClientAbortedCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(IsClientAborted.class).invoke(new IsClientAbortedInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterBoundConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterBoundConstantCommand.java
new file mode 100644 (file)
index 0000000..afc9e82
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-bound-constant", description = "Run a register-bound-constant test")
+public class RegisterBoundConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Reference
+    private InstanceIdentifierSupport iidSupport;
+    @Argument(index =  0, name = "context", required = true)
+    private String context;
+    @Argument(index =  1, name = "constant", required = true)
+    private String constant;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterBoundConstant.class)
+                .invoke(new RegisterBoundConstantInputBuilder()
+                        .setConstant(constant)
+                        .setContext(iidSupport.parseArgument(context))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterConstantCommand.java
new file mode 100644 (file)
index 0000000..abab942
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-contact", description = "Run a register-contact test")
+public class RegisterConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index =  0, name = "constant", required = true)
+    private String constant;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterConstant.class)
+                .invoke(new RegisterConstantInputBuilder().setConstant(constant).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterDefaultConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterDefaultConstantCommand.java
new file mode 100644 (file)
index 0000000..6896561
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-default-constant", description = "Run a register-default-constant test")
+public class RegisterDefaultConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index =  0, name = "constant", required = true)
+    private String constant;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterDefaultConstant.class)
+                .invoke(new RegisterDefaultConstantInputBuilder().setConstant(constant).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterFlappingSingletonCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterFlappingSingletonCommand.java
new file mode 100644 (file)
index 0000000..4ae4288
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingleton;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-flapping-singleton",
+         description = "Run a register-flapping-singleton test")
+public class RegisterFlappingSingletonCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterFlappingSingleton.class)
+                .invoke(new RegisterFlappingSingletonInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterSingletonConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RegisterSingletonConstantCommand.java
new file mode 100644 (file)
index 0000000..7901a23
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "register-singleton-constant",
+         description = "Run a register-singleton-constant text")
+public class RegisterSingletonConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "constant", required = true)
+    private String constant;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RegisterSingletonConstant.class)
+                .invoke(new RegisterSingletonConstantInputBuilder().setConstant(constant).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RemoveShardReplicaCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/RemoveShardReplicaCommand.java
new file mode 100644 (file)
index 0000000..3382201
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplica;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "remove-shard-replica", description = "Run a remove-shard-replica test")
+public class RemoveShardReplicaCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(RemoveShardReplica.class)
+                .invoke(new RemoveShardReplicaInputBuilder().setShardName(shardName).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/ShutdownShardReplicaCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/ShutdownShardReplicaCommand.java
new file mode 100644 (file)
index 0000000..79e619b
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplica;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "shutdown-shard-replica", description = " Run a shutdown-shard-replica test")
+public class ShutdownShardReplicaCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "shard-name", required = true)
+    private String shardName;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(ShutdownShardReplica.class)
+                .invoke(new ShutdownShardReplicaInputBuilder()
+                        .setShardName(shardName)
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/StartPublishNotificationsCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/StartPublishNotificationsCommand.java
new file mode 100644 (file)
index 0000000..fdae5a7
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotifications;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app", name = "start-publish-notifications",
+         description = "Run a start-publish-notifications test")
+public class StartPublishNotificationsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "id", required = true)
+    private String id;
+    @Argument(index = 1, name = "seconds", required = true)
+    private long seconds;
+    @Argument(index = 2, name = "notifications-per-second", required = true)
+    private long notificationsPerSecond;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(StartPublishNotifications.class)
+                .invoke(new StartPublishNotificationsInputBuilder()
+                        .setId(id)
+                        .setSeconds(Uint32.valueOf(seconds))
+                        .setNotificationsPerSecond(Uint32.valueOf(notificationsPerSecond))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDdtlCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDdtlCommand.java
new file mode 100644 (file)
index 0000000..b23b63f
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "subscribe-ddtl", description = "Run a subscribe-ddtl test")
+public class SubscribeDdtlCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(SubscribeDdtl.class).invoke(new SubscribeDdtlInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDtclCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeDtclCommand.java
new file mode 100644 (file)
index 0000000..33b5ea1
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtcl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "subscribe-dtcl", description = "Run a subscribe-dtcl test")
+public class SubscribeDtclCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(SubscribeDtcl.class).invoke(new SubscribeDtclInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeYnlCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/SubscribeYnlCommand.java
new file mode 100644 (file)
index 0000000..fc4ab18
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "subscribe-ynl", description = "Run a subscribe-ynl test")
+public class SubscribeYnlCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "id", required = true)
+    private String id;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(SubscribeYnl.class).invoke(new SubscribeYnlInputBuilder().setId(id).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterBoundConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterBoundConstantCommand.java
new file mode 100644 (file)
index 0000000..5274a99
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-bound-constant", description = "Run an unregister-bound-constant test")
+public class UnregisterBoundConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Reference
+    private InstanceIdentifierSupport iidSupport;
+    @Argument(index = 0, name = "context", required = true)
+    private String context;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterBoundConstant.class)
+                .invoke(new UnregisterBoundConstantInputBuilder()
+                        .setContext(iidSupport.parseArgument(context))
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterConstantCommand.java
new file mode 100644 (file)
index 0000000..ea857db
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-constant", description = "Run an unregister-constant test")
+public class UnregisterConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterConstant.class).invoke(new UnregisterConstantInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterDefaultConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterDefaultConstantCommand.java
new file mode 100644 (file)
index 0000000..d17fad9
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-default-constant",
+         description = "Run an unregister-default-constant test")
+public class UnregisterDefaultConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterDefaultConstant.class)
+            .invoke(new UnregisterDefaultConstantInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterFlappingSingletonCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterFlappingSingletonCommand.java
new file mode 100644 (file)
index 0000000..17954a7
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingleton;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-flapping-singleton",
+        description = "Run an unregister-flapping-singleton test")
+public class UnregisterFlappingSingletonCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterFlappingSingleton.class)
+                .invoke(new UnregisterFlappingSingletonInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterSingletonConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnregisterSingletonConstantCommand.java
new file mode 100644 (file)
index 0000000..32bd3bc
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstant;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unregister-singleton-constant",
+        description = "Run an unregister-singleton-constant test")
+public class UnregisterSingletonConstantCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnregisterSingletonConstant.class)
+                .invoke(new UnregisterSingletonConstantInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDdtlCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDdtlCommand.java
new file mode 100644 (file)
index 0000000..1a6c18f
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unsubscribe-ddtl", description = "Run an unsubscribe-ddtl test")
+public class UnsubscribeDdtlCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnsubscribeDdtl.class).invoke(new UnsubscribeDdtlInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDtclCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeDtclCommand.java
new file mode 100644 (file)
index 0000000..7ad6cc4
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtcl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unsubscribe-dtcl", description = "Run an unsubscribe-dtcl test")
+public class UnsubscribeDtclCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnsubscribeDtcl.class).invoke(new UnsubscribeDtclInputBuilder().build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeYnlCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/UnsubscribeYnlCommand.java
new file mode 100644 (file)
index 0000000..0f83b6a
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnl;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "unsubscribe-ynl", description = "Run an unsubscribe-ynl test")
+public class UnsubscribeYnlCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "id", required = true)
+    private String id;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(UnsubscribeYnl.class)
+                .invoke(new UnsubscribeYnlInputBuilder().setId(id).build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/WriteTransactionsCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/control/WriteTransactionsCommand.java
new file mode 100644 (file)
index 0000000..d6da192
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.control;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactions;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app", name = "write-transactions", description = "Run a write-transactions test")
+public class WriteTransactionsCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "id", required = true)
+    private String id;
+    @Argument(index = 1, name = "seconds", required = true)
+    private long seconds;
+    @Argument(index = 2, name = "trasactions-per-second", required = true)
+    private long transactionsPerSecond;
+    @Argument(index = 3, name = "chained-transations", required = true)
+    private boolean chainedTransactions;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(WriteTransactions.class)
+                .invoke(new WriteTransactionsInputBuilder()
+                        .setId(id)
+                        .setSeconds(Uint32.valueOf(seconds))
+                        .setTransactionsPerSecond(Uint32.valueOf(transactionsPerSecond))
+                        .setChainedTransactions(chainedTransactions)
+                        .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetConstantCommand.java
new file mode 100644 (file)
index 0000000..8cc874a
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-constant", description = "Run an get-constant test")
+public class GetConstantCommand extends AbstractDOMRpcAction {
+    @Reference
+    private DOMRpcService rpcService;
+    @Reference
+    private BindingNormalizedNodeSerializer serializer;
+
+    @Override
+    protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+        final ContainerNode input = serializer.toNormalizedNodeRpcData(new GetConstantInputBuilder().build());
+        return rpcService.invokeRpc(QName.create(GetConstantInput.QNAME, "get-constant"), input);
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetContextedConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetContextedConstantCommand.java
new file mode 100644 (file)
index 0000000..762daad
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.clustering.it.karaf.cli.InstanceIdentifierSupport;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetContextedConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetContextedConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-contexted-constant", description = "Run an get-contexted-constant test")
+public class GetContextedConstantCommand extends AbstractDOMRpcAction {
+    @Reference
+    private DOMRpcService rpcService;
+    @Reference
+    private BindingNormalizedNodeSerializer serializer;
+    @Reference
+    private InstanceIdentifierSupport iidSupport;
+    @Argument(index = 0, name = "context", required = true)
+    private String context;
+
+    @Override
+    protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+        final ContainerNode inputNode = serializer.toNormalizedNodeRpcData(new GetContextedConstantInputBuilder()
+            .setContext(iidSupport.parseArgument(context))
+            .build());
+        return rpcService.invokeRpc(QName.create(GetContextedConstantInput.QNAME, "get-contexted-constant"), inputNode);
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetSingletonConstantCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/odl/mdsal/lowlevel/tgt/GetSingletonConstantCommand.java
new file mode 100644 (file)
index 0000000..1713ecc
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2022 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.odl.mdsal.lowlevel.tgt;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractDOMRpcAction;
+import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.api.DOMRpcService;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetSingletonConstantInput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetSingletonConstantInputBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+
+@Service
+@Command(scope = "test-app", name = "get-singleton-constant", description = "Run an get-singleton-constant test")
+public class GetSingletonConstantCommand extends AbstractDOMRpcAction {
+    @Reference
+    private DOMRpcService rpcService;
+    @Reference
+    private BindingNormalizedNodeSerializer serializer;
+
+    @Override
+    protected ListenableFuture<? extends DOMRpcResult> invokeRpc() {
+        final ContainerNode inputNode =
+                serializer.toNormalizedNodeRpcData(new GetSingletonConstantInputBuilder().build());
+        return rpcService.invokeRpc(QName.create(GetSingletonConstantInput.QNAME, "get-singleton-constant"), inputNode);
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/people/AddPersonCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/people/AddPersonCommand.java
new file mode 100644 (file)
index 0000000..3eaec8d
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.people;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Argument;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPerson;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonId;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
+
+@Service
+@Command(scope = "test-app", name = "add-person", description = " Run an add-person test")
+public class AddPersonCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+    @Argument(index = 0, name = "id", required = true)
+    private PersonId id;
+    @Argument(index = 1, name = "gender", required = true)
+    private String gender;
+    @Argument(index = 2, name = "age", required = true)
+    private long age;
+    @Argument(index = 3, name = "address", required = true)
+    private String address;
+    @Argument(index = 4, name = "contactNo", required = true)
+    private String contactNo;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(AddPerson.class).invoke(new AddPersonInputBuilder()
+                .setId(id)
+                .setGender(gender)
+                .setAge(Uint32.valueOf(age))
+                .setAddress(address)
+                .setContactNo(contactNo)
+                .build());
+    }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/rpc/test/BasicGlobalCommand.java b/opendaylight/md-sal/samples/clustering-test-app/karaf-cli/src/main/java/org/opendaylight/clustering/it/karaf/cli/rpc/test/BasicGlobalCommand.java
new file mode 100644 (file)
index 0000000..a61722c
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2021 PANTHEON.tech, s.r.o. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.clustering.it.karaf.cli.rpc.test;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.karaf.shell.api.action.Command;
+import org.apache.karaf.shell.api.action.lifecycle.Reference;
+import org.apache.karaf.shell.api.action.lifecycle.Service;
+import org.opendaylight.clustering.it.karaf.cli.AbstractRpcAction;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInputBuilder;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+
+@Service
+@Command(scope = "test-app", name = "global-basic", description = "Run a global-basic test")
+public class BasicGlobalCommand extends AbstractRpcAction {
+    @Reference
+    private RpcService rpcService;
+
+    @Override
+    protected ListenableFuture<? extends RpcResult<?>> invokeRpc() {
+        return rpcService.getRpc(BasicGlobal.class).invoke(new BasicGlobalInputBuilder().build());
+    }
+}
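
For reference, the karaf-cli commands added above follow two invocation patterns: the binding-aware commands (write-transactions, add-person, global-basic) obtain the generated Rpc implementation from RpcService and invoke it with a built input, while the get-*-constant commands serialize a binding input and invoke the RPC by QName through DOMRpcService. A minimal sketch of both patterns, reusing generated classes from this change; the wrapper class below is illustrative only and not part of the patch.

import com.google.common.util.concurrent.ListenableFuture;
import org.opendaylight.mdsal.binding.api.RpcService;
import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
import org.opendaylight.mdsal.dom.api.DOMRpcResult;
import org.opendaylight.mdsal.dom.api.DOMRpcService;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInput;
import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.GetConstantInputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInputBuilder;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcResult;

// Illustrative-only wrapper; both methods mirror the invokeRpc() bodies of the commands above.
final class RpcInvocationSketch {
    private RpcInvocationSketch() {
        // static helpers only
    }

    // Binding pattern: obtain the generated Rpc implementation and invoke it with a built input.
    static ListenableFuture<? extends RpcResult<?>> invokeBinding(final RpcService rpcService) {
        return rpcService.getRpc(BasicGlobal.class).invoke(new BasicGlobalInputBuilder().build());
    }

    // DOM pattern: serialize the binding input to normalized data and invoke the RPC by QName.
    static ListenableFuture<? extends DOMRpcResult> invokeDom(final DOMRpcService rpcService,
            final BindingNormalizedNodeSerializer serializer) {
        return rpcService.invokeRpc(QName.create(GetConstantInput.QNAME, "get-constant"),
            serializer.toNormalizedNodeRpcData(new GetConstantInputBuilder().build()));
    }
}
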
index d0ce9bfa286de7c36c90a38a03957ab566e13142..962989a7900ee45e5b7466dfd1598c281ec2bd42 100644 (file)
@@ -5,15 +5,25 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../../../parent</relativePath>
     </parent>
 
     <groupId>org.opendaylight.controller.samples</groupId>
-    <version>1.10.0-SNAPSHOT</version>
     <artifactId>clustering-it-model</artifactId>
     <packaging>bundle</packaging>
 
+    <dependencies>
+        <dependency>
+            <groupId>org.opendaylight.mdsal.binding.model.ietf</groupId>
+            <artifactId>rfc6991-ietf-inet-types</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.mdsal.model</groupId>
+            <artifactId>yang-ext</artifactId>
+        </dependency>
+    </dependencies>
+
     <build>
         <pluginManagement>
             <plugins>
             </plugins>
         </pluginManagement>
     </build>
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.binding.model.ietf</groupId>
-            <artifactId>rfc6991</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>yang-ext</artifactId>
-        </dependency>
-    </dependencies>
 </project>
index 34ffb1cfbf6d48d02a8449c7553097e34974acfd..f1a47cc71684a50862ee2ace16c95fd23866ab38 100644 (file)
@@ -1,42 +1,40 @@
 module car-people {
+  yang-version 1;
 
-    yang-version 1;
+  namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people";
 
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people";
+  prefix car;
 
-    prefix car;
+  import car { prefix "c"; revision-date 2014-08-18; }
+  import people { prefix "people"; revision-date 2014-08-18; }
 
-         import ietf-inet-types { prefix "inet"; revision-date 2013-07-15; }
-         import car { prefix "c"; revision-date 2014-08-18; }
-         import people { prefix "people"; revision-date 2014-08-18; }
+  organization "Netconf Central";
 
-    organization "Netconf Central";
+  contact
+    "Harman Singh <harmasin@cisco.com>";
 
-    contact
-      "Harman Singh <harmasin@cisco.com>";
+  description
+    "YANG model for car for test application";
 
+  revision "2014-08-18" {
     description
-      "YANG model for car for test application";
+      "Clustering sample app";
+  }
 
-    revision "2014-08-18" {
-      description
-        "Clustering sample app";
-    }
-
-    container car-people {
-      description
-       "Top-level container for all people car map";
+  container car-people {
+    description
+     "Top-level container for all people car map";
 
-      list car-person {
-        key "car-id person-id";
-        description "A mapping of cars and people.";
-        leaf car-id {
-          type c:car-id;
-        }
+    list car-person {
+      key "car-id person-id";
+      description "A mapping of cars and people.";
+      leaf car-id {
+        type c:car-id;
+      }
 
-        leaf person-id {
-          type people:person-id;
-        }
+      leaf person-id {
+        type people:person-id;
       }
     }
+  }
 }
index b771346112a9d5d2160bf131bb0b8a22f5face0d..5497732b1df31dd11357e706b9e6dc0050a03cc1 100644 (file)
@@ -1,60 +1,58 @@
 module car-purchase {
+  yang-version 1;
 
-    yang-version 1;
+  namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase";
 
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase";
+  prefix cp;
 
-    prefix cp;
+  import car { prefix "car"; revision-date 2014-08-18; }
+  import people { prefix "person"; revision-date 2014-08-18; }
+  import yang-ext {prefix "ext"; revision-date "2013-07-09";}
 
-         import ietf-inet-types { prefix "inet"; revision-date 2013-07-15; }
-         import car { prefix "car"; revision-date 2014-08-18; }
-         import people { prefix "person"; revision-date 2014-08-18; }
-         import yang-ext {prefix "ext"; revision-date "2013-07-09";}
+  organization "Netconf Central";
 
-    organization "Netconf Central";
+  contact
+    "Harman Singh <harmasin@cisco.com>";
 
-    contact
-      "Harman Singh <harmasin@cisco.com>";
+  description
+    "YANG model for car purchase for test application";
 
+  revision "2014-08-18" {
     description
-      "YANG model for car purchase for test application";
+      "Clustering sample app";
+  }
 
-    revision "2014-08-18" {
-      description
-        "Clustering sample app";
+  rpc buy-car {
+    description
+      "buy a new car";
+    input {
+      leaf person {
+        ext:context-reference "person:person-context";
+        type person:person-ref;
+        description "A reference to a particular person.";
+      }
+
+      leaf car-id {
+        type car:car-id;
+        description "identifier of car.";
+      }
+      leaf person-id {
+        type person:person-id;
+        description "identifier of person.";
+      }
     }
+  }
 
-    rpc buy-car {
-          description
-            "buy a new car";
-          input {
-            leaf person {
-              ext:context-reference "person:person-context";
-              type person:person-ref;
-              description "A reference to a particular person.";
-            }
-
-            leaf car-id {
-              type car:car-id;
-              description "identifier of car.";
-            }
-            leaf person-id {
-              type person:person-id;
-              description "identifier of person.";
-            }
-          }
-        }
-
-        notification carBought {
-          description
-            "Indicates that a person bought a car.";
-          leaf car-id {
-            type car:car-id;
-            description "identifier of car.";
-          }
-          leaf person-id {
-            type person:person-id;
-            description "identifier of person.";
-          }
-        }
+  notification carBought {
+    description
+      "Indicates that a person bought a car.";
+    leaf car-id {
+      type car:car-id;
+      description "identifier of car.";
+    }
+    leaf person-id {
+      type person:person-id;
+      description "identifier of person.";
+    }
+  }
 }
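
The buy-car RPC and carBought notification defined above are driven from the provider code later in this change. A minimal sketch of publishing carBought for a buy-car invocation, assuming an injected NotificationPublishService; the wrapper class and method names are illustrative only and mirror the lambda in AddPersonImpl below.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutput;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutputBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

final class BuyCarSketch {
    private BuyCarSketch() {
        // static helper only
    }

    // Publish carBought for the given buy-car input and complete the RPC once the
    // notification has been offered to the publish service.
    static ListenableFuture<RpcResult<BuyCarOutput>> buyCar(final NotificationPublishService publishService,
            final BuyCarInput input) {
        final var carBought = new CarBoughtBuilder()
            .setCarId(input.getCarId())
            .setPersonId(input.getPersonId())
            .build();
        return Futures.transform(publishService.offerNotification(carBought),
            result -> RpcResultBuilder.success(new BuyCarOutputBuilder().build()).build(),
            MoreExecutors.directExecutor());
    }
}
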
index aa2f2da7dd0fdb9746328321aaf921594786f349..d81ae06bdf9b019398551e8b27deaa91b0abac8f 100644 (file)
@@ -290,83 +290,6 @@ module odl-mdsal-lowlevel-control {
         }
     }
 
-    rpc produce-transactions {
-        description "Upon receiving this, the member shall make sure the outer list item
-            of llt:in-ints exists for the given id, make sure a shard for
-            the whole (config) id-ints is created (by creating and closing producer
-            for the whole id-ints), and create a DOMDataTreeProducer for that item (using that shard).
-
-            FIXME: Is the above the normal way of creating prefix-based chards?
-
-            Then start creating (one by one) and submitting transactions
-            to randomly add or delete items on the inner list for that id.
-            To ensure balanced number of deletes, the first write can create
-            a random set of random numbers. Other writes shall be one per number.
-            The writes shall use DOMDataTreeProducer API, as opposed to transaction (chains)
-            created directly on datastore.
-            .get with a timeout on currently earliest non-complete Future (from .submit)
-            shall be used as the primary wait method to throttle the submission rate.
-            This RPC shall not return until all transactions are confirmed successful,
-            or an exception is raised (the exception should propagate to restconf response).
-            OptimisticLockException is always considered an error.
-            In either case, the producer should be closed before returning,
-            but the shard and the whole id item shall be kept as they are.";
-        input {
-            uses llc:id-grouping;
-            uses transactions-params;
-            leaf isolated-transactions {
-                description "The value for DOMDataTreeProducer#createTransaction argument.";
-                mandatory true;
-                type boolean;
-            }
-        }
-        output {
-            uses transactions-result;
-        }
-    }
-
-    rpc create-prefix-shard {
-        description "Upon receiving this, the member creates a prefix shard at the instance-identifier, with replicas
-                on the required members.";
-        input {
-
-            leaf prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-            leaf-list replicas {
-                min-elements 1;
-                type string;
-            }
-        }
-    }
-
-    rpc remove-prefix-shard {
-        description "Upon receiving this, the member removes the prefix based shard identifier by this prefix.
-                This must be called from the same node that created the shard.";
-
-        input {
-            leaf prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-        }
-    }
-
-
-    rpc become-prefix-leader {
-        description "Upon receiving this, the member shall ask the appropriate API
-            to become Leader of the given shard (presumably the llt:list-ints one,
-            created by produce-transactions) and return immediatelly.";
-        input {
-            leaf prefix {
-                mandatory true;
-                type instance-identifier;
-            }
-        }
-        // No output.
-    }
-
     rpc remove-shard-replica {
         description "A specialised copy of cluster-admin:remove-shard-replica.
 
@@ -469,17 +392,6 @@ module odl-mdsal-lowlevel-control {
 
     // The following calls are not required for Carbon testing.
 
-    rpc deconfigure-id-ints-shard {
-        description "Upon receiving this, the member shall ask the appropriate API
-            to remove the llt:id-ints shard (presumably created by produce-transactions)
-            and return immediatelly.
-            It is expected the data would move to the root prefix shard seamlessly.
-
-            TODO: Make shard name configurable by input?";
-        // No input.
-        // No output.
-    }
-
     rpc register-default-constant {
         description "Upon receiving this, the member has to create and register
             a default llt:get-contexted-constant implementation (routed RPC).
@@ -509,17 +421,4 @@ module odl-mdsal-lowlevel-control {
             }
         }
     }
-
-    rpc shutdown-prefix-shard-replica {
-        description "Upon receiving this, the member will try to gracefully shutdown local configuration
-            data store prefix-based shard replica.";
-        input {
-            leaf prefix {
-                description "The prefix of the configuration data store prefix-based shard to be shutdown
-                    gracefully.";
-                mandatory true;
-                type instance-identifier;
-            }
-        }
-    }
 }
index 5c89326c2071edbcd9a57a77fdea4067dc5bb782..b83c335a94814528cee09ae79dd37a116410ea05 100644 (file)
@@ -1,16 +1,27 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
+    <modelVersion>4.0.0</modelVersion>
     <parent>
-        <groupId>org.opendaylight.controller.samples</groupId>
-        <artifactId>sal-samples</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <groupId>org.opendaylight.odlparent</groupId>
+        <artifactId>odlparent-lite</artifactId>
+        <version>13.0.11</version>
+        <relativePath/>
     </parent>
-    <artifactId>clustering-it</artifactId>
+
+    <groupId>org.opendaylight.controller.samples</groupId>
+    <artifactId>clustering-test-app</artifactId>
+    <version>9.0.3-SNAPSHOT</version>
     <packaging>pom</packaging>
-  <modules>
-    <module>configuration</module>
-    <module>model</module>
-    <module>provider</module>
-  </modules>
+
+    <properties>
+        <maven.deploy.skip>true</maven.deploy.skip>
+        <maven.install.skip>true</maven.install.skip>
+    </properties>
+
+    <modules>
+        <module>configuration</module>
+        <module>karaf-cli</module>
+        <module>model</module>
+        <module>provider</module>
+    </modules>
 </project>
index 92b0f05e550342b42eb3c309e23fb7074805a1a5..5a4c62eaf67f8abeb8a8d675fd82b84df4450781 100644 (file)
@@ -4,44 +4,61 @@
     <parent>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>mdsal-parent</artifactId>
-        <version>1.10.0-SNAPSHOT</version>
+        <version>9.0.3-SNAPSHOT</version>
         <relativePath>../../../parent</relativePath>
     </parent>
 
     <groupId>org.opendaylight.controller.samples</groupId>
     <artifactId>clustering-it-provider</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
     <packaging>bundle</packaging>
 
     <dependencies>
+        <dependency>
+            <groupId>com.github.spotbugs</groupId>
+            <artifactId>spotbugs-annotations</artifactId>
+            <optional>true</optional>
+        </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
             <artifactId>mdsal-eos-binding-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.mdsal</groupId>
-            <artifactId>mdsal-singleton-common-api</artifactId>
+            <artifactId>mdsal-singleton-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller.samples</groupId>
             <artifactId>clustering-it-model</artifactId>
-            <version>${project.version}</version>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-binding-api</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-binding-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-api</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-dom-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>org.opendaylight.controller</groupId>
-            <artifactId>sal-common-util</artifactId>
+            <groupId>org.opendaylight.mdsal</groupId>
+            <artifactId>mdsal-common-api</artifactId>
         </dependency>
         <dependency>
             <groupId>org.opendaylight.controller</groupId>
             <artifactId>sal-distributed-datastore</artifactId>
         </dependency>
+        <dependency>
+            <groupId>jakarta.annotation</groupId>
+            <artifactId>jakarta.annotation-api</artifactId>
+            <optional>true</optional>
+        </dependency>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.service.component.annotations</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.guicedee.services</groupId>
+            <artifactId>javax.inject</artifactId>
+            <optional>true</optional>
+        </dependency>
     </dependencies>
 </project>
@@ -7,54 +7,71 @@
  */
 package org.opendaylight.controller.clustering.it.listener;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.MoreExecutors;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.NotificationService;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPerson;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class PeopleCarListener implements CarPurchaseListener {
+@Singleton
+@Component(service = { })
+public final class CarBoughtListener implements Listener<CarBought> {
+    private static final Logger LOG = LoggerFactory.getLogger(CarBoughtListener.class);
 
-    private static final Logger LOG = LoggerFactory.getLogger(PeopleCarListener.class);
+    private final DataBroker dataProvider;
+    private final Registration reg;
 
-    private DataBroker dataProvider;
+    @Inject
+    @Activate
+    public CarBoughtListener(@Reference final DataBroker dataProvider,
+            @Reference final NotificationService notifService) {
+        this.dataProvider = requireNonNull(dataProvider);
+        reg = notifService.registerListener(CarBought.class, this);
+    }
 
-    public void setDataProvider(final DataBroker salDataProvider) {
-        this.dataProvider = salDataProvider;
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        reg.close();
     }
 
     @Override
-    public void onCarBought(final CarBought notification) {
-
-        final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
-        carPersonBuilder.setCarId(notification.getCarId());
-        carPersonBuilder.setPersonId(notification.getPersonId());
-        CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
-        carPersonBuilder.withKey(key);
-        final CarPerson carPerson = carPersonBuilder.build();
+    public void onNotification(final CarBought notification) {
+        final var carPerson = new CarPersonBuilder()
+            .withKey(new CarPersonKey(notification.getCarId(), notification.getPersonId()))
+            .build();
 
         LOG.info("Car bought, adding car-person entry: [{}]", carPerson);
 
-        InstanceIdentifier<CarPerson> carPersonIId = InstanceIdentifier.builder(CarPeople.class)
+        final var carPersonIId = InstanceIdentifier.builder(CarPeople.class)
                 .child(CarPerson.class, carPerson.key()).build();
 
+        final var tx = dataProvider.newWriteOnlyTransaction();
+        tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
 
-        WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
-        tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson, true);
-
-        Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+        tx.commit().addCallback(new FutureCallback<CommitInfo>() {
             @Override
-            public void onSuccess(final Void result) {
+            public void onSuccess(final CommitInfo result) {
                 LOG.info("Successfully added car-person entry: [{}]", carPerson);
             }
 
@@ -7,50 +7,91 @@
  */
 package org.opendaylight.controller.clustering.it.provider;
 
+import static java.util.Objects.requireNonNull;
+
+import com.google.common.collect.ImmutableSet;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import java.util.HashSet;
+import java.util.Set;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.NotificationPublishService;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.binding.api.WriteTransaction;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCar;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPerson;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonContext;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class PeopleProvider implements PeopleService, AutoCloseable {
+@Singleton
+@Component(service = { })
+public final class AddPersonImpl implements AddPerson, AutoCloseable {
+    private static final Logger LOG = LoggerFactory.getLogger(AddPersonImpl.class);
 
-    private static final Logger LOG = LoggerFactory.getLogger(PeopleProvider.class);
+    private final Set<Registration> regs = new HashSet<>();
+    private final RpcProviderService rpcProviderService;
+    private final DataBroker dataProvider;
+    private final BuyCar buyCarRpc;
 
-    private DataBroker dataProvider;
+    @Inject
+    @Activate
+    public AddPersonImpl(@Reference final DataBroker dataProvider,
+            @Reference final NotificationPublishService notificationProvider,
+            @Reference final RpcProviderService rpcProviderService) {
+        this.dataProvider = requireNonNull(dataProvider);
+        this.rpcProviderService = requireNonNull(rpcProviderService);
 
-    private BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration;
+        requireNonNull(notificationProvider);
+        buyCarRpc = input -> {
+            LOG.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+            final var carBought = new CarBoughtBuilder()
+                .setCarId(input.getCarId())
+                .setPersonId(input.getPersonId())
+                .build();
+            return Futures.transform(notificationProvider.offerNotification(carBought),
+                result -> RpcResultBuilder.success(new BuyCarOutputBuilder().build()).build(),
+                MoreExecutors.directExecutor());
+        };
 
-    public void setDataProvider(final DataBroker salDataProvider) {
-        this.dataProvider = salDataProvider;
+        regs.add(rpcProviderService.registerRpcImplementation(buyCarRpc));
+        regs.add(rpcProviderService.registerRpcImplementation(this));
     }
 
-
-    public void setRpcRegistration(final BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration) {
-        this.rpcRegistration = rpcRegistration;
+    @PreDestroy
+    @Deactivate
+    @Override
+    public void close() {
+        regs.forEach(Registration::close);
+        regs.clear();
     }
 
     @Override
-    public ListenableFuture<RpcResult<AddPersonOutput>> addPerson(final AddPersonInput input) {
+    public ListenableFuture<RpcResult<AddPersonOutput>> invoke(final AddPersonInput input) {
         LOG.info("RPC addPerson : adding person [{}]", input);
 
         PersonBuilder builder = new PersonBuilder(input);
@@ -62,13 +103,13 @@ public class PeopleProvider implements PeopleService, AutoCloseable {
                 .child(Person.class, person.key()).build();
         // Place entry in data store tree
         WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
-        tx.put(LogicalDatastoreType.CONFIGURATION, personId, person, true);
+        tx.put(LogicalDatastoreType.CONFIGURATION, personId, person);
 
-        Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+        tx.commit().addCallback(new FutureCallback<CommitInfo>() {
             @Override
-            public void onSuccess(final Void result) {
+            public void onSuccess(final CommitInfo result) {
                 LOG.info("RPC addPerson : person added successfully [{}]", person);
-                rpcRegistration.registerPath(PersonContext.class, personId);
+                regs.add(rpcProviderService.registerRpcImplementation(buyCarRpc, ImmutableSet.of(personId)));
                 LOG.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
                 futureResult.set(RpcResultBuilder.success(new AddPersonOutputBuilder().build()).build());
             }
@@ -77,13 +118,9 @@ public class PeopleProvider implements PeopleService, AutoCloseable {
             public void onFailure(final Throwable ex) {
                 LOG.error("RPC addPerson : person addition failed [{}]", person, ex);
                 futureResult.set(RpcResultBuilder.<AddPersonOutput>failed()
-                        .withError(RpcError.ErrorType.APPLICATION, ex.getMessage()).build());
+                        .withError(ErrorType.APPLICATION, ex.getMessage()).build());
             }
         }, MoreExecutors.directExecutor());
         return futureResult;
     }
-
-    @Override
-    public void close() {
-    }
 }
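
AddPersonImpl (formerly PeopleProvider) replaces the RoutedRpcRegistration/PersonContext wiring with per-path registration on RpcProviderService, and the removed tx.submit()/Futures.addCallback combination with tx.commit().addCallback(). A standalone sketch of those two moves, using the same MD-SAL calls as above; the class and method names are illustrative only.

import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.MoreExecutors;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.mdsal.binding.api.WriteTransaction;
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCar;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class AddPersonSketch {
    private static final Logger LOG = LoggerFactory.getLogger(AddPersonSketch.class);

    private AddPersonSketch() {
        // static helpers only
    }

    // Register the BuyCar implementation only for the newly added person's path.
    static Registration registerRouted(final RpcProviderService rpcProviderService, final BuyCar buyCarRpc,
            final Person person) {
        final InstanceIdentifier<Person> personId = InstanceIdentifier.builder(People.class)
                .child(Person.class, person.key()).build();
        return rpcProviderService.registerRpcImplementation(buyCarRpc, ImmutableSet.of(personId));
    }

    // Write the person and react to the commit outcome without the removed tx.submit() API.
    static void writePerson(final DataBroker dataBroker, final Person person) {
        final InstanceIdentifier<Person> personId = InstanceIdentifier.builder(People.class)
                .child(Person.class, person.key()).build();
        final WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
        tx.put(LogicalDatastoreType.CONFIGURATION, personId, person);
        tx.commit().addCallback(new FutureCallback<CommitInfo>() {
            @Override
            public void onSuccess(final CommitInfo result) {
                LOG.info("person added [{}]", person);
            }

            @Override
            public void onFailure(final Throwable cause) {
                LOG.error("person addition failed [{}]", person, cause);
            }
        }, MoreExecutors.directExecutor());
    }
}
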
index a5e66fdad8c60beef135d5c5e891ebed41c88d20..4c41784eb5b20a70c8547e5e60c1b4ba004a73fe 100644 (file)
@@ -5,46 +5,60 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.clustering.it.provider;
 
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutput;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicRpcTestService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class BasicRpcTestProvider implements ClusterSingletonService, BasicRpcTestService {
-
+@Singleton
+@Component(service = { })
+public final class BasicRpcTestProvider implements ClusterSingletonService {
     private static final Logger LOG = LoggerFactory.getLogger(BasicRpcTestProvider.class);
-    private static final ServiceGroupIdentifier IDENTIFIER = ServiceGroupIdentifier.create("Basic-rpc-test");
+    private static final ServiceGroupIdentifier IDENTIFIER = new ServiceGroupIdentifier("Basic-rpc-test");
 
-    private final RpcProviderRegistry rpcProviderRegistry;
-    private final ClusterSingletonServiceProvider singletonService;
-    private BindingAwareBroker.RpcRegistration<BasicRpcTestService> rpcRegistration;
+    private final RpcProviderService rpcProviderRegistry;
+    private final Registration singletonRegistration;
 
-    public BasicRpcTestProvider(final RpcProviderRegistry rpcProviderRegistry,
-                                final ClusterSingletonServiceProvider singletonService) {
+    private Registration rpcRegistration = null;
+
+    @Inject
+    @Activate
+    public BasicRpcTestProvider(@Reference final RpcProviderService rpcProviderRegistry,
+                                @Reference final ClusterSingletonServiceProvider singletonService) {
         this.rpcProviderRegistry = rpcProviderRegistry;
-        this.singletonService = singletonService;
+        singletonRegistration = singletonService.registerClusterSingletonService(this);
+    }
 
-        singletonService.registerClusterSingletonService(this);
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        singletonRegistration.close();
     }
 
     @Override
     public void instantiateServiceInstance() {
         LOG.info("Basic testing rpc registered as global");
-        rpcRegistration = rpcProviderRegistry.addRpcImplementation(BasicRpcTestService.class, this);
+        rpcRegistration = rpcProviderRegistry.registerRpcImplementation((BasicGlobal) input -> {
+            LOG.info("Basic test global rpc invoked");
+            return RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).buildFuture();
+        });
     }
 
     @Override
@@ -59,11 +73,4 @@ public class BasicRpcTestProvider implements ClusterSingletonService, BasicRpcTe
     public ServiceGroupIdentifier getIdentifier() {
         return IDENTIFIER;
     }
-
-    @Override
-    public ListenableFuture<RpcResult<BasicGlobalOutput>> basicGlobal(final BasicGlobalInput input) {
-        LOG.info("Basic test global rpc invoked");
-
-        return Futures.immediateFuture(RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).build());
-    }
 }
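
BasicRpcTestProvider now registers BasicGlobal as a lambda from instantiateServiceInstance() and closes the returned Registration when the singleton instance is torn down. A minimal sketch of that registration, assuming an injected RpcProviderService; the wrapper class is illustrative only.

import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobal;
import org.opendaylight.yang.gen.v1.urn.opendaylight.controller.basic.rpc.test.rev160120.BasicGlobalOutputBuilder;
import org.opendaylight.yangtools.concepts.Registration;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

final class BasicGlobalRegistrationSketch {
    private BasicGlobalRegistrationSketch() {
        // static helper only
    }

    // Register BasicGlobal as a lambda and hand back the Registration so the caller
    // can close it when the singleton instance is deactivated.
    static Registration register(final RpcProviderService rpcProviderService) {
        return rpcProviderService.registerRpcImplementation((BasicGlobal) input ->
            RpcResultBuilder.success(new BasicGlobalOutputBuilder().build()).buildFuture());
    }
}
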
index 8d9eb77ddc812f6a5538a2dbd7fc99cb96b7dbd7..c3e9b89393e6dafaa46e8ab6e82006502ca96966 100644 (file)
@@ -7,12 +7,10 @@
  */
 package org.opendaylight.controller.clustering.it.provider;
 
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
-import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import java.util.List;
+import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
+import org.opendaylight.mdsal.binding.api.DataTreeModification;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
-import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -22,38 +20,32 @@ import org.slf4j.LoggerFactory;
  *
  * @author Ryan Goulding (ryandgoulding@gmail.com)
  */
-public class CarDataTreeChangeListener implements DataTreeChangeListener<Cars> {
+public final class CarDataTreeChangeListener implements DataTreeChangeListener<Cars> {
     private static final Logger LOG = LoggerFactory.getLogger(CarDataTreeChangeListener.class);
 
-    @java.lang.Override
-    public void onDataTreeChanged(final java.util.Collection<DataTreeModification<Cars>> changes) {
+    @Override
+    public void onDataTreeChanged(final List<DataTreeModification<Cars>> changes) {
         if (LOG.isTraceEnabled()) {
-            for (DataTreeModification<Cars> change : changes) {
-                ouputChanges(change);
+            for (var change : changes) {
+                outputChanges(change);
             }
         }
     }
 
-    private static void ouputChanges(final DataTreeModification<Cars> change) {
-        final DataObjectModification<Cars> rootNode = change.getRootNode();
-        final ModificationType modificationType = rootNode.getModificationType();
-        final InstanceIdentifier<Cars> rootIdentifier = change.getRootPath().getRootIdentifier();
+    private static void outputChanges(final DataTreeModification<Cars> change) {
+        final var rootNode = change.getRootNode();
+        final var modificationType = rootNode.modificationType();
+        final var rootIdentifier = change.getRootPath().path();
         switch (modificationType) {
-            case WRITE:
-            case SUBTREE_MODIFIED: {
-                final Cars dataBefore = rootNode.getDataBefore();
-                final Cars dataAfter = rootNode.getDataAfter();
+            case WRITE, SUBTREE_MODIFIED -> {
                 LOG.trace("onDataTreeChanged - Cars config with path {} was added or changed from {} to {}",
-                        rootIdentifier, dataBefore, dataAfter);
-                break;
+                    rootIdentifier, rootNode.dataBefore(), rootNode.dataAfter());
             }
-            case DELETE: {
+            case DELETE -> {
                 LOG.trace("onDataTreeChanged - Cars config with path {} was deleted", rootIdentifier);
-                break;
             }
-            default: {
+            default -> {
                 LOG.trace("onDataTreeChanged called with unknown modificationType: {}", modificationType);
-                break;
             }
         }
     }
index 1b55dbac58cf4465c17f9cd6a9ae70c0164ff838..8c2e0b5f29d55f877c5dac60c8a472dd47927b9c 100644 (file)
@@ -20,12 +20,11 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
 import org.opendaylight.yangtools.util.concurrent.FluentFutures;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
 import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.model.api.EffectiveModelContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -34,41 +33,39 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-public class CarEntryDataTreeCommitCohort implements DOMDataTreeCommitCohort {
+public final class CarEntryDataTreeCommitCohort implements DOMDataTreeCommitCohort {
     private static final Logger LOG = LoggerFactory.getLogger(CarEntryDataTreeCommitCohort.class);
 
     private static final QName YEAR_QNAME = QName.create(Cars.QNAME, "year").intern();
     private static final NodeIdentifier YEAR_NODE_ID = new NodeIdentifier(YEAR_QNAME);
 
     @Override
-    public FluentFuture<PostCanCommitStep> canCommit(Object txId, SchemaContext ctx,
-            Collection<DOMDataTreeCandidate> candidates) {
+    public FluentFuture<PostCanCommitStep> canCommit(final Object txId, final EffectiveModelContext ctx,
+            final Collection<DOMDataTreeCandidate> candidates) {
 
         for (DOMDataTreeCandidate candidate : candidates) {
             // Simple data validation - verify the year, if present, is >= 1990
 
             final DataTreeCandidateNode rootNode = candidate.getRootNode();
-            final Optional<NormalizedNode<?, ?>> dataAfter = rootNode.getDataAfter();
+            final NormalizedNode dataAfter = rootNode.dataAfter();
 
             LOG.info("In canCommit: modificationType: {}, dataBefore: {}, dataAfter: {}",
-                    rootNode.getModificationType(), rootNode.getDataBefore(), dataAfter);
+                    rootNode.modificationType(), rootNode.dataBefore(), dataAfter);
 
             // Note: we don't want to process DELETE modifications but we don't need to explicitly check the
             // ModificationType because dataAfter will not be present. Also dataAfter *should* always contain a
             // MapEntryNode but we verify anyway.
-            if (dataAfter.isPresent()) {
-                final NormalizedNode<?, ?> normalizedNode = dataAfter.get();
-                Verify.verify(normalizedNode instanceof DataContainerNode,
-                        "Expected type DataContainerNode, actual was %s", normalizedNode.getClass());
-                DataContainerNode<?> entryNode = (DataContainerNode<?>) normalizedNode;
-                final Optional<DataContainerChild<? extends PathArgument, ?>> possibleYear =
-                        entryNode.getChild(YEAR_NODE_ID);
+            if (dataAfter != null) {
+                Verify.verify(dataAfter instanceof DataContainerNode,
+                        "Expected type DataContainerNode, actual was %s", dataAfter.getClass());
+                DataContainerNode entryNode = (DataContainerNode) dataAfter;
+                final Optional<DataContainerChild> possibleYear = entryNode.findChildByArg(YEAR_NODE_ID);
                 if (possibleYear.isPresent()) {
-                    final Number year = (Number) possibleYear.get().getValue();
+                    final Number year = (Number) possibleYear.orElseThrow().body();
 
                     LOG.info("year is {}", year);
 
-                    if (!(year.longValue() >= 1990)) {
+                    if (year.longValue() < 1990) {
                         return FluentFutures.immediateFailedFluentFuture(new DataValidationFailedException(
                                 DOMDataTreeIdentifier.class, candidate.getRootPath(),
                                 String.format("Invalid year %d - year must be >= 1990", year)));
index a6f154bfcde7282b7b0bdb6c0d5a7166bdbd3238..bec65aa561a3357daf980b58a6f6ebbda36f8db5 100644 (file)
@@ -8,67 +8,84 @@
 package org.opendaylight.controller.clustering.it.provider;
 
 import com.google.common.base.Stopwatch;
-import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
-import org.opendaylight.controller.md.sal.binding.api.DataBroker;
-import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
-import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeCommitCohortRegistry;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.opendaylight.mdsal.binding.api.DataBroker;
+import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
+import org.opendaylight.mdsal.binding.api.WriteTransaction;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.CommitCohortExtension;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
 import org.opendaylight.mdsal.eos.binding.api.Entity;
-import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipChange;
 import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipListener;
 import org.opendaylight.mdsal.eos.binding.api.EntityOwnershipService;
 import org.opendaylight.mdsal.eos.common.api.CandidateAlreadyRegisteredException;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarId;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarService;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.Cars;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.CarsBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohort;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterCommitCohortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtcl;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterLoggingDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnership;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.RegisterOwnershipOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StopStressTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTest;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.StressTestOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohort;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterCommitCohortOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtcls;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterLoggingDtclsOutputBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnership;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipInput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipOutput;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.UnregisterOwnershipOutputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.cars.CarEntry;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.rev140818.cars.CarEntryBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.common.Uint32;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -77,15 +94,14 @@ import org.slf4j.LoggerFactory;
  *
  * @author Thomas Pantelis
  */
-@SuppressFBWarnings("SLF4J_ILLEGAL_PASSED_CLASS")
-public class CarProvider implements CarService {
-    private static final Logger LOG_PURCHASE_CAR = LoggerFactory.getLogger(PurchaseCarProvider.class);
-
-    private static final Logger LOG_CAR_PROVIDER = LoggerFactory.getLogger(CarProvider.class);
+@Singleton
+@Component(service = { })
+public final class CarProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(CarProvider.class);
 
     private static final String ENTITY_TYPE = "cars";
     private static final InstanceIdentifier<Cars> CARS_IID = InstanceIdentifier.builder(Cars.class).build();
-    private static final DataTreeIdentifier<Cars> CARS_DTID = new DataTreeIdentifier<>(
+    private static final DataTreeIdentifier<Cars> CARS_DTID = DataTreeIdentifier.of(
             LogicalDatastoreType.CONFIGURATION, CARS_IID);
 
     private final DataBroker dataProvider;
@@ -94,29 +110,44 @@ public class CarProvider implements CarService {
     private final AtomicLong succcessCounter = new AtomicLong();
     private final AtomicLong failureCounter = new AtomicLong();
 
-    private final CarEntityOwnershipListener ownershipListener = new CarEntityOwnershipListener();
-    private final AtomicBoolean registeredListener = new AtomicBoolean();
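+    // Replaces the former CarEntityOwnershipListener inner class: every ownership change is simply logged.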
+    private final EntityOwnershipListener ownershipListener = (entity, change, inJeopardy) ->
+        LOG.info("ownershipChanged: entity={} change={} inJeopardy={}", entity, change, inJeopardy);
 
-    private final Collection<ListenerRegistration<?>> carsDclRegistrations =
-            Sets.newConcurrentHashSet();
-    private final Collection<ListenerRegistration<CarDataTreeChangeListener>> carsDtclRegistrations =
-            Sets.newConcurrentHashSet();
+    private final AtomicBoolean registeredListener = new AtomicBoolean();
+    private final AtomicReference<Registration> commitCohortReg = new AtomicReference<>();
+    private final Set<ObjectRegistration<?>> carsDclRegistrations = ConcurrentHashMap.newKeySet();
+    private final Set<Registration> regs = new HashSet<>();
+    private final Set<Registration> carsDtclRegistrations = ConcurrentHashMap.newKeySet();
 
     private volatile Thread testThread;
     private volatile boolean stopThread;
-    private final AtomicReference<DOMDataTreeCommitCohortRegistration<CarEntryDataTreeCommitCohort>> commitCohortReg =
-            new AtomicReference<>();
 
-    public CarProvider(final DataBroker dataProvider, final EntityOwnershipService ownershipService,
-            final DOMDataBroker domDataBroker) {
+    @Inject
+    @Activate
+    public CarProvider(@Reference final DataBroker dataProvider,
+            @Reference final EntityOwnershipService ownershipService, @Reference final DOMDataBroker domDataBroker,
+            @Reference final RpcProviderService rpcProviderService) {
         this.dataProvider = dataProvider;
         this.ownershipService = ownershipService;
         this.domDataBroker = domDataBroker;
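+        // The RPC implementations are registered as method references; the returned Registration is kept
+        // in "regs" so close() can tear them down.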
+        regs.add(rpcProviderService.registerRpcImplementations(
+            (StressTest) this::stressTest,
+            (StopStressTest) this::stopStressTest,
+            (RegisterOwnership) this::registerOwnership,
+            (UnregisterOwnership) this::unregisterOwnership,
+            (RegisterLoggingDtcl) this::registerLoggingDtcl,
+            (UnregisterLoggingDtcls) this::unregisterLoggingDtcls,
+            (RegisterCommitCohort) this::registerCommitCohort,
+            (UnregisterCommitCohort) this::unregisterCommitCohort));
     }
 
+    @PreDestroy
+    @Deactivate
     public void close() {
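+        // Tear-down order: stress-test thread, commit cohort registration, then the registrations tracked in "regs".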
         stopThread();
         closeCommitCohortRegistration();
+        regs.forEach(Registration::close);
+        regs.clear();
     }
 
     private void stopThread() {
@@ -132,27 +163,26 @@ public class CarProvider implements CarService {
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StressTestOutput>> stressTest(final StressTestInput input) {
+    private ListenableFuture<RpcResult<StressTestOutput>> stressTest(final StressTestInput input) {
         final int inputRate;
         final long inputCount;
 
         // If rate is not provided, or given as zero, then just return.
-        if (input.getRate() == null || input.getRate() == 0) {
-            LOG_PURCHASE_CAR.info("Exiting stress test as no rate is given.");
+        if (input.getRate() == null || input.getRate().toJava() == 0) {
+            LOG.info("Exiting stress test as no rate is given.");
             return Futures.immediateFuture(RpcResultBuilder.<StressTestOutput>failed()
                     .withError(ErrorType.PROTOCOL, "invalid rate")
                     .build());
         }
 
-        inputRate = input.getRate();
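+        // rate and count are now yang uint wrapper types, hence the toJava() conversions below.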
+        inputRate = input.getRate().toJava();
         if (input.getCount() != null) {
-            inputCount = input.getCount();
+            inputCount = input.getCount().toJava();
         } else {
             inputCount = 0;
         }
 
-        LOG_PURCHASE_CAR.info("Stress test starting : rate: {} count: {}", inputRate, inputCount);
+        LOG.info("Stress test starting : rate: {} count: {}", inputRate, inputCount);
 
         stopThread();
         // clear counters
@@ -163,9 +193,9 @@ public class CarProvider implements CarService {
         InstanceIdentifier<Cars> carsId = InstanceIdentifier.create(Cars.class);
         tx.merge(LogicalDatastoreType.CONFIGURATION, carsId, new CarsBuilder().build());
         try {
-            tx.submit().checkedGet(5, TimeUnit.SECONDS);
-        } catch (TransactionCommitFailedException | TimeoutException e) {
-            LOG_PURCHASE_CAR.error("Put Cars failed",e);
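+            // commit() returns a FluentFuture, so failures surface as ExecutionException instead of
+            // TransactionCommitFailedException.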
+            tx.commit().get(5, TimeUnit.SECONDS);
+        } catch (TimeoutException | InterruptedException | ExecutionException e) {
+            LOG.error("Put Cars failed",e);
             return Futures.immediateFuture(RpcResultBuilder.success(new StressTestOutputBuilder().build()).build());
         }
 
@@ -181,10 +211,10 @@ public class CarProvider implements CarService {
                 CarEntry car = new CarEntryBuilder().setId(new CarId("car" + id)).build();
                 tx1.put(LogicalDatastoreType.CONFIGURATION,
                         InstanceIdentifier.<Cars>builder(Cars.class).child(CarEntry.class, car.key()).build(), car);
-                Futures.addCallback(tx1.submit(), new FutureCallback<Void>() {
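+                // The FluentFuture returned by commit() takes the callback directly; Futures.addCallback() is
+                // no longer needed.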
+                tx1.commit().addCallback(new FutureCallback<CommitInfo>() {
 
                     @Override
-                    public void onSuccess(final Void result) {
+                    public void onSuccess(final CommitInfo result) {
                         // Transaction succeeded
                         succcessCounter.getAndIncrement();
                     }
@@ -193,7 +223,7 @@ public class CarProvider implements CarService {
                     public void onFailure(final Throwable ex) {
                         // Transaction failed
                         failureCounter.getAndIncrement();
-                        LOG_CAR_PROVIDER.error("Put Cars failed", ex);
+                        LOG.error("Put Cars failed", ex);
                     }
                 }, MoreExecutors.directExecutor());
                 try {
@@ -203,7 +233,7 @@ public class CarProvider implements CarService {
                 }
 
                 if (count.get() % 1000 == 0) {
-                    LOG_PURCHASE_CAR.info("Cars created {}, time: {}", count.get(), sw.elapsed(TimeUnit.SECONDS));
+                    LOG.info("Cars created {}, time: {}", count.get(), sw.elapsed(TimeUnit.SECONDS));
                 }
 
                 // Check if a count is specified in input and we have created that many cars.
@@ -212,33 +242,30 @@ public class CarProvider implements CarService {
                 }
             }
 
-            LOG_PURCHASE_CAR.info("Stress test thread stopping after creating {} cars.", count.get());
+            LOG.info("Stress test thread stopping after creating {} cars.", count.get());
         });
         testThread.start();
 
         return Futures.immediateFuture(RpcResultBuilder.success(new StressTestOutputBuilder().build()).build());
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StopStressTestOutput>> stopStressTest(final StopStressTestInput input) {
+    private ListenableFuture<RpcResult<StopStressTestOutput>> stopStressTest(final StopStressTestInput input) {
         stopThread();
         StopStressTestOutputBuilder stopStressTestOutput;
         stopStressTestOutput = new StopStressTestOutputBuilder()
-                .setSuccessCount(succcessCounter.longValue())
-                .setFailureCount(failureCounter.longValue());
+                .setSuccessCount(Uint32.valueOf(succcessCounter.longValue()))
+                .setFailureCount(Uint32.valueOf(failureCounter.longValue()));
 
         final StopStressTestOutput result = stopStressTestOutput.build();
-        LOG_PURCHASE_CAR.info("Executed Stop Stress test; No. of cars created {}; "
-                + "No. of cars failed {}; ", succcessCounter, failureCounter);
+        LOG.info("Executed Stop Stress test; No. of cars created {}; No. of cars failed {}; ",
+            succcessCounter, failureCounter);
         // clear counters
         succcessCounter.set(0);
         failureCounter.set(0);
         return Futures.immediateFuture(RpcResultBuilder.<StopStressTestOutput>success(result).build());
     }
 
-
-    @Override
-    public ListenableFuture<RpcResult<RegisterOwnershipOutput>> registerOwnership(final RegisterOwnershipInput input) {
+    private ListenableFuture<RpcResult<RegisterOwnershipOutput>> registerOwnership(final RegisterOwnershipInput input) {
         if (registeredListener.compareAndSet(false, true)) {
             ownershipService.registerListener(ENTITY_TYPE, ownershipListener);
         }
@@ -254,49 +281,36 @@ public class CarProvider implements CarService {
         return RpcResultBuilder.success(new RegisterOwnershipOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterOwnershipOutput>> unregisterOwnership(
+    private ListenableFuture<RpcResult<UnregisterOwnershipOutput>> unregisterOwnership(
             final UnregisterOwnershipInput input) {
         return RpcResultBuilder.success(new UnregisterOwnershipOutputBuilder().build()).buildFuture();
     }
 
-    private static class CarEntityOwnershipListener implements EntityOwnershipListener {
-        @Override
-        public void ownershipChanged(final EntityOwnershipChange ownershipChange) {
-            LOG_CAR_PROVIDER.info("ownershipChanged: {}", ownershipChange);
-        }
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<RegisterLoggingDtclOutput>> registerLoggingDtcl(
+    private ListenableFuture<RpcResult<RegisterLoggingDtclOutput>> registerLoggingDtcl(
             final RegisterLoggingDtclInput input) {
-        LOG_CAR_PROVIDER.info("Registering a new CarDataTreeChangeListener");
-        final ListenerRegistration<CarDataTreeChangeListener> carsDtclRegistration =
-                dataProvider.registerDataTreeChangeListener(CARS_DTID, new CarDataTreeChangeListener());
-
-        carsDtclRegistrations.add(carsDtclRegistration);
+        LOG.info("Registering a new CarDataTreeChangeListener");
+        final var reg = dataProvider.registerTreeChangeListener(CARS_DTID, new CarDataTreeChangeListener());
+        carsDtclRegistrations.add(reg);
         return RpcResultBuilder.success(new RegisterLoggingDtclOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterLoggingDtclsOutput>> unregisterLoggingDtcls(
+    private ListenableFuture<RpcResult<UnregisterLoggingDtclsOutput>> unregisterLoggingDtcls(
             final UnregisterLoggingDtclsInput input) {
-        LOG_CAR_PROVIDER.info("Unregistering the CarDataTreeChangeListener(s)");
+        LOG.info("Unregistering the CarDataTreeChangeListener(s)");
         synchronized (carsDtclRegistrations) {
             int numListeners = 0;
-            for (ListenerRegistration<CarDataTreeChangeListener> carsDtclRegistration : carsDtclRegistrations) {
+            for (var carsDtclRegistration : carsDtclRegistrations) {
                 carsDtclRegistration.close();
                 numListeners++;
             }
             carsDtclRegistrations.clear();
-            LOG_CAR_PROVIDER.info("Unregistered {} CaraDataTreeChangeListener(s)", numListeners);
+            LOG.info("Unregistered {} CaraDataTreeChangeListener(s)", numListeners);
         }
         return RpcResultBuilder.success(new UnregisterLoggingDtclsOutputBuilder().build()).buildFuture();
     }
 
-    @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public ListenableFuture<RpcResult<UnregisterCommitCohortOutput>> unregisterCommitCohort(
+    private ListenableFuture<RpcResult<UnregisterCommitCohortOutput>> unregisterCommitCohort(
             final UnregisterCommitCohortInput input) {
         closeCommitCohortRegistration();
 
@@ -304,23 +318,20 @@ public class CarProvider implements CarService {
     }
 
     private void closeCommitCohortRegistration() {
-        final DOMDataTreeCommitCohortRegistration<CarEntryDataTreeCommitCohort> reg = commitCohortReg.getAndSet(null);
+        final var reg = commitCohortReg.getAndSet(null);
         if (reg != null) {
             reg.close();
-            LOG_CAR_PROVIDER.info("Unregistered commit cohort");
+            LOG.info("Unregistered commit cohort");
         }
     }
 
-    @Override
-    public synchronized ListenableFuture<RpcResult<RegisterCommitCohortOutput>> registerCommitCohort(
+    private synchronized ListenableFuture<RpcResult<RegisterCommitCohortOutput>> registerCommitCohort(
             final RegisterCommitCohortInput input) {
         if (commitCohortReg.get() != null) {
             return RpcResultBuilder.success(new RegisterCommitCohortOutputBuilder().build()).buildFuture();
         }
 
-        final DOMDataTreeCommitCohortRegistry commitCohortRegistry = (DOMDataTreeCommitCohortRegistry)
-                domDataBroker.getSupportedExtensions().get(DOMDataTreeCommitCohortRegistry.class);
-
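+        // The cohort registry is now the typed CommitCohortExtension; a null result means the broker does not
+        // support commit cohorts.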
+        final var commitCohortRegistry = domDataBroker.extension(CommitCohortExtension.class);
         if (commitCohortRegistry == null) {
             // Shouldn't happen
             return RpcResultBuilder.<RegisterCommitCohortOutput>failed().withError(ErrorType.APPLICATION,
@@ -335,12 +346,10 @@ public class CarProvider implements CarService {
         // to address all list entries, the second path argument is wild-carded by specifying just the CarEntry.QNAME.
         final YangInstanceIdentifier carEntryPath = YangInstanceIdentifier.builder(
                 YangInstanceIdentifier.of(Cars.QNAME)).node(CarEntry.QNAME).node(CarEntry.QNAME).build();
-        commitCohortReg.set(commitCohortRegistry.registerCommitCohort(
-                new org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier(
-                    org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION,
-                        carEntryPath), new CarEntryDataTreeCommitCohort()));
+        commitCohortReg.set(commitCohortRegistry.registerCommitCohort(DOMDataTreeIdentifier.of(
+            LogicalDatastoreType.CONFIGURATION, carEntryPath), new CarEntryDataTreeCommitCohort()));
 
-        LOG_CAR_PROVIDER.info("Registered commit cohort");
+        LOG.info("Registered commit cohort");
 
         return RpcResultBuilder.success(new RegisterCommitCohortOutputBuilder().build()).buildFuture();
     }
index 8c4e9716f054de39b58324eb4a3245899d7d8b6a..10dafba452be749dbafd447f93c70b65ccd2f1e3 100644 (file)
  */
 package org.opendaylight.controller.clustering.it.provider;
 
-import static akka.actor.ActorRef.noSender;
-
 import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.actor.PoisonPill;
-import akka.actor.Props;
+import akka.dispatch.Futures;
 import akka.dispatch.OnComplete;
 import akka.pattern.Patterns;
-import com.google.common.base.Optional;
 import com.google.common.base.Strings;
-import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-import org.opendaylight.controller.cluster.ActorSystemProvider;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientLocalHistory;
-import org.opendaylight.controller.cluster.databroker.actors.dds.ClientTransaction;
-import org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient;
-import org.opendaylight.controller.cluster.databroker.actors.dds.SimpleDataStoreClientActor;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
 import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
-import org.opendaylight.controller.cluster.datastore.utils.ClusterUtils;
 import org.opendaylight.controller.cluster.raft.client.messages.Shutdown;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory;
 import org.opendaylight.controller.clustering.it.provider.impl.FlappingSingletonService;
 import org.opendaylight.controller.clustering.it.provider.impl.GetConstantService;
-import org.opendaylight.controller.clustering.it.provider.impl.IdIntsDOMDataTreeLIstener;
 import org.opendaylight.controller.clustering.it.provider.impl.IdIntsListener;
-import org.opendaylight.controller.clustering.it.provider.impl.PrefixLeaderHandler;
-import org.opendaylight.controller.clustering.it.provider.impl.PrefixShardHandler;
-import org.opendaylight.controller.clustering.it.provider.impl.ProduceTransactionsHandler;
 import org.opendaylight.controller.clustering.it.provider.impl.PublishNotificationsTask;
 import org.opendaylight.controller.clustering.it.provider.impl.RoutedGetConstantService;
 import org.opendaylight.controller.clustering.it.provider.impl.SingletonGetConstantService;
 import org.opendaylight.controller.clustering.it.provider.impl.WriteTransactionsHandler;
 import org.opendaylight.controller.clustering.it.provider.impl.YnlListener;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
-import org.opendaylight.controller.md.sal.binding.api.NotificationService;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
-import org.opendaylight.controller.sal.binding.api.RpcProviderRegistry;
+import org.opendaylight.mdsal.binding.api.NotificationPublishService;
+import org.opendaylight.mdsal.binding.api.NotificationService;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker.DataTreeChangeExtension;
 import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeLoopException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
 import org.opendaylight.mdsal.dom.api.DOMSchemaService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.AddShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotifications;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CheckPublishNotificationsOutputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.DeconfigureIdIntsShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.DeconfigureIdIntsShardOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAborted;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.IsClientAbortedOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.OdlMdsalLowlevelControlService;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterBoundConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterDefaultConstantOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingleton;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterFlappingSingletonOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RegisterSingletonConstantOutputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemoveShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownPrefixShardReplicaInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownPrefixShardReplicaOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownPrefixShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplica;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ShutdownShardReplicaOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotifications;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.StartPublishNotificationsOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDdtlOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtcl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.SubscribeYnlOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterBoundConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterDefaultConstantOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingleton;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterFlappingSingletonOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstant;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnregisterSingletonConstantOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDdtlOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtcl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeDtclOutputBuilder;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnl;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutput;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactions;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutput;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequence;
+import org.opendaylight.yangtools.concepts.AbstractObjectRegistration;
+import org.opendaylight.yangtools.concepts.ObjectRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.FiniteDuration;
 
-public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService {
-
+@Singleton
+@Component(service = {})
+public final class MdsalLowLevelTestProvider {
     private static final Logger LOG = LoggerFactory.getLogger(MdsalLowLevelTestProvider.class);
-    private static final org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType CONTROLLER_CONFIG =
-            org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
 
-    private final RpcProviderRegistry rpcRegistry;
-    private final BindingAwareBroker.RpcRegistration<OdlMdsalLowlevelControlService> registration;
-    private final DistributedShardFactory distributedShardFactory;
+    private final Registration registration;
     private final DistributedDataStoreInterface configDataStore;
-    private final DOMDataTreeService domDataTreeService;
     private final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer;
     private final DOMDataBroker domDataBroker;
     private final NotificationPublishService notificationPublishService;
     private final NotificationService notificationService;
-    private final DOMSchemaService schemaService;
     private final ClusterSingletonServiceProvider singletonService;
     private final DOMRpcProviderService domRpcService;
-    private final PrefixLeaderHandler prefixLeaderHandler;
-    private final PrefixShardHandler prefixShardHandler;
-    private final DOMDataTreeChangeService domDataTreeChangeService;
-    private final ActorSystem actorSystem;
-
-    private final Map<InstanceIdentifier<?>, DOMRpcImplementationRegistration<RoutedGetConstantService>>
-            routedRegistrations = new HashMap<>();
+    private final DataTreeChangeExtension dataTreeChangeExtension;
 
-    private final Map<String, ListenerRegistration<YnlListener>> ynlRegistrations = new HashMap<>();
+    private final Map<InstanceIdentifier<?>, Registration> routedRegistrations = new HashMap<>();
+    private final Map<String, ObjectRegistration<YnlListener>> ynlRegistrations = new HashMap<>();
+    private final Map<String, PublishNotificationsTask> publishNotificationsTasks = new HashMap<>();
 
-    private DOMRpcImplementationRegistration<GetConstantService> globalGetConstantRegistration = null;
-    private ClusterSingletonServiceRegistration getSingletonConstantRegistration;
+    private Registration globalGetConstantRegistration = null;
+    private Registration getSingletonConstantRegistration;
     private FlappingSingletonService flappingSingletonService;
-    private ListenerRegistration<DOMDataTreeChangeListener> dtclReg;
+    private Registration dtclReg;
     private IdIntsListener idIntsListener;
-    private final Map<String, PublishNotificationsTask> publishNotificationsTasks = new HashMap<>();
-    private ListenerRegistration<IdIntsDOMDataTreeLIstener> ddtlReg;
-    private IdIntsDOMDataTreeLIstener idIntsDdtl;
-
-
-
-    public MdsalLowLevelTestProvider(final RpcProviderRegistry rpcRegistry,
-                                     final DOMRpcProviderService domRpcService,
-                                     final ClusterSingletonServiceProvider singletonService,
-                                     final DOMSchemaService schemaService,
-                                     final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer,
-                                     final NotificationPublishService notificationPublishService,
-                                     final NotificationService notificationService,
-                                     final DOMDataBroker domDataBroker,
-                                     final DOMDataTreeService domDataTreeService,
-                                     final DistributedShardFactory distributedShardFactory,
-                                     final DistributedDataStoreInterface configDataStore,
-                                     final ActorSystemProvider actorSystemProvider) {
-        this.rpcRegistry = rpcRegistry;
+
+    @Inject
+    @Activate
+    public MdsalLowLevelTestProvider(
+            @Reference final RpcProviderService rpcRegistry,
+            @Reference final DOMRpcProviderService domRpcService,
+            @Reference final ClusterSingletonServiceProvider singletonService,
+            @Reference final DOMSchemaService schemaService,
+            @Reference final BindingNormalizedNodeSerializer bindingNormalizedNodeSerializer,
+            @Reference final NotificationPublishService notificationPublishService,
+            @Reference final NotificationService notificationService,
+            @Reference final DOMDataBroker domDataBroker,
+            @Reference final DistributedDataStoreInterface configDataStore) {
         this.domRpcService = domRpcService;
         this.singletonService = singletonService;
-        this.schemaService = schemaService;
         this.bindingNormalizedNodeSerializer = bindingNormalizedNodeSerializer;
         this.notificationPublishService = notificationPublishService;
         this.notificationService = notificationService;
         this.domDataBroker = domDataBroker;
-        this.domDataTreeService = domDataTreeService;
-        this.distributedShardFactory = distributedShardFactory;
         this.configDataStore = configDataStore;
-        this.actorSystem = actorSystemProvider.getActorSystem();
 
-        this.prefixLeaderHandler = new PrefixLeaderHandler(domDataTreeService, bindingNormalizedNodeSerializer);
-
-        domDataTreeChangeService =
-                (DOMDataTreeChangeService) domDataBroker.getSupportedExtensions().get(DOMDataTreeChangeService.class);
-
-        registration = rpcRegistry.addRpcImplementation(OdlMdsalLowlevelControlService.class, this);
+        dataTreeChangeExtension = domDataBroker.extension(DataTreeChangeExtension.class);
+
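+        // Each RPC is bound as its generated functional interface to a method reference; the combined
+        // Registration is closed in close().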
+        registration = rpcRegistry.registerRpcImplementations(
+            (UnregisterSingletonConstant) this::unregisterSingletonConstant,
+            (StartPublishNotifications) this::startPublishNotifications,
+            (SubscribeDdtl) this::subscribeDdtl,
+            (WriteTransactions) this::writeTransactions,
+            (IsClientAborted) this::isClientAborted,
+            (RemoveShardReplica) this::removeShardReplica,
+            (SubscribeYnl) this::subscribeYnl,
+            (UnregisterBoundConstant) this::unregisterBoundConstant,
+            (RegisterSingletonConstant) this::registerSingletonConstant,
+            (RegisterDefaultConstant) this::registerDefaultConstant,
+            (UnregisterConstant) this::unregisterConstant,
+            (UnregisterFlappingSingleton) this::unregisterFlappingSingleton,
+            (AddShardReplica) this::addShardReplica,
+            (RegisterBoundConstant) this::registerBoundConstant,
+            (RegisterFlappingSingleton) this::registerFlappingSingleton,
+            (UnsubscribeDdtl) this::unsubscribeDdtl,
+            (UnsubscribeYnl) this::unsubscribeYnl,
+            (CheckPublishNotifications) this::checkPublishNotifications,
+            (ShutdownShardReplica) this::shutdownShardReplica,
+            (RegisterConstant) this::registerConstant,
+            (UnregisterDefaultConstant) this::unregisterDefaultConstant,
+            (SubscribeDtcl) this::subscribeDtcl,
+            (UnsubscribeDtcl) this::unsubscribeDtcl);
+    }
 
-        prefixShardHandler = new PrefixShardHandler(distributedShardFactory, domDataTreeService,
-                bindingNormalizedNodeSerializer);
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        registration.close();
     }
 
-    @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public ListenableFuture<RpcResult<UnregisterSingletonConstantOutput>> unregisterSingletonConstant(
+    private ListenableFuture<RpcResult<UnregisterSingletonConstantOutput>> unregisterSingletonConstant(
             final UnregisterSingletonConstantInput input) {
         LOG.info("In unregisterSingletonConstant");
 
         if (getSingletonConstantRegistration == null) {
-            return RpcResultBuilder.<UnregisterSingletonConstantOutput>failed().withError(ErrorType.RPC, "data-missing",
-                    "No prior RPC was registered").buildFuture();
+            return RpcResultBuilder.<UnregisterSingletonConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+                .buildFuture();
         }
 
         try {
@@ -252,13 +250,12 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<StartPublishNotificationsOutput>> startPublishNotifications(
+    private ListenableFuture<RpcResult<StartPublishNotificationsOutput>> startPublishNotifications(
             final StartPublishNotificationsInput input) {
         LOG.info("In startPublishNotifications - input: {}", input);
 
         final PublishNotificationsTask task = new PublishNotificationsTask(notificationPublishService, input.getId(),
-                input.getSeconds(), input.getNotificationsPerSecond());
+                input.getSeconds().toJava(), input.getNotificationsPerSecond().toJava());
 
         publishNotificationsTasks.put(input.getId(), task);
 
@@ -267,97 +264,85 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(new StartPublishNotificationsOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<SubscribeDtclOutput>> subscribeDtcl(final SubscribeDtclInput input) {
+    private ListenableFuture<RpcResult<SubscribeDtclOutput>> subscribeDtcl(final SubscribeDtclInput input) {
         LOG.info("In subscribeDtcl - input: {}", input);
 
         if (dtclReg != null) {
-            return RpcResultBuilder.<SubscribeDtclOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already a DataTreeChangeListener registered for id-ints").buildFuture();
+            return RpcResultBuilder.<SubscribeDtclOutput>failed().withError(ErrorType.RPC, ErrorTag.DATA_EXISTS,
+                "There is already a DataTreeChangeListener registered for id-ints")
+                .buildFuture();
         }
 
         idIntsListener = new IdIntsListener();
 
-        dtclReg = domDataTreeChangeService
-                .registerDataTreeChangeListener(
-                        new org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier(
-                                CONTROLLER_CONFIG, WriteTransactionsHandler.ID_INT_YID),
-                        idIntsListener);
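+        // The listener is now registered through the DataTreeChangeExtension with a DOMDataTreeIdentifier
+        // addressing the id-ints subtree.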
+        dtclReg = dataTreeChangeExtension.registerTreeChangeListener(
+            DOMDataTreeIdentifier.of(LogicalDatastoreType.CONFIGURATION, WriteTransactionsHandler.ID_INT_YID),
+            idIntsListener);
 
         return RpcResultBuilder.success(new SubscribeDtclOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<WriteTransactionsOutput>> writeTransactions(final WriteTransactionsInput input) {
+    private ListenableFuture<RpcResult<WriteTransactionsOutput>> writeTransactions(final WriteTransactionsInput input) {
         return WriteTransactionsHandler.start(domDataBroker, input);
     }
 
-    @Override
-    public ListenableFuture<RpcResult<IsClientAbortedOutput>> isClientAborted(final IsClientAbortedInput input) {
+    private ListenableFuture<RpcResult<IsClientAbortedOutput>> isClientAborted(final IsClientAbortedInput input) {
         return null;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
+    private ListenableFuture<RpcResult<RemoveShardReplicaOutput>> removeShardReplica(
             final RemoveShardReplicaInput input) {
         return null;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<SubscribeYnlOutput>> subscribeYnl(final SubscribeYnlInput input) {
+    private ListenableFuture<RpcResult<SubscribeYnlOutput>> subscribeYnl(final SubscribeYnlInput input) {
         LOG.info("In subscribeYnl - input: {}", input);
 
         if (ynlRegistrations.containsKey(input.getId())) {
-            return RpcResultBuilder.<SubscribeYnlOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already a listener registered for id: " + input.getId()).buildFuture();
+            return RpcResultBuilder.<SubscribeYnlOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS,
+                    "There is already a listener registered for id: " + input.getId())
+                .buildFuture();
         }
 
-        ynlRegistrations.put(input.getId(),
-                notificationService.registerNotificationListener(new YnlListener(input.getId())));
+        final var id = input.getId();
+        final var listener = new YnlListener(id);
+        final var reg = notificationService.registerListener(IdSequence.class, listener);
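+        // Wrap the registration so the YnlListener instance stays reachable while close() delegates to the
+        // underlying Registration.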
+        ynlRegistrations.put(id, new AbstractObjectRegistration<>(listener) {
+            @Override
+            protected void removeRegistration() {
+                reg.close();
+            }
+        });
 
         return RpcResultBuilder.success(new SubscribeYnlOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RemovePrefixShardOutput>> removePrefixShard(final RemovePrefixShardInput input) {
-        LOG.info("In removePrefixShard - input: {}", input);
-
-        return prefixShardHandler.onRemovePrefixShard(input);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<BecomePrefixLeaderOutput>> becomePrefixLeader(
-            final BecomePrefixLeaderInput input) {
-        LOG.info("n becomePrefixLeader - input: {}", input);
 
-        return prefixLeaderHandler.makeLeaderLocal(input);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<UnregisterBoundConstantOutput>> unregisterBoundConstant(
+    private ListenableFuture<RpcResult<UnregisterBoundConstantOutput>> unregisterBoundConstant(
             final UnregisterBoundConstantInput input) {
         LOG.info("In unregisterBoundConstant - {}", input);
 
-        final DOMRpcImplementationRegistration<RoutedGetConstantService> rpcRegistration =
-                routedRegistrations.remove(input.getContext());
-
+        final var rpcRegistration = routedRegistrations.remove(input.getContext());
         if (rpcRegistration == null) {
-            return RpcResultBuilder.<UnregisterBoundConstantOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior RPC was registered for " + input.getContext()).buildFuture();
+            return RpcResultBuilder.<UnregisterBoundConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING,
+                    "No prior RPC was registered for " + input.getContext())
+                .buildFuture();
         }
 
         rpcRegistration.close();
         return RpcResultBuilder.success(new UnregisterBoundConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterSingletonConstantOutput>> registerSingletonConstant(
+    private ListenableFuture<RpcResult<RegisterSingletonConstantOutput>> registerSingletonConstant(
             final RegisterSingletonConstantInput input) {
         LOG.info("In registerSingletonConstant - input: {}", input);
 
         if (input.getConstant() == null) {
-            return RpcResultBuilder.<RegisterSingletonConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+            return RpcResultBuilder.<RegisterSingletonConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null")
+                .buildFuture();
         }
 
         getSingletonConstantRegistration =
@@ -366,36 +351,35 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(new RegisterSingletonConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterDefaultConstantOutput>> registerDefaultConstant(
+    private ListenableFuture<RpcResult<RegisterDefaultConstantOutput>> registerDefaultConstant(
             final RegisterDefaultConstantInput input) {
         return null;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterConstantOutput>> unregisterConstant(
+    private ListenableFuture<RpcResult<UnregisterConstantOutput>> unregisterConstant(
             final UnregisterConstantInput input) {
         LOG.info("In unregisterConstant");
 
         if (globalGetConstantRegistration == null) {
-            return RpcResultBuilder.<UnregisterConstantOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior RPC was registered").buildFuture();
+            return RpcResultBuilder.<UnregisterConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+                .buildFuture();
         }
 
         globalGetConstantRegistration.close();
         globalGetConstantRegistration = null;
 
-        return Futures.immediateFuture(RpcResultBuilder.success(new UnregisterConstantOutputBuilder().build()).build());
+        return RpcResultBuilder.success(new UnregisterConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterFlappingSingletonOutput>> unregisterFlappingSingleton(
+    private ListenableFuture<RpcResult<UnregisterFlappingSingletonOutput>> unregisterFlappingSingleton(
             final UnregisterFlappingSingletonInput input) {
         LOG.info("In unregisterFlappingSingleton");
 
         if (flappingSingletonService == null) {
-            return RpcResultBuilder.<UnregisterFlappingSingletonOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior RPC was registered").buildFuture();
+            return RpcResultBuilder.<UnregisterFlappingSingletonOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior RPC was registered")
+                .buildFuture();
         }
 
         final long flapCount = flappingSingletonService.setInactive();
@@ -405,72 +389,49 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
                 .buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
-        return null;
+    private ListenableFuture<RpcResult<AddShardReplicaOutput>> addShardReplica(final AddShardReplicaInput input) {
+        throw new UnsupportedOperationException();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<SubscribeDdtlOutput>> subscribeDdtl(final SubscribeDdtlInput input) {
-        LOG.info("In subscribeDdtl");
-
-        if (ddtlReg != null) {
-            return RpcResultBuilder.<SubscribeDdtlOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already a listener registered for id-ints").buildFuture();
-        }
-
-        idIntsDdtl = new IdIntsDOMDataTreeLIstener();
-
-        try {
-            ddtlReg = domDataTreeService.registerListener(idIntsDdtl,
-                    Collections.singleton(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
-                            ProduceTransactionsHandler.ID_INT_YID)),
-                    true, Collections.emptyList());
-        } catch (DOMDataTreeLoopException e) {
-            LOG.error("Failed to register DOMDataTreeListener", e);
-            return RpcResultBuilder.<SubscribeDdtlOutput>failed().withError(
-                ErrorType.APPLICATION, "Failed to register DOMDataTreeListener", e).buildFuture();
-        }
-
-        return RpcResultBuilder.success(new SubscribeDdtlOutputBuilder().build()).buildFuture();
+    private ListenableFuture<RpcResult<SubscribeDdtlOutput>> subscribeDdtl(final SubscribeDdtlInput input) {
+        throw new UnsupportedOperationException();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterBoundConstantOutput>> registerBoundConstant(
+    private ListenableFuture<RpcResult<RegisterBoundConstantOutput>> registerBoundConstant(
             final RegisterBoundConstantInput input) {
         LOG.info("In registerBoundConstant - input: {}", input);
 
         if (input.getContext() == null) {
             return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Context value is null").buildFuture();
+                    ErrorType.RPC, ErrorTag.INVALID_VALUE, "Context value is null").buildFuture();
         }
 
         if (input.getConstant() == null) {
             return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+                    ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null").buildFuture();
         }
 
         if (routedRegistrations.containsKey(input.getContext())) {
             return RpcResultBuilder.<RegisterBoundConstantOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already an rpc registered for context: " + input.getContext()).buildFuture();
+                ErrorTag.DATA_EXISTS, "There is already an rpc registered for context: " + input.getContext())
+                .buildFuture();
         }
 
-        final DOMRpcImplementationRegistration<RoutedGetConstantService> rpcRegistration =
-                RoutedGetConstantService.registerNew(bindingNormalizedNodeSerializer, domRpcService,
-                        input.getConstant(), input.getContext());
+        final var rpcRegistration = RoutedGetConstantService.registerNew(bindingNormalizedNodeSerializer, domRpcService,
+            input.getConstant(), input.getContext());
 
         routedRegistrations.put(input.getContext(), rpcRegistration);
         return RpcResultBuilder.success(new RegisterBoundConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterFlappingSingletonOutput>> registerFlappingSingleton(
+    private ListenableFuture<RpcResult<RegisterFlappingSingletonOutput>> registerFlappingSingleton(
             final RegisterFlappingSingletonInput input) {
         LOG.info("In registerFlappingSingleton");
 
         if (flappingSingletonService != null) {
-            return RpcResultBuilder.<RegisterFlappingSingletonOutput>failed().withError(ErrorType.RPC,
-                "data-exists", "There is already an rpc registered").buildFuture();
+            return RpcResultBuilder.<RegisterFlappingSingletonOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS, "There is already an rpc registered")
+                .buildFuture();
         }
 
         flappingSingletonService = new FlappingSingletonService(singletonService);
@@ -478,13 +439,13 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(new RegisterFlappingSingletonOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnsubscribeDtclOutput>> unsubscribeDtcl(final UnsubscribeDtclInput input) {
+    private ListenableFuture<RpcResult<UnsubscribeDtclOutput>> unsubscribeDtcl(final UnsubscribeDtclInput input) {
         LOG.info("In unsubscribeDtcl");
 
         if (idIntsListener == null || dtclReg == null) {
-            return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(
-                    ErrorType.RPC, "data-missing", "No prior listener was registered").buildFuture();
+            return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, "No prior listener was registered")
+                .buildFuture();
         }
 
         long timeout = 120L;
@@ -500,27 +461,29 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         dtclReg = null;
 
         if (!idIntsListener.hasTriggered()) {
-            return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(ErrorType.APPLICATION, "operation-failed",
-                    "id-ints listener has not received any notifications.").buildFuture();
+            return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+                .withError(ErrorType.APPLICATION, ErrorTag.OPERATION_FAILED,
+                    "id-ints listener has not received any notifications.")
+                .buildFuture();
         }
 
-        final DOMDataReadOnlyTransaction rTx = domDataBroker.newReadOnlyTransaction();
-        try {
-            final Optional<NormalizedNode<?, ?>> readResult =
-                    rTx.read(CONTROLLER_CONFIG, WriteTransactionsHandler.ID_INT_YID).get();
+        try (DOMDataTreeReadTransaction rTx = domDataBroker.newReadOnlyTransaction()) {
+            final Optional<NormalizedNode> readResult = rTx.read(LogicalDatastoreType.CONFIGURATION,
+                WriteTransactionsHandler.ID_INT_YID).get();
 
             if (!readResult.isPresent()) {
-                return RpcResultBuilder.<UnsubscribeDtclOutput>failed().withError(ErrorType.APPLICATION, "data-missing",
-                        "No data read from id-ints list").buildFuture();
+                return RpcResultBuilder.<UnsubscribeDtclOutput>failed()
+                    .withError(ErrorType.APPLICATION, ErrorTag.DATA_MISSING, "No data read from id-ints list")
+                    .buildFuture();
             }
 
-            final boolean nodesEqual = idIntsListener.checkEqual(readResult.get());
+            final boolean nodesEqual = idIntsListener.checkEqual(readResult.orElseThrow());
             if (!nodesEqual) {
                 LOG.error("Final read of id-int does not match IdIntsListener's copy. {}",
-                        idIntsListener.diffWithLocalCopy(readResult.get()));
+                        idIntsListener.diffWithLocalCopy(readResult.orElseThrow()));
             }
 
-            return RpcResultBuilder.success(new UnsubscribeDtclOutputBuilder().setCopyMatches(nodesEqual))
+            return RpcResultBuilder.success(new UnsubscribeDtclOutputBuilder().setCopyMatches(nodesEqual).build())
                     .buildFuture();
 
         } catch (final InterruptedException | ExecutionException e) {
@@ -530,46 +493,32 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<CreatePrefixShardOutput>> createPrefixShard(final CreatePrefixShardInput input) {
-        LOG.info("In createPrefixShard - input: {}", input);
-
-        return prefixShardHandler.onCreatePrefixShard(input);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<DeconfigureIdIntsShardOutput>> deconfigureIdIntsShard(
-            final DeconfigureIdIntsShardInput input) {
-        return null;
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<UnsubscribeYnlOutput>> unsubscribeYnl(final UnsubscribeYnlInput input) {
+    private ListenableFuture<RpcResult<UnsubscribeYnlOutput>> unsubscribeYnl(final UnsubscribeYnlInput input) {
         LOG.info("In unsubscribeYnl - input: {}", input);
 
         if (!ynlRegistrations.containsKey(input.getId())) {
-            return RpcResultBuilder.<UnsubscribeYnlOutput>failed().withError(
-                ErrorType.RPC, "data-missing", "No prior listener was registered for " + input.getId()).buildFuture();
+            return RpcResultBuilder.<UnsubscribeYnlOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING,
+                    "No prior listener was registered for " + input.getId())
+                .buildFuture();
         }
 
-        final ListenerRegistration<YnlListener> reg = ynlRegistrations.remove(input.getId());
-        final UnsubscribeYnlOutput output = reg.getInstance().getOutput();
-
-        reg.close();
-
-        return RpcResultBuilder.<UnsubscribeYnlOutput>success().withResult(output).buildFuture();
+        try (var reg = ynlRegistrations.remove(input.getId())) {
+            return RpcResultBuilder.<UnsubscribeYnlOutput>success()
+                .withResult(reg.getInstance().getOutput())
+                .buildFuture();
+        }
     }
 
-    @Override
-    public ListenableFuture<RpcResult<CheckPublishNotificationsOutput>> checkPublishNotifications(
+    private ListenableFuture<RpcResult<CheckPublishNotificationsOutput>> checkPublishNotifications(
             final CheckPublishNotificationsInput input) {
         LOG.info("In checkPublishNotifications - input: {}", input);
 
         final PublishNotificationsTask task = publishNotificationsTasks.get(input.getId());
 
         if (task == null) {
-            return Futures.immediateFuture(RpcResultBuilder.success(
-                    new CheckPublishNotificationsOutputBuilder().setActive(false)).build());
+            return RpcResultBuilder.success(new CheckPublishNotificationsOutputBuilder().setActive(false).build())
+                .buildFuture();
         }
 
         final CheckPublishNotificationsOutputBuilder checkPublishNotificationsOutputBuilder =
@@ -586,45 +535,20 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return RpcResultBuilder.success(output).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ProduceTransactionsOutput>> produceTransactions(
-            final ProduceTransactionsInput input) {
-        LOG.info("In produceTransactions - input: {}", input);
-        return ProduceTransactionsHandler.start(domDataTreeService, input);
-    }
-
-    @Override
-    public ListenableFuture<RpcResult<ShutdownShardReplicaOutput>> shutdownShardReplica(
+    private ListenableFuture<RpcResult<ShutdownShardReplicaOutput>> shutdownShardReplica(
             final ShutdownShardReplicaInput input) {
         LOG.info("In shutdownShardReplica - input: {}", input);
 
         final String shardName = input.getShardName();
         if (Strings.isNullOrEmpty(shardName)) {
-            return RpcResultBuilder.<ShutdownShardReplicaOutput>failed().withError(ErrorType.RPC, "bad-element",
-                shardName + "is not a valid shard name").buildFuture();
+            return RpcResultBuilder.<ShutdownShardReplicaOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.BAD_ELEMENT, shardName + "is not a valid shard name")
+                .buildFuture();
         }
 
         return shutdownShardGracefully(shardName, new ShutdownShardReplicaOutputBuilder().build());
     }
 
-    @Override
-    public ListenableFuture<RpcResult<ShutdownPrefixShardReplicaOutput>> shutdownPrefixShardReplica(
-            final ShutdownPrefixShardReplicaInput input) {
-        LOG.info("shutdownPrefixShardReplica - input: {}", input);
-
-        final InstanceIdentifier<?> shardPrefix = input.getPrefix();
-
-        if (shardPrefix == null) {
-            return RpcResultBuilder.<ShutdownPrefixShardReplicaOutput>failed().withError(ErrorType.RPC, "bad-element",
-                    "A valid shard prefix must be specified").buildFuture();
-        }
-
-        final YangInstanceIdentifier shardPath = bindingNormalizedNodeSerializer.toYangInstanceIdentifier(shardPrefix);
-        final String cleanPrefixShardName = ClusterUtils.getCleanShardName(shardPath);
-
-        return shutdownShardGracefully(cleanPrefixShardName, new ShutdownPrefixShardReplicaOutputBuilder().build());
-    }
-
     private <T> SettableFuture<RpcResult<T>> shutdownShardGracefully(final String shardName, final T success) {
         final SettableFuture<RpcResult<T>> rpcResult = SettableFuture.create();
         final ActorUtils context = configDataStore.getActorUtils();
@@ -632,7 +556,7 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         long timeoutInMS = Math.max(context.getDatastoreContext().getShardRaftConfig()
                 .getElectionTimeOutInterval().$times(3).toMillis(), 10000);
         final FiniteDuration duration = FiniteDuration.apply(timeoutInMS, TimeUnit.MILLISECONDS);
-        final scala.concurrent.Promise<Boolean> shutdownShardAsk = akka.dispatch.Futures.promise();
+        final scala.concurrent.Promise<Boolean> shutdownShardAsk = Futures.promise();
 
         context.findLocalShardAsync(shardName).onComplete(new OnComplete<ActorRef>() {
             @Override
@@ -662,101 +586,32 @@ public class MdsalLowLevelTestProvider implements OdlMdsalLowlevelControlService
         return rpcResult;
     }
 
-    @Override
-    public ListenableFuture<RpcResult<RegisterConstantOutput>> registerConstant(final RegisterConstantInput input) {
+    private ListenableFuture<RpcResult<RegisterConstantOutput>> registerConstant(final RegisterConstantInput input) {
         LOG.info("In registerConstant - input: {}", input);
 
         if (input.getConstant() == null) {
-            return RpcResultBuilder.<RegisterConstantOutput>failed().withError(
-                    ErrorType.RPC, "invalid-value", "Constant value is null").buildFuture();
+            return RpcResultBuilder.<RegisterConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.INVALID_VALUE, "Constant value is null")
+                .buildFuture();
         }
 
         if (globalGetConstantRegistration != null) {
-            return RpcResultBuilder.<RegisterConstantOutput>failed().withError(ErrorType.RPC,
-                    "data-exists", "There is already an rpc registered").buildFuture();
+            return RpcResultBuilder.<RegisterConstantOutput>failed()
+                .withError(ErrorType.RPC, ErrorTag.DATA_EXISTS, "There is already an rpc registered")
+                .buildFuture();
         }
 
         globalGetConstantRegistration = GetConstantService.registerNew(domRpcService, input.getConstant());
         return RpcResultBuilder.success(new RegisterConstantOutputBuilder().build()).buildFuture();
     }
 
-    @Override
-    public ListenableFuture<RpcResult<UnregisterDefaultConstantOutput>> unregisterDefaultConstant(
+    private ListenableFuture<RpcResult<UnregisterDefaultConstantOutput>> unregisterDefaultConstant(
             final UnregisterDefaultConstantInput input) {
-        return null;
+        throw new UnsupportedOperationException();
     }
 
-    @Override
     @SuppressWarnings("checkstyle:IllegalCatch")
-    public ListenableFuture<RpcResult<UnsubscribeDdtlOutput>> unsubscribeDdtl(final UnsubscribeDdtlInput input) {
-        LOG.info("In unsubscribeDdtl");
-
-        if (idIntsDdtl == null || ddtlReg == null) {
-            return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(
-                    ErrorType.RPC, "data-missing", "No prior listener was registered").buildFuture();
-        }
-
-        long timeout = 120L;
-        try {
-            idIntsDdtl.tryFinishProcessing().get(timeout, TimeUnit.SECONDS);
-        } catch (InterruptedException | ExecutionException | TimeoutException e) {
-            LOG.error("Unable to finish notification processing", e);
-            return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(ErrorType.APPLICATION,
-                    "Unable to finish notification processing in " + timeout + " seconds", e).buildFuture();
-        }
-
-        ddtlReg.close();
-        ddtlReg = null;
-
-        if (!idIntsDdtl.hasTriggered()) {
-            return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(ErrorType.APPLICATION,
-                    "No notification received.", "id-ints listener has not received any notifications").buildFuture();
-        }
-
-        final String shardName = ClusterUtils.getCleanShardName(ProduceTransactionsHandler.ID_INTS_YID);
-        LOG.debug("Creating distributed datastore client for shard {}", shardName);
-
-        final ActorUtils actorUtils = configDataStore.getActorUtils();
-        final Props distributedDataStoreClientProps =
-                SimpleDataStoreClientActor.props(actorUtils.getCurrentMemberName(),
-                        "Shard-" + shardName, actorUtils, shardName);
-
-        final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
-        final DataStoreClient distributedDataStoreClient;
-        try {
-            distributedDataStoreClient = SimpleDataStoreClientActor
-                    .getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
-        } catch (RuntimeException e) {
-            LOG.error("Failed to get actor for {}", distributedDataStoreClientProps, e);
-            clientActor.tell(PoisonPill.getInstance(), noSender());
-            return RpcResultBuilder.<UnsubscribeDdtlOutput>failed()
-                    .withError(ErrorType.APPLICATION, "Unable to create DataStoreClient for read", e).buildFuture();
-        }
-
-        final ClientLocalHistory localHistory = distributedDataStoreClient.createLocalHistory();
-        final ClientTransaction tx = localHistory.createTransaction();
-        final ListenableFuture<java.util.Optional<NormalizedNode<?, ?>>> read =
-                tx.read(YangInstanceIdentifier.of(ProduceTransactionsHandler.ID_INT));
-
-        tx.abort();
-        localHistory.close();
-        try {
-            final java.util.Optional<NormalizedNode<?, ?>> optional = read.get();
-            if (!optional.isPresent()) {
-                return RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withError(ErrorType.APPLICATION,
-                        "data-missing", "Final read from id-ints is empty").buildFuture();
-            }
-
-            return RpcResultBuilder.success(new UnsubscribeDdtlOutputBuilder().setCopyMatches(
-                    idIntsDdtl.checkEqual(optional.get()))).buildFuture();
-
-        } catch (InterruptedException | ExecutionException e) {
-            LOG.error("Unable to read data to verify ddtl data", e);
-            return RpcResultBuilder.<UnsubscribeDdtlOutput>failed()
-                    .withError(ErrorType.APPLICATION, "Final read from id-ints failed", e).buildFuture();
-        } finally {
-            distributedDataStoreClient.close();
-            clientActor.tell(PoisonPill.getInstance(), noSender());
-        }
+    private ListenableFuture<RpcResult<UnsubscribeDdtlOutput>> unsubscribeDdtl(final UnsubscribeDdtlInput input) {
+        throw new UnsupportedOperationException();
     }
 }
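
The hunks above consistently replace free-form error-tag strings such as "data-missing" and "invalid-value" with the ErrorTag constants. A minimal sketch of the resulting failure pattern, using a hypothetical generic helper that is not part of this commit (import locations assume the org.opendaylight.yangtools.yang.common types):

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.yangtools.yang.common.ErrorTag;
    import org.opendaylight.yangtools.yang.common.ErrorType;
    import org.opendaylight.yangtools.yang.common.RpcResult;
    import org.opendaylight.yangtools.yang.common.RpcResultBuilder;

    // Hypothetical helper mirroring the ErrorTag-based failures used above.
    final class RpcFailures {
        private RpcFailures() {
            // utility class
        }

        // Builds a failed RPC future carrying the shared "data-missing" tag.
        static <T> ListenableFuture<RpcResult<T>> dataMissing(final String message) {
            return RpcResultBuilder.<T>failed()
                .withError(ErrorType.RPC, ErrorTag.DATA_MISSING, message)
                .buildFuture();
        }
    }
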
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java
deleted file mode 100644 (file)
index 457d819..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.clustering.it.provider;
-
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutput;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarOutputBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PurchaseCarProvider.class);
-
-    private NotificationProviderService notificationProvider;
-
-
-    public void setNotificationProvider(final NotificationProviderService salService) {
-        this.notificationProvider = salService;
-    }
-
-
-    @Override
-    public ListenableFuture<RpcResult<BuyCarOutput>> buyCar(final BuyCarInput input) {
-        LOG.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
-        final SettableFuture<RpcResult<BuyCarOutput>> futureResult = SettableFuture.create();
-        CarBoughtBuilder carBoughtBuilder = new CarBoughtBuilder();
-        carBoughtBuilder.setCarId(input.getCarId());
-        carBoughtBuilder.setPersonId(input.getPersonId());
-        notificationProvider.publish(carBoughtBuilder.build());
-        futureResult.set(RpcResultBuilder.success(new BuyCarOutputBuilder().build()).build());
-        return futureResult;
-    }
-
-    @Override
-    public void close() {
-    }
-}
index 1a0f39e6936dec0152da35cb7f96ee8e832be959..e821a2b5769211d53ad5cbcc2a43d58ba1f15e31 100644 (file)
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/AbstractTransactionHandler.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/AbstractTransactionHandler.java
@@ -8,18 +8,21 @@
 package org.opendaylight.controller.clustering.it.provider.impl;
 
 import com.google.common.base.Stopwatch;
+import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.TransactionsParams;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -57,19 +60,20 @@ abstract class AbstractTransactionHandler {
     static final long INIT_TX_TIMEOUT_SECONDS = 125;
 
     private static final long DEAD_TIMEOUT_SECONDS = TimeUnit.MINUTES.toSeconds(15);
+    private static final AtomicLong COUNTER = new AtomicLong();
 
     /*
      * writingExecutor is a single thread executor. Only this thread will write to datastore,
      * incurring sleep penalties if backend is not responsive. This thread never changes, but reads State.
      * This thread only adds to futures set.
      */
-    private final ScheduledExecutorService writingExecutor = FinalizableScheduledExecutorService.newSingleThread();
+    private final ScheduledExecutorService writingExecutor = newExecutorService("writing");
     /*
      * completingExecutor is a single thread executor. Only this thread writes to State.
      * This thread should never incur any sleep penalty, so RPC response should always come on time.
      * This thread only removes from futures set.
      */
-    private final ScheduledExecutorService completingExecutor = FinalizableScheduledExecutorService.newSingleThread();
+    private final ScheduledExecutorService completingExecutor = newExecutorService("completing");
     private final Collection<ListenableFuture<?>> futures = Collections.synchronizedSet(new HashSet<>());
     private final Stopwatch stopwatch = Stopwatch.createUnstarted();
     private final long runtimeNanos;
@@ -81,8 +85,8 @@ abstract class AbstractTransactionHandler {
     private volatile State state;
 
     AbstractTransactionHandler(final TransactionsParams params) {
-        runtimeNanos = TimeUnit.SECONDS.toNanos(params.getSeconds());
-        delayNanos = SECOND_AS_NANO / params.getTransactionsPerSecond();
+        runtimeNanos = TimeUnit.SECONDS.toNanos(params.getSeconds().toJava());
+        delayNanos = SECOND_AS_NANO / params.getTransactionsPerSecond().toJava();
     }
 
     final synchronized void doStart() {
@@ -119,14 +123,14 @@ abstract class AbstractTransactionHandler {
 
         // Not completed yet: create a transaction and hook it up
         final long txId = txCounter.incrementAndGet();
-        final ListenableFuture<?> execFuture = execWrite(txId);
+        final FluentFuture<? extends CommitInfo> execFuture = execWrite(txId);
         LOG.debug("New future #{} allocated", txId);
 
         // Ordering is important: we need to add the future before hooking the callback
         futures.add(execFuture);
-        Futures.addCallback(execFuture, new FutureCallback<Object>() {
+        execFuture.addCallback(new FutureCallback<CommitInfo>() {
             @Override
-            public void onSuccess(final Object result) {
+            public void onSuccess(final CommitInfo result) {
                 txSuccess(execFuture, txId);
             }
 
@@ -152,6 +156,7 @@ abstract class AbstractTransactionHandler {
             state = State.SUCCESSFUL;
             completingFuture.cancel(false);
             runSuccessful(txCounter.get());
+            shutdownExecutors();
             return true;
         }
 
@@ -190,6 +195,7 @@ abstract class AbstractTransactionHandler {
                 state = State.FAILED;
                 writingFuture.cancel(false);
                 runFailed(cause, txId);
+                shutdownExecutors();
                 break;
             default:
                 throw new IllegalStateException("Unhandled state " + local);
@@ -223,13 +229,29 @@ abstract class AbstractTransactionHandler {
 
         state = State.FAILED;
         runTimedOut("Transactions did not finish in " + DEAD_TIMEOUT_SECONDS + " seconds");
+        shutdownExecutors();
     }
 
-    abstract ListenableFuture<?> execWrite(long txId);
+    private void shutdownExecutors() {
+        writingExecutor.shutdown();
+        completingExecutor.shutdown();
+    }
+
+    abstract FluentFuture<? extends CommitInfo> execWrite(long txId);
 
     abstract void runFailed(Throwable cause, long txId);
 
     abstract void runSuccessful(long allTx);
 
     abstract void runTimedOut(String cause);
+
+    private ScheduledExecutorService newExecutorService(final String kind) {
+        final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder()
+            .setDaemon(true)
+            .setNameFormat(getClass().getSimpleName() + "-" + kind + "-" + COUNTER.getAndIncrement() + "%d")
+            .build());
+        executor.setKeepAliveTime(15, TimeUnit.SECONDS);
+        executor.allowCoreThreadTimeOut(true);
+        return executor;
+    }
 }
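
The newExecutorService() helper added above replaces the finalizer-based FinalizableScheduledExecutorService (deleted below) with explicitly shut-down, daemon-thread executors that also time out when idle. A self-contained sketch of the same construction, with an illustrative class name and thread-name prefix:

    import com.google.common.util.concurrent.ThreadFactoryBuilder;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Illustrative factory; the names here are not part of the commit.
    final class SingleThreadSchedulers {
        private SingleThreadSchedulers() {
            // utility class
        }

        static ScheduledExecutorService newSingleThread(final String namePrefix) {
            final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder()
                // Daemon threads cannot keep the JVM alive if a shutdown() call is missed ...
                .setDaemon(true)
                .setNameFormat(namePrefix + "-%d")
                .build());
            // ... and an idle core thread is released after 15 seconds anyway.
            executor.setKeepAliveTime(15, TimeUnit.SECONDS);
            executor.allowCoreThreadTimeOut(true);
            return executor;
        }
    }
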
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/FinalizableScheduledExecutorService.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/FinalizableScheduledExecutorService.java
deleted file mode 100644 (file)
index f087303..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2017 Pantheon Technologies, s.r.o. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A simple ScheduledExecutorService, which shuts down its threads after a period of inactivity. It is safe to not
- * shutdown this
- *
- * @author Robert Varga
- */
-final class FinalizableScheduledExecutorService extends ScheduledThreadPoolExecutor {
-
-    private FinalizableScheduledExecutorService(final int maxThreads, final long time, final TimeUnit unit) {
-        super(maxThreads);
-        setKeepAliveTime(time, unit);
-        allowCoreThreadTimeOut(true);
-    }
-
-    static ScheduledThreadPoolExecutor newSingleThread() {
-        return new FinalizableScheduledExecutorService(1, 15, TimeUnit.SECONDS);
-    }
-
-    // This is a bit ugly, but allows
-    @Override
-    @SuppressWarnings("checkstyle:NoFinalizer")
-    protected void finalize() {
-        super.finalize();
-        super.shutdownNow();
-    }
-}
index 90ce618f3d14a403146e44a1c4ed07401eea2f17..afd2d3d0ecacefebffeadd90839c7de60484b439 100644 (file)
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/FlappingSingletonService.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/FlappingSingletonService.java
@@ -5,37 +5,35 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.clustering.it.provider.impl;
 
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class FlappingSingletonService implements ClusterSingletonService {
-
+public final class FlappingSingletonService implements ClusterSingletonService {
     private static final Logger LOG = LoggerFactory.getLogger(FlappingSingletonService.class);
-
     private static final ServiceGroupIdentifier SERVICE_GROUP_IDENTIFIER =
-            ServiceGroupIdentifier.create("flapping-singleton-service");
+            new ServiceGroupIdentifier("flapping-singleton-service");
 
     private final ClusterSingletonServiceProvider singletonServiceProvider;
     private final AtomicBoolean active = new AtomicBoolean(true);
-
     private final AtomicLong flapCount = new AtomicLong();
-    private volatile ClusterSingletonServiceRegistration registration;
+
+    private volatile Registration registration;
 
     public FlappingSingletonService(final ClusterSingletonServiceProvider singletonServiceProvider) {
         LOG.debug("Registering flapping-singleton-service.");
-
-        this.singletonServiceProvider = singletonServiceProvider;
+        this.singletonServiceProvider = requireNonNull(singletonServiceProvider);
         registration = singletonServiceProvider.registerClusterSingletonService(this);
     }
 
index 458e2ed818944515c042b29ff110920ca8c11d54..54320705ac1c9ea68640703a2472ed75814c11d4 100644 (file)
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/GetConstantService.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/GetConstantService.java
@@ -7,36 +7,32 @@
  */
 package org.opendaylight.controller.clustering.it.provider.impl;
 
-import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.common.YangConstants;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class GetConstantService implements DOMRpcImplementation {
-
     private static final Logger LOG = LoggerFactory.getLogger(GetConstantService.class);
 
-    private static final QName OUTPUT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "output");
-    private static final QName CONSTANT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "constant");
-    private static final QName GET_CONSTANT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "get-constant");
+    private static final QNameModule MODULE =
+        QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
+
+    private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
+    private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
+    private static final QName GET_CONSTANT = QName.create(MODULE, "get-constant").intern();
 
     private final String constant;
 
@@ -44,30 +40,19 @@ public final class GetConstantService implements DOMRpcImplementation {
         this.constant = constant;
     }
 
-    public static DOMRpcImplementationRegistration<GetConstantService> registerNew(
-            final DOMRpcProviderService rpcProviderService, final String constant) {
-
+    public static Registration registerNew(final DOMRpcProviderService rpcProviderService, final String constant) {
         LOG.debug("Registering get-constant service, constant value: {}", constant);
-        final DOMRpcIdentifier id = DOMRpcIdentifier.create(SchemaPath.create(true, GET_CONSTANT));
-
-        return rpcProviderService.registerRpcImplementation(new GetConstantService(constant), id);
+        return rpcProviderService.registerRpcImplementation(new GetConstantService(constant),
+            DOMRpcIdentifier.create(GET_CONSTANT));
     }
 
     @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final DOMRpcIdentifier rpc,
-                                                                  final NormalizedNode<?, ?> input) {
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
         LOG.debug("get-constant invoked, current value: {}", constant);
 
-        final LeafNode<Object> value = ImmutableLeafNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(CONSTANT))
-                .withValue(constant)
-                .build();
-
-        final ContainerNode result = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(OUTPUT))
-                .withChild(value)
-                .build();
-
-        return Futures.immediateCheckedFuture(new DefaultDOMRpcResult(result));
+        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(OUTPUT))
+            .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
+            .build()));
     }
 }
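
With SchemaPath and the Immutable*NodeBuilder classes gone, the get-constant output above is assembled through ImmutableNodes. The same construction as a standalone sketch (class and method names are illustrative; the QNames are copied from the hunk):

    import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
    import org.opendaylight.yangtools.yang.common.QName;
    import org.opendaylight.yangtools.yang.common.QNameModule;
    import org.opendaylight.yangtools.yang.common.YangConstants;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
    import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;

    final class GetConstantOutputs {
        private static final QNameModule MODULE =
            QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();

        private GetConstantOutputs() {
            // utility class
        }

        // Wraps the constant value in the RPC's output container.
        static DefaultDOMRpcResult constantResult(final String constant) {
            return new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
                .withNodeIdentifier(new NodeIdentifier(YangConstants.operationOutputQName(MODULE)))
                .withChild(ImmutableNodes.leafNode(QName.create(MODULE, "constant"), constant))
                .build());
        }
    }
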
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsDOMDataTreeLIstener.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsDOMDataTreeLIstener.java
deleted file mode 100644 (file)
index 574952e..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListener;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeListeningException;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class IdIntsDOMDataTreeLIstener implements DOMDataTreeListener {
-
-    private static final Logger LOG = LoggerFactory.getLogger(IdIntsDOMDataTreeLIstener.class);
-    private static final long SECOND_AS_NANO = 1000000000;
-
-    private NormalizedNode<?, ?> localCopy = null;
-    private final AtomicLong lastNotifTimestamp = new AtomicLong(0);
-    private ScheduledFuture<?> scheduledFuture;
-    private ScheduledExecutorService executorService;
-
-    @Override
-    public void onDataTreeChanged(final Collection<DataTreeCandidate> changes,
-                                  final Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>> subtrees) {
-
-        // There should only be one candidate reported
-        Preconditions.checkState(changes.size() == 1);
-
-        lastNotifTimestamp.set(System.nanoTime());
-
-        // do not log the change into debug, only use trace since it will lead to OOM on default heap settings
-        LOG.debug("Received data tree changed");
-
-        changes.forEach(change -> {
-            if (change.getRootNode().getDataAfter().isPresent()) {
-                LOG.trace("Received change, data before: {}, data after: {}",
-                        change.getRootNode().getDataBefore().isPresent()
-                                ? change.getRootNode().getDataBefore().get() : "",
-                        change.getRootNode().getDataAfter().get());
-
-                if (localCopy == null || checkEqual(change.getRootNode().getDataBefore().get())) {
-                    localCopy = change.getRootNode().getDataAfter().get();
-                } else {
-                    LOG.warn("Ignoring notification.");
-                    LOG.trace("Ignored notification content: {}", change);
-                }
-            } else {
-                LOG.warn("getDataAfter() is missing from notification. change: {}", change);
-            }
-        });
-    }
-
-    @Override
-    public void onDataTreeFailed(Collection<DOMDataTreeListeningException> causes) {
-
-    }
-
-    public boolean hasTriggered() {
-        return localCopy != null;
-    }
-
-    public Future<Void> tryFinishProcessing() {
-        executorService = Executors.newSingleThreadScheduledExecutor();
-        final SettableFuture<Void> settableFuture = SettableFuture.create();
-
-        scheduledFuture = executorService.scheduleAtFixedRate(new CheckFinishedTask(settableFuture),
-                0, 1, TimeUnit.SECONDS);
-        return settableFuture;
-    }
-
-    public boolean checkEqual(final NormalizedNode<?, ?> expected) {
-        return localCopy.equals(expected);
-    }
-
-    private class CheckFinishedTask implements Runnable {
-
-        private final SettableFuture<Void> future;
-
-        CheckFinishedTask(final SettableFuture<Void> future) {
-            this.future = future;
-        }
-
-        @Override
-        public void run() {
-            if (System.nanoTime() - lastNotifTimestamp.get() > SECOND_AS_NANO * 4) {
-                scheduledFuture.cancel(false);
-                future.set(null);
-
-                executorService.shutdown();
-            }
-        }
-    }
-}
index 2727f529b255c102f2f46557dfa6f55d83245858..70f6f7811421164534b6b0b3d78728d97786f2b1 100644 (file)
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsListener.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/IdIntsListener.java
@@ -7,45 +7,49 @@
  */
 package org.opendaylight.controller.clustering.it.provider.impl;
 
+import static com.google.common.base.Preconditions.checkState;
 import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ITEM;
 
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.SettableFuture;
 import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import java.util.Collection;
 import java.util.HashMap;
-import java.util.Map;
+import java.util.List;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.md.sal.dom.api.ClusteredDOMDataTreeChangeListener;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
-
+public final class IdIntsListener implements DOMDataTreeChangeListener {
     private static final Logger LOG = LoggerFactory.getLogger(IdIntsListener.class);
     private static final long SECOND_AS_NANO = 1000000000;
 
-    private volatile NormalizedNode<?, ?> localCopy;
     private final AtomicLong lastNotifTimestamp = new AtomicLong(0);
-    private ScheduledExecutorService executorService;
-    private ScheduledFuture<?> scheduledFuture;
+    private ScheduledExecutorService executorService = null;
+    private ScheduledFuture<?> scheduledFuture = null;
+
+    private volatile NormalizedNode localCopy;
 
     @Override
-    public void onDataTreeChanged(final Collection<DataTreeCandidate> changes) {
+    public void onInitialData() {
+        // Intentional no-op
+    }
+
+    @Override
+    public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
 
         // There should only be one candidate reported
-        Preconditions.checkState(changes.size() == 1);
+        checkState(changes.size() == 1);
 
         lastNotifTimestamp.set(System.nanoTime());
 
@@ -53,13 +57,12 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
         LOG.debug("Received data tree changed");
 
         changes.forEach(change -> {
-            if (change.getRootNode().getDataAfter().isPresent()) {
-                LOG.trace("Received change, data before: {}, data after: {}",
-                        change.getRootNode().getDataBefore().isPresent()
-                                ? change.getRootNode().getDataBefore().get() : "",
-                        change.getRootNode().getDataAfter().get());
-
-                localCopy = change.getRootNode().getDataAfter().get();
+            final var root = change.getRootNode();
+            final var after = root.dataAfter();
+            if (after != null) {
+                final var before = root.dataBefore();
+                LOG.trace("Received change, data before: {}, data after: {}", before != null ? before : "", after);
+                localCopy = after;
             } else {
                 LOG.warn("getDataAfter() is missing from notification. change: {}", change);
             }
@@ -70,18 +73,18 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
         return localCopy != null;
     }
 
-    public boolean checkEqual(final NormalizedNode<?, ?> expected) {
+    public boolean checkEqual(final NormalizedNode expected) {
         return localCopy.equals(expected);
     }
 
     @SuppressFBWarnings("BC_UNCONFIRMED_CAST")
-    public String diffWithLocalCopy(final NormalizedNode<?, ?> expected) {
+    public String diffWithLocalCopy(final NormalizedNode expected) {
         return diffNodes((MapNode)expected, (MapNode)localCopy);
     }
 
     public Future<Void> tryFinishProcessing() {
         executorService = Executors.newSingleThreadScheduledExecutor();
-        final SettableFuture<Void> settableFuture = SettableFuture.create();
+        final var settableFuture = SettableFuture.<Void>create();
 
         scheduledFuture = executorService.scheduleAtFixedRate(new CheckFinishedTask(settableFuture),
                 0, 1, TimeUnit.SECONDS);
@@ -91,43 +94,42 @@ public class IdIntsListener implements ClusteredDOMDataTreeChangeListener {
     public static String diffNodes(final MapNode expected, final MapNode actual) {
         StringBuilder builder = new StringBuilder("MapNodes diff:");
 
-        final YangInstanceIdentifier.NodeIdentifier itemNodeId = new YangInstanceIdentifier.NodeIdentifier(ITEM);
+        final var itemNodeId = new NodeIdentifier(ITEM);
 
-        Map<NodeIdentifierWithPredicates, MapEntryNode> expIdIntMap = new HashMap<>();
-        expected.getValue().forEach(node -> expIdIntMap.put(node.getIdentifier(), node));
+        final var expIdIntMap = new HashMap<NodeIdentifierWithPredicates, MapEntryNode>();
+        expected.body().forEach(node -> expIdIntMap.put(node.name(), node));
 
-        actual.getValue().forEach(actIdInt -> {
-            final MapEntryNode expIdInt = expIdIntMap.remove(actIdInt.getIdentifier());
+        actual.body().forEach(actIdInt -> {
+            final var expIdInt = expIdIntMap.remove(actIdInt.name());
             if (expIdInt == null) {
-                builder.append('\n').append("  Unexpected id-int entry for ").append(actIdInt.getIdentifier());
+                builder.append('\n').append("  Unexpected id-int entry for ").append(actIdInt.name());
                 return;
             }
 
-            Map<NodeIdentifierWithPredicates, MapEntryNode> expItemMap = new HashMap<>();
-            ((MapNode)expIdInt.getChild(itemNodeId).get()).getValue()
-                .forEach(node -> expItemMap.put(node.getIdentifier(), node));
+            final var expItemMap = new HashMap<NodeIdentifierWithPredicates, MapEntryNode>();
+            ((MapNode)expIdInt.getChildByArg(itemNodeId)).body()
+                .forEach(node -> expItemMap.put(node.name(), node));
 
-            ((MapNode)actIdInt.getChild(itemNodeId).get()).getValue().forEach(actItem -> {
-                final MapEntryNode expItem = expItemMap.remove(actItem.getIdentifier());
+            ((MapNode)actIdInt.getChildByArg(itemNodeId)).body().forEach(actItem -> {
+                final var expItem = expItemMap.remove(actItem.name());
                 if (expItem == null) {
-                    builder.append('\n').append("  Unexpected item entry ").append(actItem.getIdentifier())
-                        .append(" for id-int entry ").append(actIdInt.getIdentifier());
+                    builder.append('\n').append("  Unexpected item entry ").append(actItem.name())
+                        .append(" for id-int entry ").append(actIdInt.name());
                 }
             });
 
             expItemMap.values().forEach(node -> builder.append('\n')
-                .append("  Actual is missing item entry ").append(node.getIdentifier())
-                    .append(" for id-int entry ").append(actIdInt.getIdentifier()));
+                .append("  Actual is missing item entry ").append(node.name())
+                    .append(" for id-int entry ").append(actIdInt.name()));
         });
 
         expIdIntMap.values().forEach(node -> builder.append('\n')
-            .append("  Actual is missing id-int entry for ").append(node.getIdentifier()));
+            .append("  Actual is missing id-int entry for ").append(node.name()));
 
         return builder.toString();
     }
 
-    private class CheckFinishedTask implements Runnable {
-
+    private final class CheckFinishedTask implements Runnable {
         private final SettableFuture<Void> future;
 
         CheckFinishedTask(final SettableFuture<Void> future) {
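
The IdIntsListener changes above move from ClusteredDOMDataTreeChangeListener with Optional-returning accessors to DOMDataTreeChangeListener with nullable dataBefore()/dataAfter(). A minimal sketch of that listener shape, with an illustrative class name and field that are not part of this commit:

    import java.util.List;
    import org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
    import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;

    final class LastValueListener implements DOMDataTreeChangeListener {
        private volatile NormalizedNode lastValue = null;

        @Override
        public void onInitialData() {
            // No pre-existing data at registration time.
        }

        @Override
        public void onDataTreeChanged(final List<DataTreeCandidate> changes) {
            for (var change : changes) {
                // dataAfter() is null when the subtree was deleted.
                final var after = change.getRootNode().dataAfter();
                if (after != null) {
                    lastValue = after;
                }
            }
        }

        NormalizedNode lastValue() {
            return lastValue;
        }
    }
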
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PrefixLeaderHandler.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PrefixLeaderHandler.java
deleted file mode 100644 (file)
index 0d8391a..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import java.util.Collections;
-import java.util.concurrent.CompletionStage;
-import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
-import org.opendaylight.controller.cluster.dom.api.CDSDataTreeProducer;
-import org.opendaylight.controller.cluster.dom.api.CDSShardAccess;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.BecomePrefixLeaderOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PrefixLeaderHandler {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PrefixLeaderHandler.class);
-
-    private final DOMDataTreeService domDataTreeService;
-    private final BindingNormalizedNodeSerializer serializer;
-
-    public PrefixLeaderHandler(final DOMDataTreeService domDataTreeService,
-                               final BindingNormalizedNodeSerializer serializer) {
-        this.domDataTreeService = domDataTreeService;
-        this.serializer = serializer;
-    }
-
-    public ListenableFuture<RpcResult<BecomePrefixLeaderOutput>> makeLeaderLocal(final BecomePrefixLeaderInput input) {
-
-        final YangInstanceIdentifier yid = serializer.toYangInstanceIdentifier(input.getPrefix());
-        final DOMDataTreeIdentifier prefix = new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, yid);
-
-        try (CDSDataTreeProducer producer =
-                     (CDSDataTreeProducer) domDataTreeService.createProducer(Collections.singleton(prefix))) {
-
-            final CDSShardAccess shardAccess = producer.getShardAccess(prefix);
-
-            final CompletionStage<Void> completionStage = shardAccess.makeLeaderLocal();
-
-            completionStage.exceptionally(throwable -> {
-                LOG.error("Leader movement failed.", throwable);
-                return null;
-            });
-        } catch (final DOMDataTreeProducerException e) {
-            LOG.warn("Error while closing producer", e);
-        } catch (final TimeoutException e) {
-            LOG.warn("Timeout while on producer operation", e);
-            Futures.immediateFuture(RpcResultBuilder.failed().withError(RpcError.ErrorType.RPC,
-                    "resource-denied-transport", "Timeout while opening producer please retry.", "clustering-it",
-                    "clustering-it", e));
-        }
-
-        return Futures.immediateFuture(RpcResultBuilder.success(new BecomePrefixLeaderOutputBuilder().build()).build());
-    }
-}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PrefixShardHandler.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PrefixShardHandler.java
deleted file mode 100644 (file)
index be35234..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ID;
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ID_INT;
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ID_INTS;
-import static org.opendaylight.controller.clustering.it.provider.impl.AbstractTransactionHandler.ITEM;
-
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.CompletionStage;
-import java.util.stream.Collectors;
-import org.opendaylight.controller.cluster.access.concepts.MemberName;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory;
-import org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration;
-import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.CreatePrefixShardOutputBuilder;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.RemovePrefixShardOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class PrefixShardHandler {
-
-    private static final Logger LOG = LoggerFactory.getLogger(PrefixShardHandler.class);
-    private static final int MAX_PREFIX = 4;
-    private static final String PREFIX_TEMPLATE = "prefix-";
-
-    private final DistributedShardFactory shardFactory;
-    private final DOMDataTreeService domDataTreeService;
-    private final BindingNormalizedNodeSerializer serializer;
-
-    private final Map<YangInstanceIdentifier, DistributedShardRegistration> registrations =
-            Collections.synchronizedMap(new HashMap<>());
-
-    public PrefixShardHandler(final DistributedShardFactory shardFactory,
-                              final DOMDataTreeService domDataTreeService,
-                              final BindingNormalizedNodeSerializer serializer) {
-
-        this.shardFactory = shardFactory;
-        this.domDataTreeService = domDataTreeService;
-        this.serializer = serializer;
-    }
-
-    public ListenableFuture<RpcResult<CreatePrefixShardOutput>> onCreatePrefixShard(
-            final CreatePrefixShardInput input) {
-
-        final SettableFuture<RpcResult<CreatePrefixShardOutput>> future = SettableFuture.create();
-
-        final CompletionStage<DistributedShardRegistration> completionStage;
-        final YangInstanceIdentifier identifier = serializer.toYangInstanceIdentifier(input.getPrefix());
-
-        try {
-            completionStage = shardFactory.createDistributedShard(
-                    new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, identifier),
-                    input.getReplicas().stream().map(MemberName::forName).collect(Collectors.toList()));
-
-            completionStage.thenAccept(registration -> {
-                LOG.debug("Shard[{}] created successfully.", identifier);
-                registrations.put(identifier, registration);
-
-                final ListenableFuture<?> ensureFuture = ensureListExists();
-                Futures.addCallback(ensureFuture, new FutureCallback<Object>() {
-                    @Override
-                    public void onSuccess(final Object result) {
-                        LOG.debug("Initial list write successful.");
-                        future.set(RpcResultBuilder.success(new CreatePrefixShardOutputBuilder().build()).build());
-                    }
-
-                    @Override
-                    public void onFailure(final Throwable throwable) {
-                        LOG.warn("Shard[{}] creation failed:", identifier, throwable);
-
-                        final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
-                                "create-shard-failed", "Shard creation failed", "cluster-test-app", "", throwable);
-                        future.set(RpcResultBuilder.<CreatePrefixShardOutput>failed().withRpcError(error).build());
-                    }
-                }, MoreExecutors.directExecutor());
-            });
-            completionStage.exceptionally(throwable -> {
-                LOG.warn("Shard[{}] creation failed:", identifier, throwable);
-
-                final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "create-shard-failed",
-                        "Shard creation failed", "cluster-test-app", "", throwable);
-                future.set(RpcResultBuilder.<CreatePrefixShardOutput>failed().withRpcError(error).build());
-                return null;
-            });
-        } catch (final DOMDataTreeShardingConflictException e) {
-            LOG.warn("Unable to register shard for: {}.", identifier);
-
-            final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "create-shard-failed",
-                    "Sharding conflict", "cluster-test-app", "", e);
-            future.set(RpcResultBuilder.<CreatePrefixShardOutput>failed().withRpcError(error).build());
-        }
-
-        return future;
-    }
-
-    public ListenableFuture<RpcResult<RemovePrefixShardOutput>> onRemovePrefixShard(
-            final RemovePrefixShardInput input) {
-
-        final YangInstanceIdentifier identifier = serializer.toYangInstanceIdentifier(input.getPrefix());
-        final DistributedShardRegistration registration = registrations.get(identifier);
-
-        if (registration == null) {
-            final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "registration-missing",
-                    "No shard registered at this prefix.");
-            return Futures.immediateFuture(RpcResultBuilder.<RemovePrefixShardOutput>failed().withRpcError(error)
-                .build());
-        }
-
-        final SettableFuture<RpcResult<RemovePrefixShardOutput>> future = SettableFuture.create();
-
-        final CompletionStage<Void> close = registration.close();
-        close.thenRun(() -> future.set(RpcResultBuilder.success(new RemovePrefixShardOutputBuilder().build()).build()));
-        close.exceptionally(throwable -> {
-            LOG.warn("Shard[{}] removal failed:", identifier, throwable);
-
-            final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION, "remove-shard-failed",
-                    "Shard removal failed", "cluster-test-app", "", throwable);
-            future.set(RpcResultBuilder.<RemovePrefixShardOutput>failed().withRpcError(error).build());
-            return null;
-        });
-
-        return future;
-    }
-
-    private ListenableFuture<?> ensureListExists() {
-
-        final CollectionNodeBuilder<MapEntryNode, MapNode> mapBuilder = ImmutableNodes.mapNodeBuilder(ID_INT);
-
-        // hardcoded initial list population for parallel produce-transactions testing on multiple nodes
-        for (int i = 1; i < MAX_PREFIX; i++) {
-            mapBuilder.withChild(
-                    ImmutableNodes.mapEntryBuilder(ID_INT, ID, PREFIX_TEMPLATE + i)
-                            .withChild(ImmutableNodes.mapNodeBuilder(ITEM).build())
-                            .build());
-        }
-        final MapNode mapNode = mapBuilder.build();
-
-        final ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(ID_INTS))
-                .withChild(mapNode)
-                .build();
-
-        final DOMDataTreeProducer producer = domDataTreeService.createProducer(Collections.singleton(
-                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY)));
-
-        final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(false);
-
-        final DOMDataTreeWriteCursor cursor =
-                tx.createCursor(new DOMDataTreeIdentifier(
-                        LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY));
-
-        cursor.merge(containerNode.getIdentifier(), containerNode);
-        cursor.close();
-
-        final ListenableFuture<?> future = tx.commit();
-        Futures.addCallback(future, new FutureCallback<Object>() {
-            @Override
-            public void onSuccess(final Object result) {
-                try {
-                    LOG.debug("Closing producer for initial list.");
-                    producer.close();
-                } catch (DOMDataTreeProducerException e) {
-                    LOG.warn("Error while closing producer.", e);
-                }
-            }
-
-            @Override
-            public void onFailure(final Throwable throwable) {
-                //NOOP handled by the caller of this method.
-            }
-        }, MoreExecutors.directExecutor());
-        return future;
-    }
-}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/ProduceTransactionsHandler.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/ProduceTransactionsHandler.java
deleted file mode 100644 (file)
index db29583..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (c) 2017 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.clustering.it.provider.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FluentFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.SplittableRandom;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import org.eclipse.jdt.annotation.NonNull;
-import org.opendaylight.mdsal.common.api.CommitInfo;
-import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCursorAwareTransaction;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeService;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteCursor;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsInput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsOutput;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.ProduceTransactionsOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
-import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
-import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class ProduceTransactionsHandler extends AbstractTransactionHandler {
-    private static final Logger LOG = LoggerFactory.getLogger(ProduceTransactionsHandler.class);
-
-    private final SettableFuture<RpcResult<ProduceTransactionsOutput>> future = SettableFuture.create();
-    private final SplittableRandom random = new SplittableRandom();
-    private final Set<Integer> usedValues = new HashSet<>();
-    private final DOMDataTreeIdentifier idListItem;
-    private final DOMDataTreeProducer itemProducer;
-
-    private long insertTx = 0;
-    private long deleteTx = 0;
-
-    private ProduceTransactionsHandler(final DOMDataTreeProducer producer, final DOMDataTreeIdentifier idListItem,
-            final ProduceTransactionsInput input) {
-        super(input);
-        this.itemProducer = Preconditions.checkNotNull(producer);
-        this.idListItem = Preconditions.checkNotNull(idListItem);
-    }
-
-    public static ListenableFuture<RpcResult<ProduceTransactionsOutput>> start(
-            final DOMDataTreeService domDataTreeService, final ProduceTransactionsInput input) {
-        final String id = input.getId();
-        LOG.debug("Filling the item list {} with initial values.", id);
-
-        final YangInstanceIdentifier idListWithKey = ID_INT_YID.node(new NodeIdentifierWithPredicates(ID_INT, ID, id));
-
-        final DOMDataTreeProducer itemProducer = domDataTreeService.createProducer(
-            Collections.singleton(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, idListWithKey)));
-
-        final DOMDataTreeCursorAwareTransaction tx = itemProducer.createTransaction(false);
-        final DOMDataTreeWriteCursor cursor =
-                tx.createCursor(new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, idListWithKey));
-
-        final MapNode list = ImmutableNodes.mapNodeBuilder(ITEM).build();
-        cursor.write(list.getIdentifier(), list);
-        cursor.close();
-
-        try {
-            tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
-        } catch (InterruptedException | ExecutionException | TimeoutException e) {
-            LOG.warn("Unable to fill the initial item list.", e);
-            closeProducer(itemProducer);
-
-            return Futures.immediateFuture(RpcResultBuilder.<ProduceTransactionsOutput>failed()
-                .withError(RpcError.ErrorType.APPLICATION, "Unexpected-exception", e).build());
-        }
-
-        final ProduceTransactionsHandler handler = new ProduceTransactionsHandler(itemProducer,
-            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, idListWithKey.node(list.getIdentifier())
-                .toOptimized()), input);
-        // It is handler's responsibility to close itemProducer when the work is finished.
-        handler.doStart();
-        return handler.future;
-    }
-
-    private static void closeProducer(final DOMDataTreeProducer producer) {
-        try {
-            producer.close();
-        } catch (final DOMDataTreeProducerException exception) {
-            LOG.warn("Failure while closing producer.", exception);
-        }
-    }
-
-    @Override
-    FluentFuture<? extends @NonNull CommitInfo> execWrite(final long txId) {
-        final int i = random.nextInt(MAX_ITEM + 1);
-        final DOMDataTreeCursorAwareTransaction tx = itemProducer.createTransaction(false);
-        final DOMDataTreeWriteCursor cursor = tx.createCursor(idListItem);
-
-        final NodeIdentifierWithPredicates entryId = new NodeIdentifierWithPredicates(ITEM, NUMBER, i);
-        if (usedValues.contains(i)) {
-            LOG.debug("Deleting item: {}", i);
-            deleteTx++;
-            cursor.delete(entryId);
-            usedValues.remove(i);
-
-        } else {
-            LOG.debug("Inserting item: {}", i);
-            insertTx++;
-
-            final MapEntryNode entry = ImmutableNodes.mapEntryBuilder().withNodeIdentifier(entryId)
-                    .withChild(ImmutableNodes.leafNode(NUMBER, i)).build();
-            cursor.write(entryId, entry);
-            usedValues.add(i);
-        }
-
-        cursor.close();
-
-        return tx.commit();
-    }
-
-    @Override
-    void runFailed(final Throwable cause, final long txId) {
-        closeProducer(itemProducer);
-        future.set(RpcResultBuilder.<ProduceTransactionsOutput>failed()
-            .withError(RpcError.ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
-    }
-
-    @Override
-    void runSuccessful(final long allTx) {
-        closeProducer(itemProducer);
-        final ProduceTransactionsOutput output = new ProduceTransactionsOutputBuilder()
-                .setAllTx(allTx)
-                .setInsertTx(insertTx)
-                .setDeleteTx(deleteTx)
-                .build();
-        future.set(RpcResultBuilder.<ProduceTransactionsOutput>success()
-                .withResult(output).build());
-    }
-
-    @Override
-    void runTimedOut(final String cause) {
-        closeProducer(itemProducer);
-        future.set(RpcResultBuilder.<ProduceTransactionsOutput>failed()
-            .withError(RpcError.ErrorType.APPLICATION, cause).build());
-    }
-}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PublishNotificationsTask.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/PublishNotificationsTask.java
index bd755d40c8b24fc87cb1ff9b608ca1b0970fb8c1..043325fea285b2bbbf78db62c98e4545862683e9 100644 (file)
@@ -5,24 +5,24 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.clustering.it.provider.impl;
 
-import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.Objects.requireNonNull;
+
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
-import org.opendaylight.controller.md.sal.binding.api.NotificationPublishService;
+import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequence;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequenceBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class PublishNotificationsTask implements Runnable {
-
+public final class PublishNotificationsTask implements Runnable {
     private static final Logger LOG = LoggerFactory.getLogger(PublishNotificationsTask.class);
-    private static final int SECOND_AS_NANO = 1000000000;
+    private static final int SECOND_AS_NANO = 1_000_000_000;
 
     private final NotificationPublishService notificationPublishService;
     private final String notificationId;
@@ -39,15 +39,13 @@ public class PublishNotificationsTask implements Runnable {
 
     public PublishNotificationsTask(final NotificationPublishService notificationPublishService,
                                     final String notificationId, final long secondsToTake, final long maxPerSecond) {
-        Preconditions.checkNotNull(notificationPublishService);
-        Preconditions.checkNotNull(notificationId);
-        Preconditions.checkArgument(secondsToTake > 0);
-        Preconditions.checkArgument(maxPerSecond > 0);
 
-        this.notificationPublishService = notificationPublishService;
-        this.notificationId = notificationId;
-        this.timeToTake = secondsToTake * SECOND_AS_NANO;
-        this.delay = SECOND_AS_NANO / maxPerSecond;
+        this.notificationPublishService = requireNonNull(notificationPublishService);
+        this.notificationId = requireNonNull(notificationId);
+        checkArgument(secondsToTake > 0);
+        timeToTake = secondsToTake * SECOND_AS_NANO;
+        checkArgument(maxPerSecond > 0);
+        delay = SECOND_AS_NANO / maxPerSecond;
 
         LOG.debug("Delay : {}", delay);
     }
@@ -76,7 +74,7 @@ public class PublishNotificationsTask implements Runnable {
         LOG.debug("current {}, starttime: {}, timetotake: {}, current-start = {}",
                 current, startTime, timeToTake, current - startTime);
 
-        if ((current - startTime) > timeToTake) {
+        if (current - startTime > timeToTake) {
             LOG.debug("Sequence number: {}", sequenceNumber);
             scheduledFuture.cancel(false);
             executor.shutdown();
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/RoutedGetConstantService.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/RoutedGetConstantService.java
index 6f90c8c947eaa81dca524c62d93018a93e10eb81..e3c1b20ff38ac19d641ec06327639718d6950a95 100644 (file)
@@ -7,41 +7,33 @@
  */
 package org.opendaylight.controller.clustering.it.provider.impl;
 
-import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
+import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer;
+import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.common.YangConstants;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class RoutedGetConstantService implements DOMRpcImplementation {
-
     private static final Logger LOG = LoggerFactory.getLogger(RoutedGetConstantService.class);
 
-    private static final QName OUTPUT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "output");
-    private static final QName CONSTANT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "constant");
-    private static final QName CONTEXT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "context");
-    private static final QName GET_CONTEXTED_CONSTANT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15",
-                    "get-contexted-constant");
+    private static final QNameModule MODULE =
+        QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
+    private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
+    private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
+    private static final QName GET_CONTEXTED_CONSTANT = QName.create(MODULE, "get-contexted-constant").intern();
 
     private final String constant;
 
@@ -49,33 +41,25 @@ public final class RoutedGetConstantService implements DOMRpcImplementation {
         this.constant = constant;
     }
 
-    public static DOMRpcImplementationRegistration<RoutedGetConstantService> registerNew(
-            final BindingNormalizedNodeSerializer codec, final DOMRpcProviderService rpcProviderService,
-            final String constant, final InstanceIdentifier<?> context) {
+    public static Registration registerNew(final BindingNormalizedNodeSerializer codec,
+            final DOMRpcProviderService rpcProviderService, final String constant,
+            final InstanceIdentifier<?> context) {
 
         LOG.debug("Registering get-contexted-constant on context: {}, with value: {}", context, constant);
 
-        final YangInstanceIdentifier yid = codec.toYangInstanceIdentifier(context);
-        final DOMRpcIdentifier id = DOMRpcIdentifier.create(SchemaPath.create(true, GET_CONTEXTED_CONSTANT), yid);
+        final var yid = codec.toYangInstanceIdentifier(context);
+        final var id = DOMRpcIdentifier.create(GET_CONTEXTED_CONSTANT, yid);
 
         return rpcProviderService.registerRpcImplementation(new RoutedGetConstantService(constant), id);
     }
 
     @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(final DOMRpcIdentifier rpc,
-                                                                  final NormalizedNode<?, ?> input) {
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
         LOG.debug("get-contexted-constant invoked, current value: {}", constant);
 
-        final LeafNode<Object> value = ImmutableLeafNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(CONSTANT))
-                .withValue(constant)
-                .build();
-
-        final ContainerNode result = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(OUTPUT))
-                .withChild(value)
-                .build();
-
-        return Futures.immediateCheckedFuture(new DefaultDOMRpcResult(result));
+        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(OUTPUT))
+            .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
+            .build()));
     }
 }
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/SingletonGetConstantService.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/SingletonGetConstantService.java
index 50060d36c4485d83e3e279c91885fc907e54788c..9177cc6ec7a2370de82887ca5fd7dc5fd9002573 100644 (file)
@@ -7,91 +7,71 @@
  */
 package org.opendaylight.controller.clustering.it.provider.impl;
 
-import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcIdentifier;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementation;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcImplementationRegistration;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService;
-import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
-import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonService;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider;
-import org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceRegistration;
-import org.opendaylight.mdsal.singleton.common.api.ServiceGroupIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMRpcIdentifier;
+import org.opendaylight.mdsal.dom.api.DOMRpcImplementation;
+import org.opendaylight.mdsal.dom.api.DOMRpcProviderService;
+import org.opendaylight.mdsal.dom.api.DOMRpcResult;
+import org.opendaylight.mdsal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonService;
+import org.opendaylight.mdsal.singleton.api.ClusterSingletonServiceProvider;
+import org.opendaylight.mdsal.singleton.api.ServiceGroupIdentifier;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.QNameModule;
+import org.opendaylight.yangtools.yang.common.YangConstants;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
-import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableLeafNodeBuilder;
-import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public final class SingletonGetConstantService implements DOMRpcImplementation, ClusterSingletonService {
-
     private static final Logger LOG = LoggerFactory.getLogger(SingletonGetConstantService.class);
 
-    private static final QName OUTPUT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "output");
-    private static final QName CONSTANT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "constant");
-    private static final QName CONTEXT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15", "context");
-    private static final QName GET_SINGLETON_CONSTANT =
-            QName.create("tag:opendaylight.org,2017:controller:yang:lowlevel:target","2017-02-15",
-                    "get-singleton-constant");
+    private static final QNameModule MODULE =
+        QNameModule.ofRevision("tag:opendaylight.org,2017:controller:yang:lowlevel:target", "2017-02-15").intern();
+    private static final QName OUTPUT = YangConstants.operationOutputQName(MODULE).intern();
+    private static final QName CONSTANT = QName.create(MODULE, "constant").intern();
+    private static final QName CONTEXT = QName.create(MODULE, "context").intern();
+    private static final QName GET_SINGLETON_CONSTANT = QName.create(MODULE, "get-singleton-constant").intern();
 
     private static final ServiceGroupIdentifier SERVICE_GROUP_IDENTIFIER =
-            ServiceGroupIdentifier.create("get-singleton-constant-service");
+        new ServiceGroupIdentifier("get-singleton-constant-service");
 
     private final DOMRpcProviderService rpcProviderService;
     private final String constant;
-    private DOMRpcImplementationRegistration<SingletonGetConstantService> rpcRegistration;
-
-    private SingletonGetConstantService(final DOMRpcProviderService rpcProviderService,
-                                        final String constant) {
 
+    private Registration rpcRegistration = null;
 
+    private SingletonGetConstantService(final DOMRpcProviderService rpcProviderService, final String constant) {
         this.rpcProviderService = rpcProviderService;
         this.constant = constant;
     }
 
-    public static ClusterSingletonServiceRegistration registerNew(
-            final ClusterSingletonServiceProvider singletonService, final DOMRpcProviderService rpcProviderService,
-            final String constant) {
+    public static Registration registerNew(final ClusterSingletonServiceProvider singletonService,
+            final DOMRpcProviderService rpcProviderService, final String constant) {
         LOG.debug("Registering get-singleton-constant into ClusterSingletonService, value {}", constant);
 
-        return singletonService
-                .registerClusterSingletonService(new SingletonGetConstantService(rpcProviderService, constant));
+        return singletonService.registerClusterSingletonService(
+            new SingletonGetConstantService(rpcProviderService, constant));
     }
 
     @Override
-    public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(DOMRpcIdentifier rpc,
-            NormalizedNode<?, ?> input) {
+    public ListenableFuture<DOMRpcResult> invokeRpc(final DOMRpcIdentifier rpc, final ContainerNode input) {
         LOG.debug("get-singleton-constant invoked, current value: {}", constant);
 
-        final LeafNode<Object> value = ImmutableLeafNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(CONSTANT))
-                .withValue(constant)
-                .build();
-
-        final ContainerNode result = ImmutableContainerNodeBuilder.create()
-                .withNodeIdentifier(new NodeIdentifier(OUTPUT))
-                .withChild(value)
-                .build();
-
-        return Futures.immediateCheckedFuture(new DefaultDOMRpcResult(result));
+        return Futures.immediateFuture(new DefaultDOMRpcResult(ImmutableNodes.newContainerBuilder()
+            .withNodeIdentifier(new NodeIdentifier(OUTPUT))
+            .withChild(ImmutableNodes.leafNode(CONSTANT, constant))
+            .build()));
     }
 
     @Override
     public void instantiateServiceInstance() {
         LOG.debug("Gained ownership of get-singleton-constant, registering service into rpcService");
-        final DOMRpcIdentifier id = DOMRpcIdentifier.create(SchemaPath.create(true, GET_SINGLETON_CONSTANT));
+        final DOMRpcIdentifier id = DOMRpcIdentifier.create(GET_SINGLETON_CONSTANT);
 
         rpcRegistration = rpcProviderService.registerRpcImplementation(this, id);
     }
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/WriteTransactionsHandler.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/WriteTransactionsHandler.java
index eb552b4d940c3bc3ee06aa0553912815795bc247..97ca77944f2334e636ff28ff409d37305cb641bf 100644 (file)
@@ -5,10 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.clustering.it.provider.impl;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+import static org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes.mapEntryBuilder;
+
+import com.google.common.util.concurrent.FluentFuture;
+import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.LinkedHashSet;
@@ -19,19 +22,18 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
-import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
-import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
-import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
-import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
+import org.eclipse.jdt.annotation.NonNull;
+import org.opendaylight.mdsal.common.api.CommitInfo;
+import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
+import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
+import org.opendaylight.mdsal.dom.api.DOMDataBroker;
+import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
+import org.opendaylight.mdsal.dom.api.DOMTransactionChain;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsInput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.WriteTransactionsOutputBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.Empty;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -39,25 +41,24 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdent
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
-import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
-import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.data.spi.node.ImmutableNodes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public abstract class WriteTransactionsHandler extends AbstractTransactionHandler {
-    private static final class Chained extends WriteTransactionsHandler implements TransactionChainListener {
+    private static final class Chained extends WriteTransactionsHandler implements FutureCallback<Empty> {
         private final SplittableRandom random = new SplittableRandom();
         private final DOMTransactionChain transactionChain;
 
         Chained(final DOMDataBroker dataBroker, final YangInstanceIdentifier idListItem,
             final WriteTransactionsInput input) {
             super(idListItem, input);
-            transactionChain = dataBroker.createTransactionChain(this);
+            transactionChain = dataBroker.createTransactionChain();
+            transactionChain.addCallback(this);
         }
 
         @Override
-        DOMDataWriteTransaction createTransaction() {
+        DOMDataTreeWriteTransaction createTransaction() {
             return transactionChain.newWriteOnlyTransaction();
         }
 
@@ -67,15 +68,14 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
         }
 
         @Override
-        public void onTransactionChainFailed(final TransactionChain<?, ?> chain,
-                final AsyncTransaction<?, ?> transaction, final Throwable cause) {
+        public void onFailure(final Throwable cause) {
             // This is expected to happen frequently in isolation testing.
             LOG.debug("Transaction chain failed.", cause);
             // Do not return RPC here, rely on transaction failure to call runFailed.
         }
 
         @Override
-        public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
+        public void onSuccess(final Empty result) {
             LOG.debug("Transaction chain closed successfully.");
         }
     }
@@ -88,11 +88,11 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
         Simple(final DOMDataBroker dataBroker, final YangInstanceIdentifier idListItem,
             final WriteTransactionsInput input) {
             super(idListItem, input);
-            this.dataBroker = Preconditions.checkNotNull(dataBroker);
+            this.dataBroker = requireNonNull(dataBroker);
         }
 
         @Override
-        DOMDataWriteTransaction createTransaction() {
+        DOMDataTreeWriteTransaction createTransaction() {
             return dataBroker.newWriteOnlyTransaction();
         }
 
@@ -123,7 +123,7 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
 
     WriteTransactionsHandler(final YangInstanceIdentifier idListItem, final WriteTransactionsInput input) {
         super(input);
-        this.idListItem = Preconditions.checkNotNull(idListItem);
+        this.idListItem = requireNonNull(idListItem);
     }
 
     public static ListenableFuture<RpcResult<WriteTransactionsOutput>> start(final DOMDataBroker domDataBroker,
@@ -131,64 +131,76 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
         LOG.info("Starting write transactions with input {}", input);
 
         final String id = input.getId();
-        final MapEntryNode entry = ImmutableNodes.mapEntryBuilder(ID_INT, ID, id)
-                .withChild(ImmutableNodes.mapNodeBuilder(ITEM).build())
+        final MapEntryNode entry = mapEntryBuilder(ID_INT, ID, id)
+                .withChild(ImmutableNodes.newSystemMapBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(ITEM))
+                    .build())
                 .build();
-        final YangInstanceIdentifier idListItem = ID_INT_YID.node(entry.getIdentifier());
+        final YangInstanceIdentifier idListItem = ID_INT_YID.node(entry.name());
 
-        final ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
+        final ContainerNode containerNode = ImmutableNodes.newContainerBuilder()
                 .withNodeIdentifier(new NodeIdentifier(ID_INTS))
-                .withChild(ImmutableNodes.mapNodeBuilder(ID_INT).build())
+                .withChild(ImmutableNodes.newSystemMapBuilder()
+                    .withNodeIdentifier(new NodeIdentifier(ID_INT))
+                    .build())
                 .build();
 
-        DOMDataWriteTransaction tx = domDataBroker.newWriteOnlyTransaction();
+        DOMDataTreeWriteTransaction tx = domDataBroker.newWriteOnlyTransaction();
         // write only the top list
         tx.merge(LogicalDatastoreType.CONFIGURATION, ID_INTS_YID, containerNode);
         try {
-            tx.submit().checkedGet(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
-        } catch (final OptimisticLockFailedException e) {
-            // when multiple write-transactions are executed concurrently we need to ignore this.
-            // If we get optimistic lock here it means id-ints already exists and we can continue.
-            LOG.debug("Got an optimistic lock when writing initial top level list element.", e);
-        } catch (final TransactionCommitFailedException | TimeoutException e) {
+            tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+        } catch (InterruptedException | TimeoutException e) {
             LOG.error("Error writing top-level path {}: {}", ID_INTS_YID, containerNode, e);
-            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
                 String.format("Could not start write transactions - error writing top-level path %s:  %s",
                     ID_INTS_YID, containerNode), e).buildFuture();
+        } catch (ExecutionException e) {
+            final Throwable cause = e.getCause();
+            if (cause instanceof OptimisticLockFailedException) {
+                // when multiple write-transactions are executed concurrently we need to ignore this.
+                // If we get optimistic lock here it means id-ints already exists and we can continue.
+                LOG.debug("Got an optimistic lock when writing initial top level list element.", e);
+            } else {
+                LOG.error("Error writing top-level path {}: {}", ID_INTS_YID, containerNode, e);
+                return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
+                    String.format("Could not start write transactions - error writing top-level path %s:  %s",
+                        ID_INTS_YID, containerNode), e).buildFuture();
+            }
         }
 
         tx = domDataBroker.newWriteOnlyTransaction();
         tx.merge(LogicalDatastoreType.CONFIGURATION, idListItem, entry);
 
         try {
-            tx.submit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+            tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
         } catch (InterruptedException | ExecutionException | TimeoutException e) {
             LOG.error("Error writing top-level path {}: {}", idListItem, entry, e);
-            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
                 String.format("Could not start write transactions - error writing list entry path %s: %s",
                     idListItem, entry), e).buildFuture();
         }
 
         LOG.debug("Filling the item list with initial values.");
 
-        final CollectionNodeBuilder<MapEntryNode, MapNode> mapBuilder = ImmutableNodes.mapNodeBuilder(ITEM);
-
         final YangInstanceIdentifier itemListId = idListItem.node(ITEM);
         tx = domDataBroker.newWriteOnlyTransaction();
-        final MapNode itemListNode = mapBuilder.build();
+        final MapNode itemListNode = ImmutableNodes.newSystemMapBuilder()
+            .withNodeIdentifier(new NodeIdentifier(ITEM))
+            .build();
         tx.put(LogicalDatastoreType.CONFIGURATION, itemListId, itemListNode);
 
         try {
-            tx.submit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+            tx.commit().get(INIT_TX_TIMEOUT_SECONDS, TimeUnit.SECONDS);
         } catch (InterruptedException | ExecutionException | TimeoutException e) {
             LOG.error("Error filling initial item list path {}: {}", itemListId, itemListNode, e);
-            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(RpcError.ErrorType.APPLICATION,
+            return RpcResultBuilder.<WriteTransactionsOutput>failed().withError(ErrorType.APPLICATION,
                 String.format("Could not start write transactions - error filling initial item list path %s: %s",
                     itemListId, itemListNode), e).buildFuture();
         }
 
         final WriteTransactionsHandler handler;
-        if (input.isChainedTransactions()) {
+        if (input.getChainedTransactions()) {
             handler = new Chained(domDataBroker, idListItem, input);
         } else {
             handler = new Simple(domDataBroker, idListItem, input);
@@ -201,13 +213,13 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
     }
 
     @Override
-    ListenableFuture<Void> execWrite(final long txId) {
+    FluentFuture<? extends @NonNull CommitInfo> execWrite(final long txId) {
         final int i = nextInt(MAX_ITEM + 1);
 
         final YangInstanceIdentifier entryId =
-                idListItem.node(ITEM).node(new YangInstanceIdentifier.NodeIdentifierWithPredicates(ITEM, NUMBER, i));
+                idListItem.node(ITEM).node(YangInstanceIdentifier.NodeIdentifierWithPredicates.of(ITEM, NUMBER, i));
 
-        final DOMDataWriteTransaction tx = createTransaction();
+        final DOMDataTreeWriteTransaction tx = createTransaction();
 
         if (usedValues.contains(i)) {
             LOG.debug("Deleting item: {}", i);
@@ -218,18 +230,17 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
         } else {
             LOG.debug("Inserting item: {}", i);
             insertTx.incrementAndGet();
-            final MapEntryNode entry = ImmutableNodes.mapEntry(ITEM, NUMBER, i);
-            tx.put(LogicalDatastoreType.CONFIGURATION, entryId, entry);
+            tx.put(LogicalDatastoreType.CONFIGURATION, entryId, mapEntryBuilder(ITEM, NUMBER, i).build());
             usedValues.add(i);
         }
 
-        return tx.submit();
+        return tx.commit();
     }
 
     @Override
     void runFailed(final Throwable cause, final long txId) {
         completionFuture.set(RpcResultBuilder.<WriteTransactionsOutput>failed()
-            .withError(RpcError.ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
+            .withError(ErrorType.APPLICATION, "Commit failed for tx # " + txId, cause).build());
     }
 
     @Override
@@ -247,10 +258,10 @@ public abstract class WriteTransactionsHandler extends AbstractTransactionHandle
     @Override
     void runTimedOut(final String cause) {
         completionFuture.set(RpcResultBuilder.<WriteTransactionsOutput>failed()
-            .withError(RpcError.ErrorType.APPLICATION, cause).build());
+            .withError(ErrorType.APPLICATION, cause).build());
     }
 
-    abstract DOMDataWriteTransaction createTransaction();
+    abstract DOMDataTreeWriteTransaction createTransaction();
 
     abstract int nextInt(int bound);
 }
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/YnlListener.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/impl/YnlListener.java
index c6744a9393faf96a0a8e643e7258b03516ee3c59..4dd650ec40e6532f678b29c499a075c8c68d50af 100644 (file)
@@ -5,20 +5,19 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.clustering.it.provider.impl;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
 import java.util.concurrent.atomic.AtomicLong;
+import org.opendaylight.mdsal.binding.api.NotificationService.Listener;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutput;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.control.rev170215.UnsubscribeYnlOutputBuilder;
 import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.IdSequence;
-import org.opendaylight.yang.gen.v1.tag.opendaylight.org._2017.controller.yang.lowlevel.target.rev170215.OdlMdsalLowlevelTargetListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class YnlListener implements OdlMdsalLowlevelTargetListener {
-
+public class YnlListener implements Listener<IdSequence> {
     private static final Logger LOG = LoggerFactory.getLogger(YnlListener.class);
 
     private final String id;
@@ -29,12 +28,11 @@ public class YnlListener implements OdlMdsalLowlevelTargetListener {
     private final AtomicLong errNot = new AtomicLong();
 
     public YnlListener(final String id) {
-        Preconditions.checkNotNull(id);
-        this.id = id;
+        this.id = requireNonNull(id);
     }
 
     @Override
-    public void onIdSequence(final IdSequence notification) {
+    public void onNotification(final IdSequence notification) {
         LOG.debug("Received id-sequence notification, : {}", notification);
 
         allNot.incrementAndGet();
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/resources/OSGI-INF/blueprint/cluster-test-app.xml b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/resources/OSGI-INF/blueprint/cluster-test-app.xml
deleted file mode 100644 (file)
index ba2e312..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:use-default-for-reference-types="true">
-
-  <reference id="dataBroker" interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"/>
-  <reference id="notificationService" interface="org.opendaylight.controller.sal.binding.api.NotificationProviderService"/>
-  <reference id="entityOwnershipService" interface="org.opendaylight.mdsal.eos.binding.api.EntityOwnershipService"/>
-  <reference id="bindingRpcRegistry" interface="org.opendaylight.controller.sal.binding.api.RpcProviderRegistry"/>
-  <reference id="domRpcProviderService" interface="org.opendaylight.controller.md.sal.dom.api.DOMRpcProviderService"/>
-  <reference id="clusterSingletonService" interface="org.opendaylight.mdsal.singleton.common.api.ClusterSingletonServiceProvider"/>
-  <reference id="domDataBroker" interface="org.opendaylight.controller.md.sal.dom.api.DOMDataBroker"/>
-  <reference id="schemaService" interface="org.opendaylight.mdsal.dom.api.DOMSchemaService"/>
-  <reference id="normalizedNodeSerializer" interface="org.opendaylight.mdsal.binding.dom.codec.api.BindingNormalizedNodeSerializer"/>
-  <reference id="notificationPublishService" interface="org.opendaylight.controller.md.sal.binding.api.NotificationPublishService" />
-  <reference id="notificationListenerService" interface="org.opendaylight.controller.md.sal.binding.api.NotificationService" />
-  <reference id="domDataTreeService" interface="org.opendaylight.mdsal.dom.api.DOMDataTreeService"/>
-  <reference id="distributedShardFactory" interface="org.opendaylight.controller.cluster.sharding.DistributedShardFactory"/>
-  <reference id="configDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
-             odl:type="distributed-config"/>
-  <reference id="actorSystemProvider" interface="org.opendaylight.controller.cluster.ActorSystemProvider"/>
-
-
-  <bean id="purchaseCarProvider" class="org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider" >
-    <property name="notificationProvider" ref="notificationService"/>
-  </bean>
-
-  <odl:routed-rpc-implementation id="carPurchaseRpcReg" ref="purchaseCarProvider"/>
-
-  <bean id="peopleProvider" class="org.opendaylight.controller.clustering.it.provider.PeopleProvider" >
-    <property name="dataProvider" ref="dataBroker"/>
-    <property name="rpcRegistration" ref="carPurchaseRpcReg"/>
-  </bean>
-
-  <bean id="carProvider" class="org.opendaylight.controller.clustering.it.provider.CarProvider"
-       destroy-method="close">
-    <argument ref="dataBroker"/>
-    <argument ref="entityOwnershipService"/>
-    <argument ref="domDataBroker"/>
-  </bean>
-
-  <odl:rpc-implementation ref="carProvider"/>
-  <odl:rpc-implementation ref="peopleProvider"/>
-
-  <bean id="peopleCarListener" class="org.opendaylight.controller.clustering.it.listener.PeopleCarListener" >
-    <property name="dataProvider" ref="dataBroker"/>
-  </bean>
-
-  <odl:notification-listener ref="peopleCarListener"/>
-
-  <bean id="basicTestProvider" class="org.opendaylight.controller.clustering.it.provider.BasicRpcTestProvider">
-    <argument ref="bindingRpcRegistry"/>
-    <argument ref="clusterSingletonService"/>
-  </bean>
-
-  <bean id="lowLevelTestProvider" class="org.opendaylight.controller.clustering.it.provider.MdsalLowLevelTestProvider">
-    <argument ref="bindingRpcRegistry"/>
-    <argument ref="domRpcProviderService"/>
-    <argument ref="clusterSingletonService"/>
-    <argument ref="schemaService"/>
-    <argument ref="normalizedNodeSerializer"/>
-    <argument ref="notificationPublishService"/>
-    <argument ref="notificationListenerService"/>
-    <argument ref="domDataBroker"/>
-    <argument ref="domDataTreeService"/>
-    <argument ref="distributedShardFactory"/>
-    <argument ref="configDatastore"/>
-    <argument ref="actorSystemProvider"/>
-  </bean>
-
-</blueprint>
index a1475836907ef45b5f04e17450b40b58c281a40f..80752933e0502c991609d3a35a1b943f1aafa905 100644 (file)
@@ -2,15 +2,22 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
-    <groupId>org.opendaylight.controller</groupId>
-    <artifactId>sal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <groupId>org.opendaylight.odlparent</groupId>
+    <artifactId>odlparent-lite</artifactId>
+    <version>13.0.11</version>
+    <relativePath/>
   </parent>
 
   <groupId>org.opendaylight.controller.samples</groupId>
-  <artifactId>sal-samples</artifactId>
+  <artifactId>samples-aggregator</artifactId>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
 
+  <properties>
+    <maven.deploy.skip>true</maven.deploy.skip>
+    <maven.install.skip>true</maven.install.skip>
+  </properties>
+
   <modules>
     <module>toaster</module>
     <module>toaster-consumer</module>
     <module>clustering-test-app</module>
     <module>toaster-it</module>
   </modules>
-  <scm>
-    <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-    <tag>HEAD</tag>
-    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-  </scm>
-
 </project>
index c80543db9507967a1f2c70d47b209f3e467b91b0..99d9c76c55ed7979a0b1bb9597445054dbd8d953 100644 (file)
@@ -4,32 +4,44 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../parent</relativePath>
   </parent>
 
   <groupId>org.opendaylight.controller.samples</groupId>
   <artifactId>sample-toaster-consumer</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
     <dependency>
-      <groupId>${project.groupId}</groupId>
+      <groupId>org.opendaylight.controller.samples</groupId>
       <artifactId>sample-toaster</artifactId>
-      <version>${project.version}</version>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-common-util</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>mdsal-binding-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.mdsal</groupId>
+      <artifactId>yang-binding</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
+      <optional>true</optional>
     </dependency>
     <dependency>
       <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <optional>true</optional>
     </dependency>
   </dependencies>
 
index b4c621797987a5e14da6b24cc238185f708d355f..e3034dd4828b29adb31ab47bf0a1f8de9044f253 100644 (file)
@@ -5,13 +5,13 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.sample.kitchen.api;
 
-import java.util.concurrent.Future;
+import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToastType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
 public interface KitchenService {
-    Future<RpcResult<Void>> makeBreakfast(EggsType eggs, Class<? extends ToastType> toast, int toastDoneness);
+
+    ListenableFuture<RpcResult<Void>> makeBreakfast(EggsType eggs, ToastType toast, int toastDoneness);
 }
index b9b9b3d678901190e45d36b5d26221d99165fa05..b67de11367874fd0e3aeb13645fd602fc7ac7a7c 100644 (file)
@@ -11,52 +11,83 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableList.Builder;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.controller.sample.kitchen.api.EggsType;
 import org.opendaylight.controller.sample.kitchen.api.KitchenService;
 import org.opendaylight.controller.sample.kitchen.api.KitchenServiceRuntimeMXBean;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInput;
+import org.opendaylight.mdsal.binding.api.NotificationService;
+import org.opendaylight.mdsal.binding.api.NotificationService.CompositeListener;
+import org.opendaylight.mdsal.binding.api.RpcService;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInputBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutputBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToastType;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterListener;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterOutOfBread;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestocked;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.WheatBread;
+import org.opendaylight.yangtools.concepts.Registration;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.opendaylight.yangtools.yang.common.Uint32;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class KitchenServiceImpl extends AbstractMXBean
-        implements KitchenService, KitchenServiceRuntimeMXBean, ToasterListener {
-
+@Singleton
+@Component(service = KitchenService.class, immediate = true)
+public final class KitchenServiceImpl extends AbstractMXBean implements KitchenService, KitchenServiceRuntimeMXBean {
     private static final Logger LOG = LoggerFactory.getLogger(KitchenServiceImpl.class);
     private static final MakeToastOutput EMPTY_MAKE_OUTPUT = new MakeToastOutputBuilder().build();
 
-    private final ToasterService toaster;
-
-    private final ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
+    private final ExecutorService executor = Executors.newCachedThreadPool();
+    private final MakeToast makeToast;
+    private final Registration reg;
 
     private volatile boolean toasterOutOfBread;
 
-    public KitchenServiceImpl(final ToasterService toaster) {
+    @Inject
+    @Activate
+    public KitchenServiceImpl(@Reference final RpcService rpcService,
+            @Reference final NotificationService notifService) {
         super("KitchenService", "toaster-consumer", null);
-        this.toaster = toaster;
+        makeToast = rpcService.getRpc(MakeToast.class);
+        reg = notifService.registerCompositeListener(new CompositeListener(Set.of(
+            new CompositeListener.Component<>(ToasterOutOfBread.class, notification -> {
+                LOG.info("ToasterOutOfBread notification");
+                toasterOutOfBread = true;
+            }),
+            new CompositeListener.Component<>(ToasterRestocked.class, notification -> {
+                LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
+                toasterOutOfBread = false;
+            }))));
+        register();
+    }
+
+    @PreDestroy
+    @Deactivate
+    public void close() {
+        unregister();
+        reg.close();
     }
 
     @Override
-    public Future<RpcResult<Void>> makeBreakfast(final EggsType eggsType, final Class<? extends ToastType> toastType,
+    public ListenableFuture<RpcResult<Void>> makeBreakfast(final EggsType eggsType, final ToastType toastType,
             final int toastDoneness) {
         // Call makeToast, The OpendaylightToaster impl already returns a ListenableFuture so the conversion is
         // actually a no-op.
@@ -85,38 +116,35 @@ public class KitchenServiceImpl extends AbstractMXBean
                 }
             }
 
-            return Futures.immediateFuture(RpcResultBuilder.<Void>status(atLeastOneSucceeded)
-                    .withRpcErrors(errorList.build()).build());
+            return RpcResultBuilder.<Void>status(atLeastOneSucceeded).withRpcErrors(errorList.build()).buildFuture();
         }, MoreExecutors.directExecutor());
     }
 
     private ListenableFuture<RpcResult<Void>> makeEggs(final EggsType eggsType) {
-        return executor.submit(() -> RpcResultBuilder.<Void>success().build());
+        return Futures.submit(() -> RpcResultBuilder.<Void>success().build(), executor);
     }
 
-    private ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final Class<? extends ToastType> toastType,
-            final int toastDoneness) {
-
+    private ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final ToastType toastType, final int toastDoneness) {
         if (toasterOutOfBread) {
             LOG.info("We're out of toast but we can make eggs");
-            return Futures.immediateFuture(RpcResultBuilder.success(EMPTY_MAKE_OUTPUT)
-                .withWarning(ErrorType.APPLICATION, "partial-operation",
-                    "Toaster is out of bread but we can make you eggs").build());
+            return RpcResultBuilder.success(EMPTY_MAKE_OUTPUT)
+                .withWarning(ErrorType.APPLICATION, ErrorTag.PARTIAL_OPERATION,
+                    "Toaster is out of bread but we can make you eggs")
+                .buildFuture();
         }
 
         // Access the ToasterService to make the toast.
-
-        MakeToastInput toastInput = new MakeToastInputBuilder().setToasterDoneness((long) toastDoneness)
-                .setToasterToastType(toastType).build();
-
-        return toaster.makeToast(toastInput);
+        return makeToast.invoke(new MakeToastInputBuilder()
+            .setToasterDoneness(Uint32.valueOf(toastDoneness))
+            .setToasterToastType(toastType)
+            .build());
     }
 
     @Override
     public Boolean makeScrambledWithWheat() {
         try {
             // This call has to block since we must return a result to the JMX client.
-            RpcResult<Void> result = makeBreakfast(EggsType.SCRAMBLED, WheatBread.class, 2).get();
+            RpcResult<Void> result = makeBreakfast(EggsType.SCRAMBLED, WheatBread.VALUE, 2).get();
             if (result.isSuccessful()) {
                 LOG.info("makeBreakfast succeeded");
             } else {
@@ -130,22 +158,4 @@ public class KitchenServiceImpl extends AbstractMXBean
 
         return Boolean.FALSE;
     }
-
-    /**
-     * Implemented from the ToasterListener interface.
-     */
-    @Override
-    public void onToasterOutOfBread(final ToasterOutOfBread notification) {
-        LOG.info("ToasterOutOfBread notification");
-        toasterOutOfBread = true;
-    }
-
-    /**
-     * Implemented from the ToasterListener interface.
-     */
-    @Override
-    public void onToasterRestocked(final ToasterRestocked notification) {
-        LOG.info("ToasterRestocked notification - amountOfBread: {}", notification.getAmountOfBread());
-        toasterOutOfBread = false;
-    }
 }
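
The consumer above now wires notifications through NotificationService.registerCompositeListener() instead of implementing the generated ToasterListener interface. As a rough sketch of that pattern only (the wrapper class, field and handler bodies below are placeholders, not part of this change; the API calls are the ones visible in the diff), the registration reduces to:

    import java.util.Set;
    import org.opendaylight.mdsal.binding.api.NotificationService;
    import org.opendaylight.mdsal.binding.api.NotificationService.CompositeListener;
    import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterOutOfBread;
    import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestocked;
    import org.opendaylight.yangtools.concepts.Registration;

    final class BreadWatcher implements AutoCloseable {
        private volatile boolean outOfBread;
        private final Registration reg;

        BreadWatcher(final NotificationService notifService) {
            // One registration covers both notification types; each Component pairs a
            // notification class with the lambda that handles it.
            reg = notifService.registerCompositeListener(new CompositeListener(Set.of(
                new CompositeListener.Component<>(ToasterOutOfBread.class, notification -> outOfBread = true),
                new CompositeListener.Component<>(ToasterRestocked.class, notification -> outOfBread = false))));
        }

        boolean isOutOfBread() {
            return outOfBread;
        }

        @Override
        public void close() {
            // Closing the Registration stops notification delivery.
            reg.close();
        }
    }
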
diff --git a/opendaylight/md-sal/samples/toaster-consumer/src/main/resources/OSGI-INF/blueprint/toaster-consumer.xml b/opendaylight/md-sal/samples/toaster-consumer/src/main/resources/OSGI-INF/blueprint/toaster-consumer.xml
deleted file mode 100644 (file)
index 16e8f98..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-    odl:use-default-for-reference-types="true">
-
-  <!-- Retrieves the RPC service for the ToasterService interface -->
-  <odl:rpc-service id="toasterService" interface="org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService"/>
-
-  <!-- Create the KitchenServiceImpl instance and inject the RPC service identified by "toasterService" -->
-  <bean id="kitchenService" class="org.opendaylight.controller.sample.kitchen.impl.KitchenServiceImpl"
-          init-method="register" destroy-method="unregister">
-    <argument ref="toasterService"/>
-  </bean>
-
-  <!-- Register the KitchenServiceImpl to receive yang notifications -->
-  <odl:notification-listener ref="kitchenService"/>
-
-  <!-- Advertise the KitchenServiceImpl with the OSGi registry with the type property set to "default" . The
-       type property is optional but can be used to distinguish this implementation from any other potential
-       KitchenService implementations (if there were any). Clients consuming the KitchenService can pick the
-       desired implementation via the particular type.
-  -->
-  <service ref="kitchenService" interface="org.opendaylight.controller.sample.kitchen.api.KitchenService"
-          odl:type="default"/>
-</blueprint>
index 94de6254a229394847f767b926d7f99c14cb237d..6b8c35e645eea37d816bbb433c5b534910c34d88 100644 (file)
@@ -5,7 +5,7 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-it-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../mdsal-it-parent</relativePath>
   </parent>
   <artifactId>sample-toaster-it</artifactId>
@@ -13,7 +13,7 @@
   <dependencies>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>features-mdsal</artifactId>
+      <artifactId>features-controller-experimental</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
     </dependency>
index d6c3db900dafb9ef33c8bd923e563584f62aa5de..0b4ea8e72a13cfabf78bd75fe47f304925de5f94 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.sample.toaster.it;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.ops4j.pax.exam.CoreOptions.maven;
+
 import java.lang.management.ManagementFactory;
 import javax.inject.Inject;
 import javax.management.MBeanServer;
@@ -20,6 +21,7 @@ import org.opendaylight.controller.mdsal.it.base.AbstractMdsalTestBase;
 import org.opendaylight.controller.sample.kitchen.api.EggsType;
 import org.opendaylight.controller.sample.kitchen.api.KitchenService;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.HashBrown;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.WhiteBread;
 import org.ops4j.pax.exam.junit.PaxExam;
 import org.ops4j.pax.exam.options.MavenUrlReference;
@@ -28,13 +30,17 @@ import org.ops4j.pax.exam.util.Filter;
 @RunWith(PaxExam.class)
 public class ToasterTest extends AbstractMdsalTestBase {
     @Inject
-    @Filter(timeout=60*1000)
+    @Filter(timeout = 60 * 1000)
     KitchenService kitchenService;
+    @Inject
+    @Filter(timeout = 60 * 1000)
+    // proxy for the entire toaster, nothing else
+    MakeToast makeToast;
 
     @Override
     public MavenUrlReference getFeatureRepo() {
-        return maven().groupId("org.opendaylight.controller").artifactId("features-mdsal").classifier("features")
-                .type("xml").versionAsInProject();
+        return maven().groupId("org.opendaylight.controller").artifactId("features-controller-experimental")
+                .classifier("features").type("xml").versionAsInProject();
     }
 
     @Override
@@ -45,7 +51,8 @@ public class ToasterTest extends AbstractMdsalTestBase {
     @Test
     public void testToaster() throws Exception {
         MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
-        ObjectName providerOn = new ObjectName("org.opendaylight.controller:name=OpendaylightToaster,type=toaster-provider");
+        ObjectName providerOn = new ObjectName(
+                "org.opendaylight.controller:name=OpendaylightToaster,type=toaster-provider");
 
         long toastsMade = (long) platformMBeanServer.getAttribute(providerOn, "ToastsMade");
         assertEquals(0, toastsMade);
@@ -53,8 +60,8 @@ public class ToasterTest extends AbstractMdsalTestBase {
         boolean success = true;
 
         // Make toasts using OSGi service
-        success &= kitchenService.makeBreakfast( EggsType.SCRAMBLED, HashBrown.class, 4).get().isSuccessful();
-        success &= kitchenService.makeBreakfast( EggsType.POACHED, WhiteBread.class, 8 ).get().isSuccessful();
+        success &= kitchenService.makeBreakfast(EggsType.SCRAMBLED, HashBrown.VALUE, 4).get().isSuccessful();
+        success &= kitchenService.makeBreakfast(EggsType.POACHED, WhiteBread.VALUE, 8).get().isSuccessful();
 
         assertTrue("Not all breakfasts succeeded", success);
 
index 55df328e7f0723dccf69c1f9ca6cc62a053764a0..ea6ee620811c1ed0728f95ab99c5eaf2cfd202c3 100644 (file)
@@ -4,20 +4,18 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../parent</relativePath>
   </parent>
 
   <groupId>org.opendaylight.controller.samples</groupId>
   <artifactId>sample-toaster-provider</artifactId>
-  <version>1.10.0-SNAPSHOT</version>
   <packaging>bundle</packaging>
 
   <dependencies>
     <dependency>
       <groupId>${project.groupId}</groupId>
       <artifactId>sample-toaster</artifactId>
-      <version>${project.version}</version>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-common-util</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.osgi</groupId>
-      <artifactId>org.osgi.core</artifactId>
-    </dependency>
 
     <!-- dependencies to use AbstractDataBrokerTest -->
     <dependency>
       <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-adapter</artifactId>
-      <scope>test</scope>
+      <artifactId>mdsal-binding-test-utils</artifactId>
     </dependency>
+
     <dependency>
-      <groupId>org.opendaylight.mdsal</groupId>
-      <artifactId>mdsal-binding-dom-adapter</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-    <!-- used to mock up classes -->
-     <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <scope>test</scope>
+      <groupId>jakarta.annotation</groupId>
+      <artifactId>jakarta.annotation-api</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.component.annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.guicedee.services</groupId>
+      <artifactId>javax.inject</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.service.metatype.annotations</artifactId>
+      <scope>compile</scope>
     </dependency>
   </dependencies>
 
index fc1e380a4c16bf324ee85e611dc510d602d89073..cc275a9b006005bf84ff09c0ca9028b986b0d3c0 100644 (file)
@@ -7,47 +7,53 @@
  */
 package org.opendaylight.controller.sample.toaster.provider;
 
-import static org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType.DELETE;
-import static org.opendaylight.mdsal.binding.api.DataObjectModification.ModificationType.WRITE;
+import static java.util.Objects.requireNonNull;
 import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION;
 import static org.opendaylight.mdsal.common.api.LogicalDatastoreType.OPERATIONAL;
-import static org.opendaylight.yangtools.yang.common.RpcError.ErrorType.APPLICATION;
+import static org.opendaylight.yangtools.yang.common.ErrorType.APPLICATION;
 
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.FluentFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.common.util.concurrent.SettableFuture;
-import java.util.Collection;
+import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+import javax.annotation.PreDestroy;
+import javax.inject.Inject;
+import javax.inject.Singleton;
+import org.eclipse.jdt.annotation.NonNull;
 import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.mdsal.binding.api.DataBroker;
-import org.opendaylight.mdsal.binding.api.DataObjectModification;
 import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
 import org.opendaylight.mdsal.binding.api.DataTreeIdentifier;
 import org.opendaylight.mdsal.binding.api.DataTreeModification;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.ReadWriteTransaction;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.api.WriteTransaction;
 import org.opendaylight.mdsal.common.api.CommitInfo;
 import org.opendaylight.mdsal.common.api.OptimisticLockFailedException;
 import org.opendaylight.mdsal.common.api.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastInput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.CancelToastOutputBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.DisplayString;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToast;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastInput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.MakeToastOutputBuilder;
+import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToaster;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterInput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterOutput;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.RestockToasterOutputBuilder;
@@ -57,20 +63,38 @@ import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterOutOfBreadBuilder;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestocked;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterRestockedBuilder;
-import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.ToasterService;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfig;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfigBuilder;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.concepts.Registration;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.ErrorTag;
+import org.opendaylight.yangtools.yang.common.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.osgi.service.component.annotations.Activate;
+import org.osgi.service.component.annotations.Component;
+import org.osgi.service.component.annotations.Deactivate;
+import org.osgi.service.component.annotations.Reference;
+import org.osgi.service.metatype.annotations.AttributeDefinition;
+import org.osgi.service.metatype.annotations.Designate;
+import org.osgi.service.metatype.annotations.ObjectClassDefinition;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class OpendaylightToaster extends AbstractMXBean
-        implements ToasterService, ToasterProviderRuntimeMXBean, DataTreeChangeListener<Toaster>, AutoCloseable {
+@Singleton
+@Component(service = MakeToast.class, immediate = true)
+@Designate(ocd = OpendaylightToaster.Configuration.class)
+public final class OpendaylightToaster extends AbstractMXBean
+        implements MakeToast, ToasterProviderRuntimeMXBean, DataTreeChangeListener<Toaster>, AutoCloseable {
+    @ObjectClassDefinition
+    public @interface Configuration {
+        @AttributeDefinition(description = "The name of the toaster's manufacturer", max = "255")
+        String manufacturer() default TOASTER_MANUFACTURER;
+        @AttributeDefinition(description = "The name of the toaster's model", max = "255")
+        String modelNumber() default TOASTER_MODEL_NUMBER;
+        @AttributeDefinition(description = "How many times we attempt to make toast before failing ",
+            min = "0", max = "65535")
+        int maxMakeToastTries() default 2;
+    }
 
     private static final CancelToastOutput EMPTY_CANCEL_OUTPUT = new CancelToastOutputBuilder().build();
     private static final MakeToastOutput EMPTY_MAKE_OUTPUT = new MakeToastOutputBuilder().build();
@@ -79,12 +103,13 @@ public class OpendaylightToaster extends AbstractMXBean
     private static final Logger LOG = LoggerFactory.getLogger(OpendaylightToaster.class);
 
     private static final InstanceIdentifier<Toaster> TOASTER_IID = InstanceIdentifier.builder(Toaster.class).build();
-    private static final DisplayString TOASTER_MANUFACTURER = new DisplayString("Opendaylight");
-    private static final DisplayString TOASTER_MODEL_NUMBER = new DisplayString("Model 1 - Binding Aware");
+    private static final String TOASTER_MANUFACTURER = "Opendaylight";
+    private static final String TOASTER_MODEL_NUMBER = "Model 1 - Binding Aware";
 
-    private DataBroker dataBroker;
-    private NotificationPublishService notificationProvider;
-    private ListenerRegistration<OpendaylightToaster> dataTreeChangeListenerRegistration;
+    private final DataBroker dataBroker;
+    private final NotificationPublishService notificationProvider;
+    private final Registration dataTreeChangeListenerRegistration;
+    private final Registration reg;
 
     private final ExecutorService executor;
 
@@ -96,48 +121,68 @@ public class OpendaylightToaster extends AbstractMXBean
     private final AtomicLong toastsMade = new AtomicLong(0);
     private final AtomicLong darknessFactor = new AtomicLong(1000);
 
-    private final ToasterAppConfig toasterAppConfig;
-
-    public OpendaylightToaster() {
-        this(new ToasterAppConfigBuilder().setManufacturer(TOASTER_MANUFACTURER).setModelNumber(TOASTER_MODEL_NUMBER)
-                .setMaxMakeToastTries(2).build());
-    }
+    private final @NonNull DisplayString manufacturer;
+    private final @NonNull DisplayString modelNumber;
+    private final int maxMakeToastTries;
 
-    public OpendaylightToaster(final ToasterAppConfig toasterAppConfig) {
+    public OpendaylightToaster(final DataBroker dataProvider,
+            final NotificationPublishService notificationPublishService, final RpcProviderService rpcProviderService,
+            final String manufacturer, final String modelNumber, final int maxMakeToastTries) {
         super("OpendaylightToaster", "toaster-provider", null);
-        executor = Executors.newFixedThreadPool(1);
-        this.toasterAppConfig = toasterAppConfig;
-    }
+        notificationProvider = requireNonNull(notificationPublishService);
+        dataBroker = requireNonNull(dataProvider);
 
-    public void setNotificationProvider(final NotificationPublishService notificationPublishService) {
-        this.notificationProvider = notificationPublishService;
-    }
+        this.manufacturer = new DisplayString(manufacturer);
+        this.modelNumber = new DisplayString(modelNumber);
+        this.maxMakeToastTries = maxMakeToastTries;
 
-    public void setDataBroker(final DataBroker dataBroker) {
-        this.dataBroker = dataBroker;
-    }
+        executor = Executors.newFixedThreadPool(1);
+        reg = rpcProviderService.registerRpcImplementations(
+            (CancelToast) this::cancelToast,
+            this,
+            (RestockToaster) this::restockToaster);
 
-    public void init() {
         LOG.info("Initializing...");
 
-        Preconditions.checkNotNull(dataBroker, "dataBroker must be set");
-        dataTreeChangeListenerRegistration = dataBroker.registerDataTreeChangeListener(
-                DataTreeIdentifier.create(CONFIGURATION, TOASTER_IID), this);
-        setToasterStatusUp(null);
+        dataTreeChangeListenerRegistration = requireNonNull(dataBroker, "dataBroker must be set")
+            .registerTreeChangeListener(DataTreeIdentifier.of(CONFIGURATION, TOASTER_IID), this);
+        try {
+            setToasterStatusUp(null).get();
+        } catch (InterruptedException | ExecutionException e) {
+            throw new IllegalStateException("Failed to commit initial data", e);
+        }
 
         // Register our MXBean.
         register();
     }
 
+    @Inject
+    public OpendaylightToaster(final DataBroker dataProvider,
+            final NotificationPublishService notificationPublishService, final RpcProviderService rpcProviderService) {
+        this(dataProvider, notificationPublishService, rpcProviderService, TOASTER_MANUFACTURER, TOASTER_MODEL_NUMBER,
+            2);
+    }
+
+    @Activate
+    public OpendaylightToaster(@Reference final DataBroker dataProvider,
+            @Reference final NotificationPublishService notificationPublishService,
+            @Reference final RpcProviderService rpcProviderService, final @NonNull Configuration configuration) {
+        this(dataProvider, notificationPublishService, rpcProviderService, configuration.manufacturer(),
+            configuration.modelNumber(), configuration.maxMakeToastTries());
+    }
+
     /**
      * Implemented from the AutoCloseable interface.
      */
     @Override
+    @PreDestroy
+    @Deactivate
     public void close() {
         LOG.info("Closing...");
 
         // Unregister our MXBean.
         unregister();
+        reg.close();
 
         // When we close this service we need to shutdown our executor!
         executor.shutdown();
@@ -167,31 +212,37 @@ public class OpendaylightToaster extends AbstractMXBean
         // note - we are simulating a device whose manufacture and model are
         // fixed (embedded) into the hardware.
         // This is why the manufacture and model number are hardcoded.
-        return new ToasterBuilder().setToasterManufacturer(toasterAppConfig.getManufacturer())
-                .setToasterModelNumber(toasterAppConfig.getModelNumber()).setToasterStatus(status).build();
+        return new ToasterBuilder()
+            .setToasterManufacturer(manufacturer)
+            .setToasterModelNumber(modelNumber)
+            .setToasterStatus(status)
+            .build();
     }
 
     /**
      * Implemented from the DataTreeChangeListener interface.
      */
     @Override
-    public void onDataTreeChanged(final Collection<DataTreeModification<Toaster>> changes) {
-        for (DataTreeModification<Toaster> change: changes) {
-            DataObjectModification<Toaster> rootNode = change.getRootNode();
-            if (rootNode.getModificationType() == WRITE) {
-                Toaster oldToaster = rootNode.getDataBefore();
-                Toaster newToaster = rootNode.getDataAfter();
-                LOG.info("onDataTreeChanged - Toaster config with path {} was added or replaced: "
-                        + "old Toaster: {}, new Toaster: {}", change.getRootPath().getRootIdentifier(),
-                        oldToaster, newToaster);
-
-                Long darkness = newToaster.getDarknessFactor();
-                if (darkness != null) {
-                    darknessFactor.set(darkness);
+    public void onDataTreeChanged(final List<DataTreeModification<Toaster>> changes) {
+        for (var change: changes) {
+            final var rootNode = change.getRootNode();
+            switch (rootNode.modificationType()) {
+                case WRITE -> {
+                    final var oldToaster = rootNode.dataBefore();
+                    final var newToaster = rootNode.dataAfter();
+                    LOG.info("onDataTreeChanged - Toaster config with path {} was added or replaced: old Toaster: {}, "
+                        + "new Toaster: {}", change.getRootPath().path(), oldToaster, newToaster);
+
+                    final var darkness = newToaster.getDarknessFactor();
+                    if (darkness != null) {
+                        darknessFactor.set(darkness.toJava());
+                    }
+                }
+                case DELETE -> LOG.info("onDataTreeChanged - Toaster config with path {} was deleted: old Toaster: {}",
+                        change.getRootPath().path(), rootNode.dataBefore());
+                default -> {
+                    // No-op
                 }
-            } else if (rootNode.getModificationType() == DELETE) {
-                LOG.info("onDataTreeChanged - Toaster config with path {} was deleted: old Toaster: {}",
-                        change.getRootPath().getRootIdentifier(), rootNode.getDataBefore());
             }
         }
     }
@@ -199,9 +250,8 @@ public class OpendaylightToaster extends AbstractMXBean
     /**
      * RPC call implemented from the ToasterService interface that cancels the current toast, if any.
      */
-    @Override
-    public ListenableFuture<RpcResult<CancelToastOutput>> cancelToast(final CancelToastInput input) {
-        Future<?> current = currentMakeToastTask.getAndSet(null);
+    private ListenableFuture<RpcResult<CancelToastOutput>> cancelToast(final CancelToastInput input) {
+        final var current = currentMakeToastTask.getAndSet(null);
         if (current != null) {
             current.cancel(true);
         }
@@ -214,23 +264,20 @@ public class OpendaylightToaster extends AbstractMXBean
      * RPC call implemented from the ToasterService interface that attempts to make toast.
      */
     @Override
-    public ListenableFuture<RpcResult<MakeToastOutput>> makeToast(final MakeToastInput input) {
+    public ListenableFuture<RpcResult<MakeToastOutput>> invoke(final MakeToastInput input) {
         LOG.info("makeToast: {}", input);
-
-        final SettableFuture<RpcResult<MakeToastOutput>> futureResult = SettableFuture.create();
-
-        checkStatusAndMakeToast(input, futureResult, toasterAppConfig.getMaxMakeToastTries());
-
+        final var futureResult = SettableFuture.<RpcResult<MakeToastOutput>>create();
+        checkStatusAndMakeToast(input, futureResult, maxMakeToastTries);
         return futureResult;
     }
 
     private static RpcError makeToasterOutOfBreadError() {
-        return RpcResultBuilder.newError(APPLICATION, "resource-denied", "Toaster is out of bread", "out-of-stock",
-                null, null);
+        return RpcResultBuilder.newError(APPLICATION, ErrorTag.RESOURCE_DENIED, "Toaster is out of bread",
+            "out-of-stock", null, null);
     }
 
     private static RpcError makeToasterInUseError() {
-        return RpcResultBuilder.newWarning(APPLICATION, "in-use", "Toaster is busy", null, null, null);
+        return RpcResultBuilder.newWarning(APPLICATION, ErrorTag.IN_USE, "Toaster is busy", null, null, null);
     }
 
     private void checkStatusAndMakeToast(final MakeToastInput input,
@@ -245,7 +292,7 @@ public class OpendaylightToaster extends AbstractMXBean
             Futures.transformAsync(readFuture, toasterData -> {
                 ToasterStatus toasterStatus = ToasterStatus.Up;
                 if (toasterData.isPresent()) {
-                    toasterStatus = toasterData.get().getToasterStatus();
+                    toasterStatus = toasterData.orElseThrow().getToasterStatus();
                 }
 
                 LOG.debug("Read toaster status: {}", toasterStatus);
@@ -320,11 +367,10 @@ public class OpendaylightToaster extends AbstractMXBean
      * Restocks the bread for the toaster, resets the toastsMade counter to 0, and sends a
      * ToasterRestocked notification.
      */
-    @Override
-    public ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
+    private ListenableFuture<RpcResult<RestockToasterOutput>> restockToaster(final RestockToasterInput input) {
         LOG.info("restockToaster: {}", input);
 
-        amountOfBreadInStock.set(input.getAmountOfBreadToStock());
+        amountOfBreadInStock.set(input.getAmountOfBreadToStock().toJava());
 
         if (amountOfBreadInStock.get() > 0) {
             ToasterRestocked reStockedNotification = new ToasterRestockedBuilder()
@@ -352,11 +398,12 @@ public class OpendaylightToaster extends AbstractMXBean
         return toastsMade.get();
     }
 
-    private void setToasterStatusUp(final Function<Boolean, MakeToastOutput> resultCallback) {
+    private ListenableFuture<?> setToasterStatusUp(final Function<Boolean, MakeToastOutput> resultCallback) {
         WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
         tx.put(OPERATIONAL,TOASTER_IID, buildToaster(ToasterStatus.Up));
 
-        Futures.addCallback(tx.commit(), new FutureCallback<CommitInfo>() {
+        final var future = tx.commit();
+        Futures.addCallback(future, new FutureCallback<CommitInfo>() {
             @Override
             public void onSuccess(final CommitInfo result) {
                 LOG.info("Successfully set ToasterStatus to Up");
@@ -378,6 +425,8 @@ public class OpendaylightToaster extends AbstractMXBean
                 }
             }
         }, MoreExecutors.directExecutor());
+
+        return future;
     }
 
     private boolean outOfBread() {
@@ -399,7 +448,8 @@ public class OpendaylightToaster extends AbstractMXBean
         public Void call() {
             try {
                 // make toast just sleeps for n seconds per doneness level.
-                Thread.sleep(OpendaylightToaster.this.darknessFactor.get() * toastRequest.getToasterDoneness());
+                Thread.sleep(darknessFactor.get()
+                        * toastRequest.getToasterDoneness().toJava());
 
             } catch (InterruptedException e) {
                 LOG.info("Interrupted while making the toast");
diff --git a/opendaylight/md-sal/samples/toaster-provider/src/main/resources/OSGI-INF/blueprint/toaster-provider.xml b/opendaylight/md-sal/samples/toaster-provider/src/main/resources/OSGI-INF/blueprint/toaster-provider.xml
deleted file mode 100644 (file)
index 4c7af5b..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.1.0"
-    odl:restart-dependents-on-updates="true" odl:use-default-for-reference-types="true">
-
-  <!-- "restart-dependents-on-updates" is an ODL extension attribute that processes any "property-placeholder"
-       elements and reacts to updates to the corresponding cfg file by restarting this blueprint container any
-       dependent containers that consume OSGi services provided by this container in an atomic and orderly
-       manner.
-
-       "use-default-for-reference-types" is an ODL extension attribute that adds a filter to all services
-       imported via "reference" elements where the "type" property is either not set or set to "default" if
-       the odl:type attribute isn't explicitly specified. This ensures the default implementation is imported
-       if there are other implementations advertised with other types.
-  -->
-
-  <!-- Accesses properties via the etc/org.opendaylight.toaster.cfg file. The properties are made available
-       as variables that can be referenced. The variables are substituted with the actual values read from
-       the cfg file, if present, or the default-properties.
-   -->
-  <cm:property-placeholder persistent-id="org.opendaylight.toaster" update-strategy="none">
-    <cm:default-properties>
-      <cm:property name="databroker-type" value="default"/>
-    </cm:default-properties>
-  </cm:property-placeholder>
-
-  <!-- "clustered-app-config" is an ODL extension that obtains an application configuration yang container
-       from the MD-SAL data store and makes the binding DataObject available as a bean that can be injected
-       into other beans. Here we obtain the ToasterAppConfig container DataObject. This also shows how to
-       specify default data via the "default-config" child element. While default leaf values defined in the
-       yang are returned, one may have more complex data, eg lists, that require default data. The
-       "default-config" must contain the XML representation of the yang data, including namespace, wrapped
-       in a CDATA section to prevent the blueprint container from treating it as markup.
-  -->
-  <odl:clustered-app-config id="toasterAppConfig"
-      binding-class="org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.toaster.app.config.rev160503.ToasterAppConfig">
-    <odl:default-config><![CDATA[
-      <toaster-app-config xmlns="urn:opendaylight:params:xml:ns:yang:controller:toaster-app-config">
-        <max-make-toast-tries>3</max-make-toast-tries>
-      </toaster-app-config>
-    ]]></odl:default-config>
-  </odl:clustered-app-config>
-
-  <!-- Import MD-SAL services. For the DataBroker, we explicitly specify the odl:type which is configurable
-       via the cfg file. In this manner the toaster can be configured to use the default clustered DataBroker
-       or the specialized "pingpong" DataBroker (or any other DataBroker implementation).
-   -->
-  <reference id="dataBroker" interface="org.opendaylight.mdsal.binding.api.DataBroker" odl:type="${databroker-type}" />
-  <reference id="notificationService" interface="org.opendaylight.mdsal.binding.api.NotificationPublishService"/>
-
-  <!-- Create the OpendaylightToaster instance and inject its dependencies -->
-  <bean id="toaster" class="org.opendaylight.controller.sample.toaster.provider.OpendaylightToaster"
-          init-method="init" destroy-method="close">
-    <argument ref="toasterAppConfig"/>
-    <property name="dataBroker" ref="dataBroker"/>
-    <property name="notificationProvider" ref="notificationService"/>
-  </bean>
-
-  <!-- Register the OpendaylightToaster instance as an RPC implementation provider. The "rpc-implementation"
-       element automatically figures out the RpcService interface although it can be explicitly specified.
-   -->
-  <odl:rpc-implementation ref="toaster"/>
-</blueprint>
diff --git a/opendaylight/md-sal/samples/toaster-provider/src/main/yang/toaster-app-config.yang b/opendaylight/md-sal/samples/toaster-provider/src/main/yang/toaster-app-config.yang
deleted file mode 100644 (file)
index 369ba46..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-module toaster-app-config {
-    yang-version 1;
-
-    namespace "urn:opendaylight:params:xml:ns:yang:controller:toaster-app-config";
-    prefix toaster-app-config;
-
-    import toaster { prefix toaster; revision-date 2009-11-20; }
-
-    description
-      "Configuration for the Opendaylight toaster application.";
-
-    revision "2016-05-03" {
-        description
-            "Initial revision.";
-    }
-
-    container toaster-app-config {
-        leaf manufacturer {
-            type toaster:DisplayString;
-            default "Opendaylight";
-        }
-
-        leaf model-number {
-            type toaster:DisplayString;
-            default "Model 1 - Binding Aware";
-        }
-
-        leaf max-make-toast-tries {
-            type uint16;
-            default 2;
-        }
-    }
-}
\ No newline at end of file
index 3be46b0a4d1ab5f822d8909b4aa5e6f8ef7c7db7..86d2e6d19c249dad322f0f8a94d5ecbd2b546264 100644 (file)
@@ -22,6 +22,7 @@ import org.junit.Test;
 import org.opendaylight.mdsal.binding.api.DataBroker;
 import org.opendaylight.mdsal.binding.api.NotificationPublishService;
 import org.opendaylight.mdsal.binding.api.ReadTransaction;
+import org.opendaylight.mdsal.binding.api.RpcProviderService;
 import org.opendaylight.mdsal.binding.dom.adapter.test.AbstractConcurrentDataBrokerTest;
 import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.DisplayString;
@@ -32,52 +33,52 @@ import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.toaster.rev091120.WheatBread;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.Uint32;
 
 public class OpenDaylightToasterTest extends AbstractConcurrentDataBrokerTest {
 
-    private static InstanceIdentifier<Toaster> TOASTER_IID = InstanceIdentifier.builder(Toaster.class).build();
+    private static final InstanceIdentifier<Toaster> TOASTER_IID = InstanceIdentifier.builder(Toaster.class).build();
     private OpendaylightToaster toaster;
 
     @Before
     public void setupToaster() {
-        toaster = new OpendaylightToaster();
-        toaster.setDataBroker(getDataBroker());
-        toaster.init();
-
-        // We'll mock the NotificationProviderService.
-        NotificationPublishService mockNotification = mock(NotificationPublishService.class);
-        toaster.setNotificationProvider(mockNotification);
+        toaster = new OpendaylightToaster(getDataBroker(), mock(NotificationPublishService.class),
+            mock(RpcProviderService.class));
     }
 
     @Test
     public void testToasterInitOnStartUp() throws Exception {
         DataBroker broker = getDataBroker();
 
-        ReadTransaction readTx = broker.newReadOnlyTransaction();
-        Optional<Toaster> optional = readTx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID).get();
+        Optional<Toaster> optional;
+        try (ReadTransaction readTx = broker.newReadOnlyTransaction()) {
+            optional = readTx.read(LogicalDatastoreType.OPERATIONAL, TOASTER_IID).get();
+        }
         assertNotNull(optional);
         assertTrue("Operational toaster not present", optional.isPresent());
 
-        Toaster toasterData = optional.get();
+        Toaster toasterData = optional.orElseThrow();
 
         assertEquals(Toaster.ToasterStatus.Up, toasterData.getToasterStatus());
         assertEquals(new DisplayString("Opendaylight"), toasterData.getToasterManufacturer());
         assertEquals(new DisplayString("Model 1 - Binding Aware"), toasterData.getToasterModelNumber());
 
-        Optional<Toaster> configToaster = readTx.read(LogicalDatastoreType.CONFIGURATION, TOASTER_IID).get();
-        assertFalse("Didn't expect config data for toaster.", configToaster.isPresent());
+        try (ReadTransaction readTx = broker.newReadOnlyTransaction()) {
+            Boolean configToaster = readTx.exists(LogicalDatastoreType.CONFIGURATION, TOASTER_IID).get();
+            assertFalse("Didn't expect config data for toaster.", configToaster);
+        }
     }
 
     @Test
     @Ignore //ignored because it is not a test right now. Illustrative purposes only.
     public void testSomething() throws Exception {
-        MakeToastInput toastInput = new MakeToastInputBuilder().setToasterDoneness(1L)
-                .setToasterToastType(WheatBread.class).build();
+        MakeToastInput toastInput = new MakeToastInputBuilder().setToasterDoneness(Uint32.valueOf(1))
+                .setToasterToastType(WheatBread.VALUE).build();
 
         // NOTE: In a real test we would want to override the Thread.sleep() to
         // prevent our junit test
         // for sleeping for a second...
-        Future<RpcResult<MakeToastOutput>> makeToast = toaster.makeToast(toastInput);
+        Future<RpcResult<MakeToastOutput>> makeToast = toaster.invoke(toastInput);
 
         RpcResult<MakeToastOutput> rpcResult = makeToast.get();
 
index 4d7ad9039be8e98f8b8abebe065adc09d9676c87..57230fa79d95283a8ad263e7bd944e34032de4f9 100644 (file)
@@ -5,12 +5,11 @@
   <parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>mdsal-parent</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>9.0.3-SNAPSHOT</version>
     <relativePath>../../parent</relativePath>
   </parent>
 
   <groupId>org.opendaylight.controller.samples</groupId>
-  <version>1.10.0-SNAPSHOT</version>
   <artifactId>sample-toaster</artifactId>
   <packaging>bundle</packaging>
 
diff --git a/opendaylight/model/model-inventory/pom.xml b/opendaylight/model/model-inventory/pom.xml
deleted file mode 100644 (file)
index e75d24d..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.opendaylight.mdsal</groupId>
-        <artifactId>binding-parent</artifactId>
-        <version>3.0.6</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller.model</groupId>
-    <artifactId>model-inventory</artifactId>
-    <packaging>bundle</packaging>
-    <version>1.10.0-SNAPSHOT</version>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.binding.model.ietf</groupId>
-            <artifactId>rfc6991-ietf-inet-types</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>yang-ext</artifactId>
-        </dependency>
-    </dependencies>
-
-    <scm>
-        <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-        <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-        <tag>HEAD</tag>
-        <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-    </scm>
-</project>
diff --git a/opendaylight/model/model-inventory/src/main/yang/opendaylight-inventory.yang b/opendaylight/model/model-inventory/src/main/yang/opendaylight-inventory.yang
deleted file mode 100644 (file)
index bf025f4..0000000
+++ /dev/null
@@ -1,228 +0,0 @@
-module opendaylight-inventory {
-    namespace "urn:opendaylight:inventory";
-    prefix inv;
-
-    import yang-ext {prefix ext; revision-date "2013-07-09";}
-    import ietf-inet-types {prefix inet; revision-date "2013-07-15";}
-
-
-    revision "2013-08-19" {
-        description "Initial revision of Inventory model";
-    }
-
-
-    typedef support-type {
-        status deprecated;
-        type enumeration {
-            enum native;
-            enum emulated;
-            enum not-supported;
-        }
-    }
-
-    typedef node-id {
-        status deprecated;
-        type inet:uri;
-        description "Identifier for a particular node. For example:
-
-                        myprotocol:<unique_node_id>
-
-                        myprotocol:12
-
-                    It is a good practice to always lead with a scoping identifier.
-                    In the example above the scoping was 'myprotocol'. In your app you
-                    could use 'myapp' etc.";
-    }
-
-    typedef node-connector-id {
-        status deprecated;
-        type inet:uri;
-        description "Identifier for a particular node-connector. For example:
-
-                        myprotocol:<unique_node_connector_id>
-                        myprotocol:3
-
-                    It is good practice to always lead with a scoping identifier.
-                    In the example above the scoping identifier was 'myprotocol'; in your
-                    application you could use 'myapp', etc.";
-
-    }
-
-    //YANG does not have a statement which limits the scope of an instance-identifier to a particular subtree,
-    //which is why node-ref and node-connector-ref are defined as dedicated typedefs wrapping an instance-identifier rather than as bare instance-identifier leafs.
-    typedef node-ref {
-        status deprecated;
-        type instance-identifier;
-        description "A reference that points to an opendaylight-inventory:nodes/node in the data tree.";
-    }
-
-    typedef node-connector-ref {
-        status deprecated;
-        type instance-identifier;
-        description "A reference that points to an opendaylight-inventory:nodes/node/{node-id}/node-connector in the data tree.";
-    }
-
-    identity node-context {
-        status deprecated;
-        description "A node-context is a classifier for node elements which allows an RPC to provide a service on behalf of a particular element in the data tree.";
-    }
-
-    identity node-connector-context {
-        status deprecated;
-        description "A node-connector-context is a classifier for node-connector elements which allows an RPC to provide a service on behalf of a particular element in the data tree.";
-    }
-
-    //We are defining a base identity here because of a limitation of YANG enums: YANG does not allow you to extend enumerations.
-    //By defining a base identity we allow other YANG files to derive from this identity to define additional "enumerations". By
-    //using node-type as their base, they are able to pass their objects to fields that accept "node-types" while uniquely
-    //describing their type of node, such as "router-node", "switch-node", etc.
-    //See https://wiki.opendaylight.org/view/YANG_Tools:YANG_to_Java_Mapping#Identity for more information.
-    identity node-type {
-        status deprecated;
-        description "A base identity definition which represents a generic node type and can be extended in other yang files.";
-    }
-
-    identity node-connector-type {
-        status deprecated;
-        description "A base identity definition which represents a generic node connector type and can be extended in other yang files.";
-    }
-
-    grouping node {
-        status deprecated;
-        description "Describes the contents of a generic node:
-                     essentially an ID and a list of node-connectors.
-                     Acts as an augmentation point where other YANG files
-                     can add additional information.";
-
-        leaf id {
-            type node-id;
-            description "The unique identifier for the node.";
-        }
-
-        list "node-connector" {
-            key "id";
-
-            description "A list of node connectors that belong to this node.";
-            ext:context-instance "node-connector-context";
-
-            uses node-connector;
-        }
-    }
-
-    grouping node-connector {
-        status deprecated;
-        description "Describes a generic node connector which consists of an ID.
-                     Acts as an augmentation point where other YANG files can
-                     add additional information.";
-
-        leaf id {
-            type node-connector-id;
-            description "The unique identifier for the node-connector.";
-        }
-    }
-
-    grouping node-context-ref {
-        status deprecated;
-        description
-        "A helper grouping which contains a reference to a node classified with a node-context. This allows RPCs in other YANG files to refine their input to a particular node instance.";
-
-        leaf node {
-            ext:context-reference "node-context";
-            type node-ref;
-            description "A reference to a particular node.";
-        }
-    }
-
-    /** Base structure **/
-    container nodes {
-        status deprecated;
-        description "The root container of all nodes.";
-
-        list node {
-            key "id";
-            ext:context-instance "node-context";
-            description "A list of nodes (as defined by the 'grouping node').";
-            uses node; //this refers to the 'grouping node' defined above.
-        }
-    }
-
-    //The following notifications should really be replaced by direct writes to the data tree, with data change listeners listening to those changes.
-    //Notifications should be reserved for one-time events which do not require persistence to the data tree.
-    notification node-updated {
-        status deprecated;
-
-        description "A notification sent by someone who noticed that a node was modified but did not update the data tree.
-                    Describes that something on the node has been updated (including the addition of a new node), but that
-                    the change is, for whatever reason, not reflected in the data tree.
-
-                    Deprecated: If a process determines that a node was updated, then that
-                    logic should update the node using the DataBroker directly. Listeners interested
-                    in update changes should register a data change listener for notifications of updates.";
-
-        leaf node-ref {
-            ext:context-reference "node-context";
-            description "A reference to the node which changed.";
-
-            type node-ref;
-        }
-        uses node;
-    }
-
-    notification node-connector-updated {
-
-        status deprecated;
-
-        description "A notification sent by someone who noticed that a node-connector was modified but did not update the data tree.
-                    Describes that something on the node-connector has been updated (including the addition of a new node-connector), but that
-                    the change is, for whatever reason, not reflected in the data tree.
-
-                    Deprecated: If a process determines that a node-connector was updated, then that
-                    logic should update the node-connector using the DataBroker directly. Listeners interested
-                    in update changes should register a data change listener for notifications of updates.";
-
-        leaf node-connector-ref {
-            ext:context-reference "node-connector-context";
-            type node-connector-ref;
-            description "A reference to the node-connector which changed.";
-        }
-        uses node-connector;
-    }
-
-    notification node-removed {
-
-        status deprecated;
-
-        description "A notification sent by someone who noticed that a node was removed but did not update the data tree.
-                    Describes that a node has been removed but that the removal is,
-                    for whatever reason, not reflected in the data tree.
-
-                    Deprecated: If a process determines that a node was removed, then that
-                    logic should remove the node from the DataBroker directly. Listeners interested
-                    in changes should register a data change listener for notifications of removals.";
-
-        leaf node-ref {
-            description "A reference to the node that was removed.";
-            ext:context-reference "node-context";
-            type node-ref;
-        }
-    }
-
-    notification node-connector-removed {
-
-        status deprecated;
-
-        description "A notification sent by someone who noticed that a node-connector was removed but did not update the data tree.
-                    Describes that a node-connector has been removed but that the removal is,
-                    for whatever reason, not reflected in the data tree.
-
-                    Deprecated: If a process determines that a node-connector was removed, then that
-                    logic should remove the node-connector from the DataBroker directly. Listeners interested
-                    in changes should register a data change listener for notifications of removals.";
-
-        leaf node-connector-ref {
-            description "A reference to the node-connector that was removed.";
-            ext:context-reference "node-connector-context";
-            type node-connector-ref;
-        }
-    }
-}
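To illustrate the extension points described in the module above, here is a minimal YANG sketch (module name, namespace and identity name are hypothetical, not part of this change) of how a consumer module could derive a concrete node type from the node-type base identity and route an RPC to a particular node through the node-context classifier:

    module example-node-extensions {
        // Hypothetical consumer module; all names here are illustrative only.
        namespace "urn:example:node:extensions";
        prefix exn;

        import opendaylight-inventory { prefix inv; revision-date "2013-08-19"; }

        // Deriving from the base identity lets this value be passed wherever a
        // node-type identityref is accepted, while still uniquely naming a switch.
        identity switch-node {
            base inv:node-type;
        }

        // A routed RPC: the input reuses the node-context-ref grouping, so its
        // node leaf (an instance-identifier classified by node-context) selects
        // the node in the data tree on whose behalf the RPC is executed.
        rpc reset-node {
            input {
                uses inv:node-context-ref;
            }
        }
    }

This mirrors the pattern downstream inventory consumers (for example, the OpenFlow plugin models) have traditionally used to route RPCs to individual nodes.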
diff --git a/opendaylight/model/model-topology/pom.xml b/opendaylight/model/model-topology/pom.xml
deleted file mode 100644 (file)
index d197981..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.opendaylight.mdsal</groupId>
-        <artifactId>binding-parent</artifactId>
-        <version>3.0.6</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller.model</groupId>
-    <artifactId>model-topology</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>bundle</packaging>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.opendaylight.controller.model</groupId>
-            <artifactId>model-inventory</artifactId>
-            <version>${project.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.opendaylight.mdsal.model</groupId>
-            <artifactId>ietf-topology</artifactId>
-        </dependency>
-    </dependencies>
-    <scm>
-        <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-        <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-        <tag>HEAD</tag>
-        <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-    </scm>
-</project>
diff --git a/opendaylight/model/model-topology/src/main/yang/opendaylight-topology-inventory.yang b/opendaylight/model/model-topology/src/main/yang/opendaylight-topology-inventory.yang
deleted file mode 100644 (file)
index 9fccb99..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-module opendaylight-topology-inventory {
-    yang-version 1;
-    namespace "urn:opendaylight:model:topology:inventory";
-    // replace with IANA namespace when assigned
-    prefix "nt";
-
-    import yang-ext { prefix "ext"; }
-    import ietf-inet-types { prefix "inet"; }
-    import opendaylight-inventory {prefix "inv";}
-    import opendaylight-topology {prefix "odt";}
-    import network-topology {prefix "topo"; revision-date "2013-10-21"; }
-
-    organization "TBD";
-
-    contact "WILL-BE-DEFINED-LATER";
-
-    revision 2013-10-30 {
-        description
-            "Initial revision.";
-    }
-
-    augment "/topo:network-topology/topo:topology/topo:node" {
-        ext:augment-identifier "inventory-node";
-        leaf inventory-node-ref {
-            type inv:node-ref;
-        }
-    }
-
-    augment "/topo:network-topology/topo:topology/topo:node/topo:termination-point" {
-        ext:augment-identifier "inventory-node-connector";
-        leaf inventory-node-connector-ref {
-            ext:context-reference "inv:node-connector-context";
-            type inv:node-connector-ref;
-        }
-    }
-}
diff --git a/opendaylight/model/model-topology/src/main/yang/opendaylight-topology-view.yang b/opendaylight/model/model-topology/src/main/yang/opendaylight-topology-view.yang
deleted file mode 100644 (file)
index 99e5eff..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-module opendaylight-topology-view  {
-    yang-version 1;
-    namespace "urn:opendaylight:model:topology:view";
-    // replace with IANA namespace when assigned
-    prefix "nt";
-
-    import yang-ext { prefix "ext"; }
-    import network-topology {prefix "topo"; revision-date "2013-10-21"; }
-
-    organization "TBD";
-
-    contact "WILL-BE-DEFINED-LATER";
-
-    revision 2013-10-30 {
-        description
-            "Initial revision.";
-    }
-    
-
-    grouping aggregate-topology {
-        leaf-list original-topology {
-            type topo:topology-ref;    
-        }
-    }
-
-    grouping aggregate-node {
-        list original-node {
-            leaf topology {
-                type topo:topology-ref;
-            }
-            leaf node {
-                type topo:node-ref;
-            }
-        }
-    }
-
-    augment "/topo:network-topology/topo:topology" {
-        ext:augment-identifier "aggregated-topology";
-        uses aggregate-topology;
-    }
-
-    augment "/topo:network-topology/topo:topology/topo:node" {
-        ext:augment-identifier "aggregated-node";
-        uses aggregate-node;
-    }
-}
diff --git a/opendaylight/model/model-topology/src/main/yang/opendaylight-topology.yang b/opendaylight/model/model-topology/src/main/yang/opendaylight-topology.yang
deleted file mode 100644 (file)
index c39759a..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-module opendaylight-topology {
-    yang-version 1;
-    namespace "urn:opendaylight:model:topology:general";
-    // replace with IANA namespace when assigned
-    prefix "nt";
-
-    import yang-ext { prefix "ext"; }
-    import ietf-inet-types { prefix "inet"; }
-    import opendaylight-inventory {prefix "inv";}
-    import network-topology {prefix "topo"; revision-date "2013-10-21"; }
-
-    organization "TBD";
-
-    contact "WILL-BE-DEFINED-LATER";
-
-    revision 2013-10-30 {
-        description
-            "Initial revision.";
-    }
-
-    identity node-type {
-    
-    }
-    
-    typedef node-type-ref {
-        type identityref {
-            base node-type;
-       }
-    }
-    
-    identity topology-context {
-    
-    }
-    
-    identity topology-node-context {
-        
-    }
-
-    grouping node-identifiers {
-        list node-identifier {
-            key "type identifier";
-            leaf type {
-                type node-type-ref;
-            }
-            leaf identifier {
-                type inet:uri;
-            }
-        }
-    }
-
-    augment "/topo:network-topology/topo:topology" {
-        ext:context-instance "topology-context";
-    }
-
-    /* Inventory Augmentations */
-    augment "/topo:network-topology/topo:topology/topo:node" {
-        ext:context-instance "topology-node-context";
-    }
-
-    augment "/topo:network-topology/topo:topology/topo:node" {
-        ext:augment-identifier "identifiable-node";
-        uses node-identifiers;
-    }
-}
diff --git a/opendaylight/model/pom.xml b/opendaylight/model/pom.xml
deleted file mode 100644 (file)
index ee98da7..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-
-    <modelVersion>4.0.0</modelVersion>
-    <parent>
-        <groupId>org.opendaylight.odlparent</groupId>
-        <artifactId>odlparent-lite</artifactId>
-        <version>4.0.9</version>
-        <relativePath/>
-    </parent>
-
-    <groupId>org.opendaylight.controller.model</groupId>
-    <artifactId>model-aggregator</artifactId>
-    <version>1.10.0-SNAPSHOT</version>
-    <packaging>pom</packaging>
-
-    <modules>
-        <module>model-inventory</module>
-        <module>model-topology</module>
-    </modules>
-
-    <scm>
-        <connection>scm:git:http://git.opendaylight.org/gerrit/controller.git</connection>
-        <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
-        <tag>HEAD</tag>
-        <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
-    </scm>
-</project>
diff --git a/pom.xml b/pom.xml
index 91d0428c7312d4a2b0a46fd825641adb8f3077fc..7b3c5253d3436995679581f4a81f90b0ef037cf9 100644 (file)
--- a/pom.xml
+++ b/pom.xml
@@ -4,47 +4,38 @@
   <parent>
     <groupId>org.opendaylight.odlparent</groupId>
     <artifactId>odlparent-lite</artifactId>
-    <version>4.0.9</version>
+    <version>13.0.11</version>
     <relativePath/>
   </parent>
+
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>releasepom</artifactId>
-  <version>0.10.0-SNAPSHOT</version>
+  <version>9.0.3-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>controller</name>
   <!-- Used by Sonar to set project name -->
 
-  <modules>
-
-    <!-- md-sal -->
-    <module>opendaylight/md-sal</module>
-    <!-- config -->
-    <module>opendaylight/config</module>
-
-    <module>opendaylight/model</module>
-
-    <module>opendaylight/blueprint</module>
-
-    <!-- Parents -->
-    <module>benchmark</module>
-    <module>opendaylight/commons/jolokia</module>
+  <properties>
+    <maven.deploy.skip>true</maven.deploy.skip>
+    <maven.install.skip>true</maven.install.skip>
+  </properties>
 
-    <!-- Karaf Distribution -->
-    <module>karaf</module>
+  <modules>
+    <module>artifacts</module>
+    <module>docs</module>
     <module>features</module>
+    <module>karaf</module>
 
-    <!-- archetypes -->
-    <module>opendaylight/archetypes</module>
+    <module>akka</module>
+    <module>atomix-storage</module>
+    <module>bundle-parent</module>
+    <module>benchmark</module>
+    <module>jolokia</module>
 
-    <!-- documentation -->
-    <module>docs</module>
+    <module>opendaylight/blueprint</module>
+    <module>opendaylight/md-sal</module>
   </modules>
 
-  <properties>
-    <maven.deploy.skip>true</maven.deploy.skip>
-    <maven.install.skip>true</maven.install.skip>
-  </properties>
-
   <profiles>
     <profile>
       <id>sonar-jacoco-aggregate</id>
diff --git a/tox.ini b/tox.ini
index 8d4a48fd5695e7b5b275a60975d277387ab10e15..df9a2d9b65e7e43c114424aea57b632010fa536c 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -12,5 +12,5 @@ commands =
 
 [testenv:docs-linkcheck]
 deps = -rdocs/requirements.txt
-commands = sphinx-build -W -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck